/*
   drbd_req.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_req.h"


static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);

/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
{
	struct request_queue *q = device->rq_queue;

	generic_start_io_acct(q, bio_data_dir(req->master_bio),
			      req->i.size >> 9, &device->vdisk->part0);
}

/* Update disk stats when completing request upwards */
static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
{
	struct request_queue *q = device->rq_queue;

	generic_end_io_acct(q, bio_data_dir(req->master_bio),
			    &device->vdisk->part0, req->start_jif);
}

static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio *bio_src)
{
	struct drbd_request *req;

	req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
	if (!req)
		return NULL;
	memset(req, 0, sizeof(*req));

	drbd_req_make_private_bio(req, bio_src);
	req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
		      | (bio_op(bio_src) == REQ_OP_WRITE_SAME ? RQ_WSAME : 0)
		      | (bio_op(bio_src) == REQ_OP_WRITE_ZEROES ? RQ_UNMAP : 0)
		      | (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0);
	req->device = device;
	req->master_bio = bio_src;
	req->epoch = 0;

	drbd_clear_interval(&req->i);
	req->i.sector = bio_src->bi_iter.bi_sector;
	req->i.size = bio_src->bi_iter.bi_size;
	req->i.local = true;
	req->i.waiting = false;

	INIT_LIST_HEAD(&req->tl_requests);
	INIT_LIST_HEAD(&req->w.list);
	INIT_LIST_HEAD(&req->req_pending_master_completion);
	INIT_LIST_HEAD(&req->req_pending_local);

	/* one reference to be put by __drbd_make_request */
	atomic_set(&req->completion_ref, 1);
	/* one kref as long as completion_ref > 0 */
	kref_init(&req->kref);
	return req;
}
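
/* For illustration only, a rough sketch of how the references set up above
 * play out over a request's life time (all names as used in this file):
 *
 *   drbd_req_new()                 completion_ref = 1, kref = 1
 *   mod_rq_state(, set)            takes additional completion_refs/krefs
 *                                  while RQ_LOCAL_PENDING, RQ_NET_PENDING,
 *                                  RQ_NET_QUEUED or RQ_EXP_BARR_ACK are set
 *   mod_rq_state(, clear)          drops them again as the local disk and
 *                                  the peer make progress
 *   drbd_req_put_completion_ref()  completes the master bio once
 *                                  completion_ref reaches zero
 *   kref_put(, drbd_req_destroy)   frees the request with the last kref
 */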

static void drbd_remove_request_interval(struct rb_root *root,
					 struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	struct drbd_interval *i = &req->i;

	drbd_remove_interval(root, i);

	/* Wake up any processes waiting for this request to complete.  */
	if (i->waiting)
		wake_up(&device->misc_wait);
}

void drbd_req_destroy(struct kref *kref)
{
	struct drbd_request *req = container_of(kref, struct drbd_request, kref);
	struct drbd_device *device = req->device;
	const unsigned s = req->rq_state;

	if ((req->master_bio && !(s & RQ_POSTPONED)) ||
	    atomic_read(&req->completion_ref) ||
	    (s & RQ_LOCAL_PENDING) ||
	    ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
		drbd_err(device, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
				s, atomic_read(&req->completion_ref));
		return;
	}

	/* If called from mod_rq_state (expected normal case) or
	 * drbd_send_and_submit (the less likely normal path), this holds the
	 * req_lock, and req->tl_requests will typically be on ->transfer_log,
	 * though it may be still empty (never added to the transfer log).
	 *
	 * If called from do_retry(), we do NOT hold the req_lock, but we are
	 * still allowed to unconditionally list_del(&req->tl_requests),
	 * because it will be on a local on-stack list only. */
	list_del_init(&req->tl_requests);

	/* finally remove the request from the conflict detection
	 * respective block_id verification interval tree. */
	if (!drbd_interval_empty(&req->i)) {
		struct rb_root *root;

		if (s & RQ_WRITE)
			root = &device->write_requests;
		else
			root = &device->read_requests;
		drbd_remove_request_interval(root, req);
	} else if (s & (RQ_NET_MASK & ~RQ_NET_DONE) && req->i.size != 0)
		drbd_err(device, "drbd_req_destroy: Logic BUG: interval empty, but: rq_state=0x%x, sect=%llu, size=%u\n",
			s, (unsigned long long)req->i.sector, req->i.size);

	/* if it was a write, we may have to set the corresponding
	 * bit(s) out-of-sync first. If it had a local part, we need to
	 * release the reference to the activity log. */
	if (s & RQ_WRITE) {
		/* Set out-of-sync unless both OK flags are set
		 * (local only or remote failed).
		 * Other places where we set out-of-sync:
		 * READ with local io-error */

		/* There is a special case:
		 * we may notice late that IO was suspended,
		 * and postpone, or schedule for retry, a write,
		 * before it even was submitted or sent.
		 * In that case we do not want to touch the bitmap at all.
		 */
		if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
			if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
				drbd_set_out_of_sync(device, req->i.sector, req->i.size);

			if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
				drbd_set_in_sync(device, req->i.sector, req->i.size);
		}

		/* one might be tempted to move the drbd_al_complete_io
		 * to the local io completion callback drbd_request_endio.
		 * but, if this was a mirror write, we may only
		 * drbd_al_complete_io after this is RQ_NET_DONE,
		 * otherwise the extent could be dropped from the al
		 * before it has actually been written on the peer.
		 * if we crash before our peer knows about the request,
		 * but after the extent has been dropped from the al,
		 * we would forget to resync the corresponding extent.
		 */
		if (s & RQ_IN_ACT_LOG) {
			if (get_ldev_if_state(device, D_FAILED)) {
				drbd_al_complete_io(device, &req->i);
				put_ldev(device);
			} else if (__ratelimit(&drbd_ratelimit_state)) {
				drbd_warn(device, "Should have called drbd_al_complete_io(, %llu, %u), "
					  "but my Disk seems to have failed :(\n",
					  (unsigned long long) req->i.sector, req->i.size);
			}
		}
	}

	mempool_free(req, drbd_request_mempool);
}

static void wake_all_senders(struct drbd_connection *connection)
{
	wake_up(&connection->sender_work.q_wait);
}

/* must hold resource->req_lock */
void start_new_tl_epoch(struct drbd_connection *connection)
{
	/* no point closing an epoch, if it is empty, anyways. */
	if (connection->current_tle_writes == 0)
		return;

	connection->current_tle_writes = 0;
	atomic_inc(&connection->current_tle_nr);
	wake_all_senders(connection);
}
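
/* For orientation, a rough sketch of how epochs are used here: each write
 * records the transfer log epoch it belongs to (req->epoch, compared
 * against connection->current_tle_nr in drbd_req_complete()), while
 * current_tle_writes counts the writes in the still-open epoch.  Closing
 * the epoch above wakes the sender, which is then expected to separate
 * the epochs on the wire with a barrier packet; the corresponding
 * acknowledgment comes back as BARRIER_ACKED in __req_mod() below. */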

void complete_master_bio(struct drbd_device *device,
		struct bio_and_error *m)
{
	m->bio->bi_status = errno_to_blk_status(m->error);
	bio_endio(m->bio);
	dec_ap_bio(device);
}


/* Helper for __req_mod().
 * Set m->bio to the master bio, if it is fit to be completed,
 * or leave it alone (it is initialized to NULL in __req_mod),
 * if it has already been completed, or cannot be completed yet.
 * If m->bio is set, the error status to be returned is placed in m->error.
 */
static
void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
{
	const unsigned s = req->rq_state;
	struct drbd_device *device = req->device;
	int error, ok;

	/* we must not complete the master bio, while it is
	 *	still being processed by _drbd_send_zc_bio (drbd_send_dblock)
	 *	not yet acknowledged by the peer
	 *	not yet completed by the local io subsystem
	 * these flags may get cleared in any order by
	 *	the worker,
	 *	the receiver,
	 *	the bio_endio completion callbacks.
	 */
	if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
	    (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
	    (s & RQ_COMPLETION_SUSP)) {
		drbd_err(device, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
		return;
	}

	if (!req->master_bio) {
		drbd_err(device, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
		return;
	}

	/*
	 * figure out whether to report success or failure.
	 *
	 * report success when at least one of the operations succeeded.
	 * or, to put the other way,
	 * only report failure, when both operations failed.
	 *
	 * what to do about the failures is handled elsewhere.
	 * what we need to do here is just: complete the master_bio.
	 *
	 * local completion error, if any, has been stored as ERR_PTR
	 * in private_bio within drbd_request_endio.
	 */
	ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
	error = PTR_ERR(req->private_bio);

	/* Before we can signal completion to the upper layers,
	 * we may need to close the current transfer log epoch.
	 * We are within the request lock, so we can simply compare
	 * the request epoch number with the current transfer log
	 * epoch number.  If they match, increase the current_tle_nr,
	 * and reset the transfer log epoch write_cnt.
	 */
	if (op_is_write(bio_op(req->master_bio)) &&
	    req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
		start_new_tl_epoch(first_peer_device(device)->connection);

	/* Update disk stats */
	_drbd_end_io_acct(device, req);

	/* If READ failed,
	 * have it be pushed back to the retry work queue,
	 * so it will re-enter __drbd_make_request(),
	 * and be re-assigned to a suitable local or remote path,
	 * or failed if we do not have access to good data anymore.
	 *
	 * Unless it was failed early by __drbd_make_request(),
	 * because no path was available, in which case
	 * it was not even added to the transfer_log.
	 *
	 * read-ahead may fail, and will not be retried.
	 *
	 * WRITE should have used all available paths already.
	 */
	if (!ok &&
	    bio_op(req->master_bio) == REQ_OP_READ &&
	    !(req->master_bio->bi_opf & REQ_RAHEAD) &&
	    !list_empty(&req->tl_requests))
		req->rq_state |= RQ_POSTPONED;

	if (!(req->rq_state & RQ_POSTPONED)) {
		m->error = ok ? 0 : (error ?: -EIO);
		m->bio = req->master_bio;
		req->master_bio = NULL;
		/* We leave it in the tree, to be able to verify later
		 * write-acks in protocol != C during resync.
		 * But we mark it as "complete", so it won't be counted as
		 * conflict in a multi-primary setup. */
		req->i.completed = true;
	}

	if (req->i.waiting)
		wake_up(&device->misc_wait);

	/* Either we are about to complete to upper layers,
	 * or we will restart this request.
	 * In either case, the request object will be destroyed soon,
	 * so better remove it from all lists. */
	list_del_init(&req->req_pending_master_completion);
}

/* still holds resource->req_lock */
static void drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
{
	struct drbd_device *device = req->device;
	D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));

	if (!put)
		return;

	if (!atomic_sub_and_test(put, &req->completion_ref))
		return;

	drbd_req_complete(req, m);

	/* local completion may still come in later,
	 * we need to keep the req object around. */
	if (req->rq_state & RQ_LOCAL_ABORTED)
		return;

	if (req->rq_state & RQ_POSTPONED) {
		/* don't destroy the req object just yet,
		 * but queue it for retry */
		drbd_restart_request(req);
		return;
	}

	kref_put(&req->kref, drbd_req_destroy);
}

static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_next == NULL)
		connection->req_next = req;
}

static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_next != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if (s & RQ_NET_QUEUED)
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_next = req;
}

static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_ack_pending == NULL)
		connection->req_ack_pending = req;
}

static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_ack_pending != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING))
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_ack_pending = req;
}

static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_not_net_done == NULL)
		connection->req_not_net_done = req;
}

static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_not_net_done != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE))
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_not_net_done = req;
}
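
/* The six helpers above maintain cached cursors into the transfer log
 * (req_next, req_ack_pending, req_not_net_done): set_if_null_* remembers
 * the first candidate, and advance_* moves the cursor past @req to the
 * next entry that still matches the respective state mask, or to NULL at
 * the end of the log, so that consumers presumably need not rescan the
 * whole transfer log from the start. */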

/* I'd like this to be the only place that manipulates
 * req->completion_ref and req->kref. */
static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
		int clear, int set)
{
	struct drbd_device *device = req->device;
	struct drbd_peer_device *peer_device = first_peer_device(device);
	unsigned s = req->rq_state;
	int c_put = 0;

	if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP))
		set |= RQ_COMPLETION_SUSP;

	/* apply */

	req->rq_state &= ~clear;
	req->rq_state |= set;

	/* no change? */
	if (req->rq_state == s)
		return;

	/* intent: get references */

	kref_get(&req->kref);

	if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
		atomic_inc(&req->completion_ref);

	if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
		inc_ap_pending(device);
		atomic_inc(&req->completion_ref);
	}

	if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED)) {
		atomic_inc(&req->completion_ref);
		set_if_null_req_next(peer_device, req);
	}

	if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
		kref_get(&req->kref); /* wait for the DONE */

	if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT)) {
		/* potentially already completed in the ack_receiver thread */
		if (!(s & RQ_NET_DONE)) {
			atomic_add(req->i.size >> 9, &device->ap_in_flight);
			set_if_null_req_not_net_done(peer_device, req);
		}
		if (req->rq_state & RQ_NET_PENDING)
			set_if_null_req_ack_pending(peer_device, req);
	}

	if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
		atomic_inc(&req->completion_ref);

	/* progress: put references */

	if ((s & RQ_COMPLETION_SUSP) && (clear & RQ_COMPLETION_SUSP))
		++c_put;

	if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
		D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING);
		++c_put;
	}

	if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
		if (req->rq_state & RQ_LOCAL_ABORTED)
			kref_put(&req->kref, drbd_req_destroy);
		else
			++c_put;
		list_del_init(&req->req_pending_local);
	}

	if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
		dec_ap_pending(device);
		++c_put;
		req->acked_jif = jiffies;
		advance_conn_req_ack_pending(peer_device, req);
	}

	if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED)) {
		++c_put;
		advance_conn_req_next(peer_device, req);
	}

	if (!(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
		if (s & RQ_NET_SENT)
			atomic_sub(req->i.size >> 9, &device->ap_in_flight);
		if (s & RQ_EXP_BARR_ACK)
			kref_put(&req->kref, drbd_req_destroy);
		req->net_done_jif = jiffies;

		/* in ahead/behind mode, or just in case,
		 * before we finally destroy this request,
		 * the caching pointers must not reference it anymore */
		advance_conn_req_next(peer_device, req);
		advance_conn_req_ack_pending(peer_device, req);
		advance_conn_req_not_net_done(peer_device, req);
	}

	/* potentially complete and destroy */

	/* If we made progress, retry conflicting peer requests, if any. */
	if (req->i.waiting)
		wake_up(&device->misc_wait);

	drbd_req_put_completion_ref(req, m, c_put);
	kref_put(&req->kref, drbd_req_destroy);
}
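
/* Worked example, taken from the HANDED_OVER_TO_NETWORK case below: for a
 * pending protocol A write,
 *
 *	mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING,
 *			RQ_NET_SENT|RQ_NET_OK);
 *
 * drops the completion_ref taken for RQ_NET_QUEUED and the one taken for
 * RQ_NET_PENDING (including dec_ap_pending()), accounts the request size
 * in ap_in_flight for RQ_NET_SENT, and, should completion_ref hit zero,
 * completes the master bio via drbd_req_put_completion_ref(). */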

static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
{
	char b[BDEVNAME_SIZE];

	if (!__ratelimit(&drbd_ratelimit_state))
		return;

	drbd_warn(device, "local %s IO error sector %llu+%u on %s\n",
			(req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
			(unsigned long long)req->i.sector,
			req->i.size >> 9,
			bdevname(device->ldev->backing_bdev, b));
}

/* Helper for HANDED_OVER_TO_NETWORK.
 * Is this a protocol A write (neither WRITE_ACK nor RECEIVE_ACK expected)?
 * Is it also still "PENDING"?
 * --> If so, clear PENDING and set NET_OK below.
 * If it is a protocol A write, but not RQ_PENDING anymore, neg-ack was faster
 * (and we must not set RQ_NET_OK) */
static inline bool is_pending_write_protocol_A(struct drbd_request *req)
{
	return (req->rq_state &
		   (RQ_WRITE|RQ_NET_PENDING|RQ_EXP_WRITE_ACK|RQ_EXP_RECEIVE_ACK))
		== (RQ_WRITE|RQ_NET_PENDING);
}

/* obviously this could be coded as many single functions
 * instead of one huge switch,
 * or by putting the code directly in the respective locations
 * (as it has been before).
 *
 * but having it this way
 * enforces that it is all in this one place, where it is easier to audit,
 * it makes it obvious that whatever "event" "happens" to a request should
 * happen "atomically" within the req_lock,
 * and it enforces that we have to think in a very structured manner
 * about the "events" that may happen to a request during its life time ...
 */
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m)
{
	struct drbd_device *const device = req->device;
	struct drbd_peer_device *const peer_device = first_peer_device(device);
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	struct net_conf *nc;
	int p, rv = 0;

	if (m)
		m->bio = NULL;

	switch (what) {
	default:
		drbd_err(device, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
		break;

	/* does not happen...
	 * initialization done in drbd_req_new
	case CREATED:
		break;
		*/

	case TO_BE_SENT: /* via network */
		/* reached via __drbd_make_request
		 * and from w_read_retry_remote */
		D_ASSERT(device, !(req->rq_state & RQ_NET_MASK));
		rcu_read_lock();
		nc = rcu_dereference(connection->net_conf);
		p = nc->wire_protocol;
		rcu_read_unlock();
		req->rq_state |=
			p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
			p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
		mod_rq_state(req, m, 0, RQ_NET_PENDING);
		break;

	case TO_BE_SUBMITTED: /* locally */
		/* reached via __drbd_make_request */
		D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK));
		mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
		break;

	case COMPLETED_OK:
		if (req->rq_state & RQ_WRITE)
			device->writ_cnt += req->i.size >> 9;
		else
			device->read_cnt += req->i.size >> 9;

		mod_rq_state(req, m, RQ_LOCAL_PENDING,
				RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
		break;

	case ABORT_DISK_IO:
		mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED);
		break;

	case WRITE_COMPLETED_WITH_ERROR:
		drbd_report_io_error(device, req);
		__drbd_chk_io_error(device, DRBD_WRITE_ERROR);
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case READ_COMPLETED_WITH_ERROR:
		drbd_set_out_of_sync(device, req->i.sector, req->i.size);
		drbd_report_io_error(device, req);
		__drbd_chk_io_error(device, DRBD_READ_ERROR);
		/* fall through. */
	case READ_AHEAD_COMPLETED_WITH_ERROR:
		/* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case DISCARD_COMPLETED_NOTSUPP:
	case DISCARD_COMPLETED_WITH_ERROR:
		/* I'd rather not detach from local disk just because it
		 * failed a REQ_DISCARD. */
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case QUEUE_FOR_NET_READ:
		/* READ, and
		 * no local disk,
		 * or target area marked as invalid,
		 * or just got an io-error. */
		/* from __drbd_make_request
		 * or from bio_endio during read io-error recovery */

		/* So we can verify the handle in the answer packet.
		 * Corresponding drbd_remove_request_interval is in
		 * drbd_req_complete() */
		D_ASSERT(device, drbd_interval_empty(&req->i));
		drbd_insert_interval(&device->read_requests, &req->i);

		set_bit(UNPLUG_REMOTE, &device->flags);

		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
		req->w.cb = w_send_read_req;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case QUEUE_FOR_NET_WRITE:
		/* assert something? */
		/* from __drbd_make_request only */

		/* Corresponding drbd_remove_request_interval is in
		 * drbd_req_complete() */
		D_ASSERT(device, drbd_interval_empty(&req->i));
		drbd_insert_interval(&device->write_requests, &req->i);

		/* NOTE
		 * In case the req ended up on the transfer log before being
		 * queued on the worker, it could lead to this request being
		 * missed during cleanup after connection loss.
		 * So we have to do both operations here,
		 * within the same lock that protects the transfer log.
		 *
		 * _req_add_to_epoch(req); this has to be after the
		 * _maybe_start_new_epoch(req); which happened in
		 * __drbd_make_request, because we now may set the bit
		 * again ourselves to close the current epoch.
		 *
		 * Add req to the (now) current epoch (barrier). */

		/* otherwise we may lose an unplug, which may cause some remote
		 * io-scheduler timeout to expire, increasing maximum latency,
		 * hurting performance. */
		set_bit(UNPLUG_REMOTE, &device->flags);

		/* queue work item to send data */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
		req->w.cb = w_send_dblock;
		drbd_queue_work(&connection->sender_work,
				&req->w);

		/* close the epoch, in case it outgrew the limit */
		rcu_read_lock();
		nc = rcu_dereference(connection->net_conf);
		p = nc->max_epoch_size;
		rcu_read_unlock();
		if (connection->current_tle_writes >= p)
			start_new_tl_epoch(connection);

		break;

	case QUEUE_FOR_SEND_OOS:
		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
		req->w.cb = w_send_out_of_sync;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case READ_RETRY_REMOTE_CANCELED:
	case SEND_CANCELED:
	case SEND_FAILED:
		/* real cleanup will be done from tl_clear.  just update flags
		 * so it is no longer marked as on the worker queue */
		mod_rq_state(req, m, RQ_NET_QUEUED, 0);
		break;

	case HANDED_OVER_TO_NETWORK:
		/* assert something? */
		if (is_pending_write_protocol_A(req))
			/* this is what is dangerous about protocol A:
			 * pretend it was successfully written on the peer. */
			mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING,
						RQ_NET_SENT|RQ_NET_OK);
		else
			mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
		/* It is still not yet RQ_NET_DONE until the
		 * corresponding epoch barrier got acked as well,
		 * so we know what to dirty on connection loss. */
		break;

	case OOS_HANDED_TO_NETWORK:
		/* Was not set PENDING, no longer QUEUED, so is now DONE
		 * as far as this connection is concerned. */
		mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE);
		break;

	case CONNECTION_LOST_WHILE_PENDING:
		/* transfer log cleanup after connection loss */
		mod_rq_state(req, m,
				RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP,
				RQ_NET_DONE);
		break;

	case CONFLICT_RESOLVED:
		/* for superseded conflicting writes of multiple primaries,
		 * there is no need to keep anything in the tl, potential
		 * node crashes are covered by the activity log.
		 *
		 * If this request had been marked as RQ_POSTPONED before,
		 * it will actually not be completed, but "restarted",
		 * resubmitted from the retry worker context. */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
		break;

	case WRITE_ACKED_BY_PEER_AND_SIS:
		req->rq_state |= RQ_NET_SIS;
	case WRITE_ACKED_BY_PEER:
		/* Normal operation protocol C: successfully written on peer.
		 * During resync, even in protocol != C,
		 * we requested an explicit write ack anyways.
		 * Which means we cannot even assert anything here.
		 * Nothing more to do here.
		 * We want to keep the tl in place for all protocols, to cater
		 * for volatile write-back caches on lower level devices. */
		goto ack_common;
	case RECV_ACKED_BY_PEER:
		D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK);
		/* protocol B; pretends to be successfully written on peer.
		 * see also notes above in HANDED_OVER_TO_NETWORK about
		 * protocol != C */
	ack_common:
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
		break;

	case POSTPONE_WRITE:
		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
		/* If this node has already detected the write conflict, the
		 * worker will be waiting on misc_wait.  Wake it up once this
		 * request has completed locally.
		 */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		req->rq_state |= RQ_POSTPONED;
		if (req->i.waiting)
			wake_up(&device->misc_wait);
		/* Do not clear RQ_NET_PENDING. This request will make further
		 * progress via restart_conflicting_writes() or
		 * fail_postponed_requests(). Hopefully. */
		break;

	case NEG_ACKED:
		mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0);
		break;

	case FAIL_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;
		mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
		break;

	case RESTART_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;

		mod_rq_state(req, m,
				RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED,
				RQ_LOCAL_PENDING);

		rv = MR_READ;
		if (bio_data_dir(req->master_bio) == WRITE)
			rv = MR_WRITE;

		get_ldev(device); /* always succeeds in this call path */
		req->w.cb = w_restart_disk_io;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case RESEND:
		/* Simply complete (local only) READs. */
		if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
			mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
			break;
		}

		/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
		   before the connection loss (B&C only); only P_BARRIER_ACK
		   (or the local completion?) was missing when we suspended.
		   Throwing them out of the TL here by pretending we got a BARRIER_ACK.
		   During connection handshake, we ensure that the peer was not rebooted. */
		if (!(req->rq_state & RQ_NET_OK)) {
			/* FIXME could this possibly be a req->dw.cb == w_send_out_of_sync?
			 * in that case we must not set RQ_NET_PENDING. */

			mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
			if (req->w.cb) {
				/* w.cb expected to be w_send_dblock, or w_send_read_req */
				drbd_queue_work(&connection->sender_work,
						&req->w);
				rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
			} /* else: FIXME can this happen? */
			break;
		}
		/* else, fall through to BARRIER_ACKED */

	case BARRIER_ACKED:
		/* barrier ack for READ requests does not make sense */
		if (!(req->rq_state & RQ_WRITE))
			break;

		if (req->rq_state & RQ_NET_PENDING) {
			/* barrier came in before all requests were acked.
			 * this is bad, because if the connection is lost now,
			 * we won't be able to clean them up... */
			drbd_err(device, "FIXME (BARRIER_ACKED but pending)\n");
		}
		/* Allowed to complete requests, even while suspended.
		 * As this is called for all requests within a matching epoch,
		 * we need to filter, and only set RQ_NET_DONE for those that
		 * have actually been on the wire. */
		mod_rq_state(req, m, RQ_COMPLETION_SUSP,
				(req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0);
		break;

	case DATA_RECEIVED:
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
		break;

	case QUEUE_AS_DRBD_BARRIER:
		start_new_tl_epoch(connection);
		mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
		break;
	}

	return rv;
}

/* we may do a local read if:
 * - we are consistent (of course),
 * - or we are generally inconsistent,
 *   BUT we are still/already IN SYNC for this area.
 *   since size may be bigger than BM_BLOCK_SIZE,
 *   we may need to check several bits.
 */
static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size)
{
	unsigned long sbnr, ebnr;
	sector_t esector, nr_sectors;

	if (device->state.disk == D_UP_TO_DATE)
		return true;
	if (device->state.disk != D_INCONSISTENT)
		return false;
	esector = sector + (size >> 9) - 1;
	nr_sectors = drbd_get_capacity(device->this_bdev);
	D_ASSERT(device, sector < nr_sectors);
	D_ASSERT(device, esector < nr_sectors);

	sbnr = BM_SECT_TO_BIT(sector);
	ebnr = BM_SECT_TO_BIT(esector);

	return drbd_bm_count_bits(device, sbnr, ebnr) == 0;
}
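
/* Example of the arithmetic above, assuming the usual 4 KiB bitmap
 * granularity (BM_BLOCK_SIZE), i.e. eight 512-byte sectors per bitmap
 * bit: a 16 KiB read at sector 64 spans sectors 64..95, which maps to
 * BM_SECT_TO_BIT(64) = 8 through BM_SECT_TO_BIT(95) = 11; the read may
 * be served locally only if none of bits 8..11 is out of sync. */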

static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
		enum drbd_read_balancing rbm)
{
	struct backing_dev_info *bdi;
	int stripe_shift;

	switch (rbm) {
	case RB_CONGESTED_REMOTE:
		bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
		return bdi_read_congested(bdi);
	case RB_LEAST_PENDING:
		return atomic_read(&device->local_cnt) >
			atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
	case RB_32K_STRIPING:  /* stripe_shift = 15 */
	case RB_64K_STRIPING:
	case RB_128K_STRIPING:
	case RB_256K_STRIPING:
	case RB_512K_STRIPING:
	case RB_1M_STRIPING:   /* stripe_shift = 20 */
		stripe_shift = (rbm - RB_32K_STRIPING + 15);
		return (sector >> (stripe_shift - 9)) & 1;
	case RB_ROUND_ROBIN:
		return test_and_change_bit(READ_BALANCE_RR, &device->flags);
	case RB_PREFER_REMOTE:
		return true;
	case RB_PREFER_LOCAL:
	default:
		return false;
	}
}
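
/* The striping arithmetic above, worked through for RB_64K_STRIPING:
 * stripe_shift = (RB_64K_STRIPING - RB_32K_STRIPING + 15) = 16, and a
 * 64 KiB stripe is 2^(16-9) = 128 sectors, so the test reduces to
 * (sector >> 7) & 1: sectors 0..127 are read locally, 128..255 from
 * the peer, and so on, alternating every 64 KiB. */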

/*
 * complete_conflicting_writes  -  wait for any conflicting write requests
 *
 * The write_requests tree contains all active write requests which we
 * currently know about.  Wait for any requests to complete which conflict with
 * the new one.
 *
 * Only way out: remove the conflicting intervals from the tree.
 */
static void complete_conflicting_writes(struct drbd_request *req)
{
	DEFINE_WAIT(wait);
	struct drbd_device *device = req->device;
	struct drbd_interval *i;
	sector_t sector = req->i.sector;
	int size = req->i.size;

	for (;;) {
		drbd_for_each_overlap(i, &device->write_requests, sector, size) {
			/* Ignore, if already completed to upper layers. */
			if (i->completed)
				continue;
			/* Handle the first found overlap.  After the schedule
			 * we have to restart the tree walk. */
			break;
		}
		if (!i) /* if any */
			break;

		/* Indicate to wake up device->misc_wait on progress.  */
		prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
		i->waiting = true;
		spin_unlock_irq(&device->resource->req_lock);
		schedule();
		spin_lock_irq(&device->resource->req_lock);
	}
	finish_wait(&device->misc_wait, &wait);
}

/* called within req_lock */
static void maybe_pull_ahead(struct drbd_device *device)
{
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct net_conf *nc;
	bool congested = false;
	enum drbd_on_congestion on_congestion;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	on_congestion = nc ? nc->on_congestion : OC_BLOCK;
	rcu_read_unlock();
	if (on_congestion == OC_BLOCK ||
	    connection->agreed_pro_version < 96)
		return;

	if (on_congestion == OC_PULL_AHEAD && device->state.conn == C_AHEAD)
		return; /* nothing to do ... */

	/* If I don't even have good local storage, we can not reasonably try
	 * to pull ahead of the peer. We also need the local reference to make
	 * sure device->act_log is there.
	 */
	if (!get_ldev_if_state(device, D_UP_TO_DATE))
		return;

	if (nc->cong_fill &&
	    atomic_read(&device->ap_in_flight) >= nc->cong_fill) {
		drbd_info(device, "Congestion-fill threshold reached\n");
		congested = true;
	}

	if (device->act_log->used >= nc->cong_extents) {
		drbd_info(device, "Congestion-extents threshold reached\n");
		congested = true;
	}

	if (congested) {
		/* start a new epoch for non-mirrored writes */
		start_new_tl_epoch(first_peer_device(device)->connection);

		if (on_congestion == OC_PULL_AHEAD)
			_drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL);
		else  /*nc->on_congestion == OC_DISCONNECT */
			_drbd_set_state(_NS(device, conn, C_DISCONNECTING), 0, NULL);
	}
	put_ldev(device);
}

/* If this returns false, and req->private_bio is still set,
 * this should be submitted locally.
 *
 * If it returns false, but req->private_bio is not set,
 * we do not have access to good data :(
 *
 * Otherwise, this destroys req->private_bio, if any,
 * and returns true.
 */
static bool do_remote_read(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	enum drbd_read_balancing rbm;

	if (req->private_bio) {
		if (!drbd_may_do_local_read(device,
					req->i.sector, req->i.size)) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
	}

	if (device->state.pdsk != D_UP_TO_DATE)
		return false;

	if (req->private_bio == NULL)
		return true;

	/* TODO: improve read balancing decisions, take into account drbd
	 * protocol, pending requests etc. */

	rcu_read_lock();
	rbm = rcu_dereference(device->ldev->disk_conf)->read_balancing;
	rcu_read_unlock();

	if (rbm == RB_PREFER_LOCAL && req->private_bio)
		return false; /* submit locally */

	if (remote_due_to_read_balancing(device, req->i.sector, rbm)) {
		if (req->private_bio) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
		return true;
	}

	return false;
}

bool drbd_should_do_remote(union drbd_dev_state s)
{
	return s.pdsk == D_UP_TO_DATE ||
		(s.pdsk >= D_INCONSISTENT &&
		 s.conn >= C_WF_BITMAP_T &&
		 s.conn < C_AHEAD);
	/* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T.
	   That is equivalent since before 96 IO was frozen in the C_WF_BITMAP*
	   states. */
}

static bool drbd_should_send_out_of_sync(union drbd_dev_state s)
{
	return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S;
	/* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary
	   since we enter state C_AHEAD only if proto >= 96 */
}

/* returns number of connections (== 1, for drbd 8.4)
 * expected to actually write this data,
 * which does NOT include those that we are L_AHEAD for. */
static int drbd_process_write_request(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	int remote, send_oos;

	remote = drbd_should_do_remote(device->state);
	send_oos = drbd_should_send_out_of_sync(device->state);

	/* Need to replicate writes.  Unless it is an empty flush,
	 * which is better mapped to a DRBD P_BARRIER packet,
	 * also for drbd wire protocol compatibility reasons.
	 * If this was a flush, just start a new epoch.
	 * Unless the current epoch was empty anyways, or we are not currently
	 * replicating, in which case there is no point. */
	if (unlikely(req->i.size == 0)) {
		/* The only size==0 bios we expect are empty flushes. */
		D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH);
		if (remote)
			_req_mod(req, QUEUE_AS_DRBD_BARRIER);
		return remote;
	}

	if (!remote && !send_oos)
		return 0;

	D_ASSERT(device, !(remote && send_oos));

	if (remote) {
		_req_mod(req, TO_BE_SENT);
		_req_mod(req, QUEUE_FOR_NET_WRITE);
	} else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size))
		_req_mod(req, QUEUE_FOR_SEND_OOS);

	return remote;
}

static void drbd_process_discard_req(struct drbd_request *req)
{
	struct block_device *bdev = req->device->ldev->backing_bdev;

	if (blkdev_issue_zeroout(bdev, req->i.sector, req->i.size >> 9,
			GFP_NOIO, 0))
		req->private_bio->bi_status = BLK_STS_IOERR;
	bio_endio(req->private_bio);
}

static void
drbd_submit_req_private_bio(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	struct bio *bio = req->private_bio;
	unsigned int type;

	if (bio_op(bio) != REQ_OP_READ)
		type = DRBD_FAULT_DT_WR;
	else if (bio->bi_opf & REQ_RAHEAD)
		type = DRBD_FAULT_DT_RA;
	else
		type = DRBD_FAULT_DT_RD;

	bio_set_dev(bio, device->ldev->backing_bdev);

	/* State may have changed since we grabbed our reference on the
	 * ->ldev member. Double check, and short-circuit to endio.
	 * In case the last activity log transaction failed to get on
	 * stable storage, and this is a WRITE, we may not even submit
	 * this bio. */
	if (get_ldev(device)) {
		if (drbd_insert_fault(device, type))
			bio_io_error(bio);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
			 bio_op(bio) == REQ_OP_DISCARD)
			drbd_process_discard_req(req);
		else
			generic_make_request(bio);
		put_ldev(device);
	} else
		bio_io_error(bio);
}

static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
{
	spin_lock_irq(&device->resource->req_lock);
	list_add_tail(&req->tl_requests, &device->submit.writes);
	list_add_tail(&req->req_pending_master_completion,
			&device->pending_master_completion[1 /* WRITE */]);
	spin_unlock_irq(&device->resource->req_lock);
	queue_work(device->submit.wq, &device->submit.worker);
	/* do_submit() may sleep internally on al_wait, too */
	wake_up(&device->al_wait);
}

/* returns the new drbd_request pointer, if the caller is expected to
 * drbd_send_and_submit() it (to save latency), or NULL if we queued the
 * request on the submitter thread.
 * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
 */
static struct drbd_request *
drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
{
	const int rw = bio_data_dir(bio);
	struct drbd_request *req;

	/* allocate outside of all locks; */
	req = drbd_req_new(device, bio);
	if (!req) {
		dec_ap_bio(device);
		/* only pass the error to the upper layers.
		 * if user cannot handle io errors, that's not our business. */
		drbd_err(device, "could not kmalloc() req\n");
		bio->bi_status = BLK_STS_RESOURCE;
		bio_endio(bio);
		return ERR_PTR(-ENOMEM);
	}
	req->start_jif = start_jif;

	if (!get_ldev(device)) {
		bio_put(req->private_bio);
		req->private_bio = NULL;
	}

	/* Update disk stats */
	_drbd_start_io_acct(device, req);

	/* process discards always from our submitter thread */
	if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
	    bio_op(bio) == REQ_OP_DISCARD)
		goto queue_for_submitter_thread;

	if (rw == WRITE && req->private_bio && req->i.size
	&& !test_bit(AL_SUSPENDED, &device->flags)) {
		if (!drbd_al_begin_io_fastpath(device, &req->i))
			goto queue_for_submitter_thread;
		req->rq_state |= RQ_IN_ACT_LOG;
		req->in_actlog_jif = jiffies;
	}
	return req;

 queue_for_submitter_thread:
	atomic_inc(&device->ap_actlog_cnt);
	drbd_queue_write(device, req);
	return NULL;
}
/* Require at least one path to current data.
 * We don't want to allow writes on C_STANDALONE D_INCONSISTENT:
 * We would not be able to read back what was written,
 * we would not have bumped the data generation uuids,
 * we would cause data divergence for all the wrong reasons.
 *
 * If we don't see at least one D_UP_TO_DATE, we will fail this request,
 * which either returns EIO, or, if OND_SUSPEND_IO is set, suspends IO,
 * and queues it for retry later.
 */
static bool may_do_writes(struct drbd_device *device)
{
	const union drbd_dev_state s = device->state;
	return s.disk == D_UP_TO_DATE || s.pdsk == D_UP_TO_DATE;
}

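/* Editor's illustration, derived from may_do_writes() above:
 *
 *	s.disk		s.pdsk		writes allowed?
 *	D_UP_TO_DATE	D_UP_TO_DATE	yes
 *	D_UP_TO_DATE	D_INCONSISTENT	yes (local replica is current)
 *	D_INCONSISTENT	D_UP_TO_DATE	yes (peer replica is current)
 *	D_INCONSISTENT	D_INCONSISTENT	no  (no path to current data)
 */
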
struct drbd_plug_cb {
	struct blk_plug_cb cb;
	struct drbd_request *most_recent_req;
	/* do we need more? */
};

static void drbd_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct drbd_plug_cb *plug = container_of(cb, struct drbd_plug_cb, cb);
	struct drbd_resource *resource = plug->cb.data;
	struct drbd_request *req = plug->most_recent_req;

	kfree(cb);
	if (!req)
		return;

	spin_lock_irq(&resource->req_lock);
	/* In case the sender did not process it yet, raise the flag to
	 * have it followed with P_UNPLUG_REMOTE just after. */
	req->rq_state |= RQ_UNPLUG;
	/* but also queue a generic unplug */
	drbd_queue_unplug(req->device);
	kref_put(&req->kref, drbd_req_destroy);
	spin_unlock_irq(&resource->req_lock);
}

static struct drbd_plug_cb *drbd_check_plugged(struct drbd_resource *resource)
{
	/* A lot of text to say
	 * return (struct drbd_plug_cb *)blk_check_plugged(); */
	struct drbd_plug_cb *plug;
	struct blk_plug_cb *cb = blk_check_plugged(drbd_unplug, resource, sizeof(*plug));

	if (cb)
		plug = container_of(cb, struct drbd_plug_cb, cb);
	else
		plug = NULL;
	return plug;
}

static void drbd_update_plug(struct drbd_plug_cb *plug, struct drbd_request *req)
{
	struct drbd_request *tmp = plug->most_recent_req;
	/* Will be sent to some peer.
	 * Remember to tag it with UNPLUG_REMOTE on unplug. */
	kref_get(&req->kref);
	plug->most_recent_req = req;
	if (tmp)
		kref_put(&tmp->kref, drbd_req_destroy);
}

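/* Editor's sketch (illustrative only) of the generic blk_check_plugged()
 * pattern used above: embed a struct blk_plug_cb as the *first* member
 * (blk_check_plugged() allocates "size" bytes and treats the result as a
 * blk_plug_cb), stash driver state behind it while the current task holds
 * a plug, and free it in the unplug callback.  my_* names are hypothetical: */
#if 0
struct my_plug_cb {
	struct blk_plug_cb cb;	/* must come first for container_of() */
	int batched;		/* example of driver state to accumulate */
};

static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct my_plug_cb *plug = container_of(cb, struct my_plug_cb, cb);
	/* flush whatever was batched while the plug was held */
	kfree(cb);
}

static void my_submit_path(void *data)
{
	struct blk_plug_cb *cb = blk_check_plugged(my_unplug, data, sizeof(struct my_plug_cb));
	if (cb)	/* task is plugged: batch */
		container_of(cb, struct my_plug_cb, cb)->batched++;
	/* else: no plug active, submit immediately */
}
#endif
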
static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
{
	struct drbd_resource *resource = device->resource;
	const int rw = bio_data_dir(req->master_bio);
	struct bio_and_error m = { NULL, };
	bool no_remote = false;
	bool submit_private_bio = false;

	spin_lock_irq(&resource->req_lock);
	if (rw == WRITE) {
		/* This may temporarily give up the req_lock,
		 * but will re-acquire it before it returns here.
		 * Needs to be before the check on drbd_suspended() */
		complete_conflicting_writes(req);
		/* no more giving up req_lock from now on! */

		/* check for congestion, and potentially stop sending
		 * full data updates, but start sending "dirty bits" only. */
		maybe_pull_ahead(device);
	}

	if (drbd_suspended(device)) {
		/* push back and retry: */
		req->rq_state |= RQ_POSTPONED;
		if (req->private_bio) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
		goto out;
	}

	/* We fail READ early, if we cannot serve it.
	 * We must do this before req is registered on any lists.
	 * Otherwise, drbd_req_complete() will queue a failed READ for retry. */
	if (rw != WRITE) {
		if (!do_remote_read(req) && !req->private_bio)
			goto nodata;
	}

	/* which transfer log epoch does this belong to? */
	req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr);

	/* no point in adding empty flushes to the transfer log,
	 * they are mapped to drbd barriers already. */
	if (likely(req->i.size != 0)) {
		if (rw == WRITE)
			first_peer_device(device)->connection->current_tle_writes++;

		list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log);
	}

	if (rw == WRITE) {
		if (req->private_bio && !may_do_writes(device)) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
			goto nodata;
		}
		if (!drbd_process_write_request(req))
			no_remote = true;
	} else {
		/* We either have a private_bio, or we can read from remote.
		 * Otherwise we had done the goto nodata above. */
		if (req->private_bio == NULL) {
			_req_mod(req, TO_BE_SENT);
			_req_mod(req, QUEUE_FOR_NET_READ);
		} else
			no_remote = true;
	}

	if (no_remote == false) {
		struct drbd_plug_cb *plug = drbd_check_plugged(resource);
		if (plug)
			drbd_update_plug(plug, req);
	}

	/* If it took the fast path in drbd_request_prepare, add it here.
	 * The slow path has added it already. */
	if (list_empty(&req->req_pending_master_completion))
		list_add_tail(&req->req_pending_master_completion,
			&device->pending_master_completion[rw == WRITE]);
	if (req->private_bio) {
		/* needs to be marked within the same spinlock */
		req->pre_submit_jif = jiffies;
		list_add_tail(&req->req_pending_local,
			&device->pending_completion[rw == WRITE]);
		_req_mod(req, TO_BE_SUBMITTED);
		/* but we need to give up the spinlock to submit */
		submit_private_bio = true;
	} else if (no_remote) {
nodata:
		if (__ratelimit(&drbd_ratelimit_state))
			drbd_err(device, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
					(unsigned long long)req->i.sector, req->i.size >> 9);
		/* A write may have been queued for send_oos, however.
		 * So we cannot simply free it, we must go through drbd_req_put_completion_ref() */
	}

out:
	drbd_req_put_completion_ref(req, &m, 1);
	spin_unlock_irq(&resource->req_lock);

	/* Even though the above is a kref_put(), this is safe.
	 * As long as we still need to submit our private bio,
	 * we hold a completion ref, and the request cannot disappear.
	 * If however this request did not even have a private bio to submit
	 * (e.g. remote read), req may already be invalid now.
	 * That's why we cannot check on req->private_bio. */
	if (submit_private_bio)
		drbd_submit_req_private_bio(req);
	if (m.bio)
		complete_master_bio(device, &m);
}

void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
{
	struct drbd_request *req = drbd_request_prepare(device, bio, start_jif);
	if (IS_ERR_OR_NULL(req))
		return;
	drbd_send_and_submit(device, req);
}

static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)
{
	struct blk_plug plug;
	struct drbd_request *req, *tmp;

	blk_start_plug(&plug);
	list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
		const int rw = bio_data_dir(req->master_bio);

		if (rw == WRITE /* rw != WRITE should not even end up here! */
		&& req->private_bio && req->i.size
		&& !test_bit(AL_SUSPENDED, &device->flags)) {
			if (!drbd_al_begin_io_fastpath(device, &req->i))
				continue;

			req->rq_state |= RQ_IN_ACT_LOG;
			req->in_actlog_jif = jiffies;
			atomic_dec(&device->ap_actlog_cnt);
		}

		list_del_init(&req->tl_requests);
		drbd_send_and_submit(device, req);
	}
	blk_finish_plug(&plug);
}

static bool prepare_al_transaction_nonblock(struct drbd_device *device,
					    struct list_head *incoming,
					    struct list_head *pending,
					    struct list_head *later)
{
	struct drbd_request *req;
	int wake = 0;
	int err;

	spin_lock_irq(&device->al_lock);
	while ((req = list_first_entry_or_null(incoming, struct drbd_request, tl_requests))) {
		err = drbd_al_begin_io_nonblock(device, &req->i);
		if (err == -ENOBUFS)
			break;
		if (err == -EBUSY)
			wake = 1;
		if (err)
			list_move_tail(&req->tl_requests, later);
		else
			list_move_tail(&req->tl_requests, pending);
	}
	spin_unlock_irq(&device->al_lock);
	if (wake)
		wake_up(&device->al_wait);
	return !list_empty(pending);
}

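/* Editor's summary (derived from the code above, not authoritative):
 * drbd_al_begin_io_nonblock() results map to list movement as follows:
 *
 *	0	 -> "pending": AL slot reserved, submit after the commit
 *	-EBUSY	 -> "later":   extent busy (e.g. resync); also wake al_wait
 *	-ENOBUFS -> stop:      this AL transaction is full, commit it first
 *	other	 -> "later":   retried on a subsequent pass
 */
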
static void send_and_submit_pending(struct drbd_device *device, struct list_head *pending)
{
	struct blk_plug plug;
	struct drbd_request *req;

	blk_start_plug(&plug);
	while ((req = list_first_entry_or_null(pending, struct drbd_request, tl_requests))) {
		req->rq_state |= RQ_IN_ACT_LOG;
		req->in_actlog_jif = jiffies;
		atomic_dec(&device->ap_actlog_cnt);
		list_del_init(&req->tl_requests);
		drbd_send_and_submit(device, req);
	}
	blk_finish_plug(&plug);
}

void do_submit(struct work_struct *ws)
{
	struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker);
	LIST_HEAD(incoming);	/* from drbd_make_request() */
	LIST_HEAD(pending);	/* to be submitted after next AL-transaction commit */
	LIST_HEAD(busy);	/* blocked by resync requests */

	/* grab new incoming requests */
	spin_lock_irq(&device->resource->req_lock);
	list_splice_tail_init(&device->submit.writes, &incoming);
	spin_unlock_irq(&device->resource->req_lock);

	for (;;) {
		DEFINE_WAIT(wait);

		/* move used-to-be-busy back to front of incoming */
		list_splice_init(&busy, &incoming);
		submit_fast_path(device, &incoming);
		if (list_empty(&incoming))
			break;

		for (;;) {
			prepare_to_wait(&device->al_wait, &wait, TASK_UNINTERRUPTIBLE);

			list_splice_init(&busy, &incoming);
			prepare_al_transaction_nonblock(device, &incoming, &pending, &busy);
			if (!list_empty(&pending))
				break;

			schedule();

			/* If all currently "hot" activity log extents are kept busy by
			 * incoming requests, we still must not totally starve new
			 * requests to "cold" extents.
			 * Something left on &incoming means there had not been
			 * enough update slots available, and the activity log
			 * has been marked as "starving".
			 *
			 * Try again now, without looking for new requests,
			 * effectively blocking all new requests until we made
			 * at least _some_ progress with what we currently have.
			 */
			if (!list_empty(&incoming))
				continue;

			/* Nothing moved to pending, but nothing left
			 * on incoming: all moved to busy!
			 * Grab new and iterate. */
			spin_lock_irq(&device->resource->req_lock);
			list_splice_tail_init(&device->submit.writes, &incoming);
			spin_unlock_irq(&device->resource->req_lock);
		}
		finish_wait(&device->al_wait, &wait);

		/* If the transaction was full, before all incoming requests
		 * had been processed, skip ahead to commit, and iterate
		 * without splicing in more incoming requests from upper layers.
		 *
		 * Else, if all incoming have been processed,
		 * they have become either "pending" (to be submitted after
		 * next transaction commit) or "busy" (blocked by resync).
		 *
		 * Maybe more was queued, while we prepared the transaction?
		 * Try to stuff those into this transaction as well.
		 * Be strictly non-blocking here,
		 * we already have something to commit.
		 *
		 * Commit if we don't make any more progress.
		 */

		while (list_empty(&incoming)) {
			LIST_HEAD(more_pending);
			LIST_HEAD(more_incoming);
			bool made_progress;

			/* It is ok to look outside the lock,
			 * it's only an optimization anyway */
			if (list_empty(&device->submit.writes))
				break;

			spin_lock_irq(&device->resource->req_lock);
			list_splice_tail_init(&device->submit.writes, &more_incoming);
			spin_unlock_irq(&device->resource->req_lock);

			if (list_empty(&more_incoming))
				break;

			made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending, &busy);

			list_splice_tail_init(&more_pending, &pending);
			list_splice_tail_init(&more_incoming, &incoming);
			if (!made_progress)
				break;
		}

		drbd_al_begin_io_commit(device);
		send_and_submit_pending(device, &pending);
	}
}

blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio)
{
	struct drbd_device *device = (struct drbd_device *) q->queuedata;
	unsigned long start_jif;

	blk_queue_split(q, &bio);

	start_jif = jiffies;

	/*
	 * what we "blindly" assume:
	 */
	D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));

	inc_ap_bio(device);
	__drbd_make_request(device, bio, start_jif);
	return BLK_QC_T_NONE;
}

static bool net_timeout_reached(struct drbd_request *net_req,
		struct drbd_connection *connection,
		unsigned long now, unsigned long ent,
		unsigned int ko_count, unsigned int timeout)
{
	struct drbd_device *device = net_req->device;

	if (!time_after(now, net_req->pre_send_jif + ent))
		return false;

	if (time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent))
		return false;

	if (net_req->rq_state & RQ_NET_PENDING) {
		drbd_warn(device, "Remote failed to finish a request within %ums > ko-count (%u) * timeout (%u * 0.1s)\n",
			jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout);
		return true;
	}

	/* We received an ACK already (or are using protocol A),
	 * but are waiting for the epoch closing barrier ack.
	 * Check if we sent the barrier already.  We should not blame the peer
	 * for being unresponsive, if we did not even ask it yet. */
	if (net_req->epoch == connection->send.current_epoch_nr) {
		drbd_warn(device,
			"We did not send a P_BARRIER for %ums > ko-count (%u) * timeout (%u * 0.1s); drbd kernel thread blocked?\n",
			jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout);
		return false;
	}

	/* Worst case: we may have been blocked for whatever reason, then
	 * suddenly are able to send a lot of requests (and epoch separating
	 * barriers) in quick succession.
	 * The timestamp of the net_req may be much too old and not correspond
	 * to the sending time of the relevant unack'ed barrier packet, so it
	 * would trigger a spurious timeout.  The latest barrier packet may
	 * have too recent a timestamp to trigger the timeout, so we could
	 * miss a real one.  Right now we don't have a place to conveniently
	 * store these timestamps.
	 * But in this particular situation, the application requests are still
	 * completed to upper layers, DRBD should still "feel" responsive.
	 * No need yet to kill this connection, it may still recover.
	 * If not, eventually we will have queued enough into the network for
	 * us to block.  From that point of view, the timestamp of the last sent
	 * barrier packet is relevant enough.
	 */
	if (time_after(now, connection->send.last_sent_barrier_jif + ent)) {
		drbd_warn(device, "Remote failed to answer a P_BARRIER (sent at %lu jif; now=%lu jif) within %ums > ko-count (%u) * timeout (%u * 0.1s)\n",
			connection->send.last_sent_barrier_jif, now,
			jiffies_to_msecs(now - connection->send.last_sent_barrier_jif), ko_count, timeout);
		return true;
	}
	return false;
}

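/* Worked example (editor's addition): the "ent" threshold tested above is
 * computed by the caller below as ent = timeout * HZ/10 * ko_count, where
 * the timeout knob is in units of 0.1s.  With, say, timeout = 60 (6.0s)
 * and ko-count = 7, a peer is only declared dead after a request has gone
 * unanswered for 6s * 7 = 42 seconds -- and only if the connection has
 * also been established for at least that long. */
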
/* A request is considered timed out, if
 * - we have some effective timeout from the configuration,
 *   with some state restrictions applied,
 * - the oldest request is waiting for a response from the network
 *   or, respectively, the local disk,
 * - the oldest request is in fact older than the effective timeout,
 * - the connection was established (or, respectively, the disk was
 *   attached) for longer than the timeout already.
 * Note that for 32bit jiffies and very stable connections/disks,
 * we may have a wrap around, which is caught by
 * !time_in_range(now, last_..._jif, last_..._jif + timeout).
 *
 * Side effect: once per 32bit wrap-around interval, which means every
 * ~198 days with 250 HZ, we have a window where the timeout would need
 * to expire twice (worst case) to become effective.  Good enough.
 */

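/* Editor's arithmetic for the wrap-around window above: with 32bit
 * jiffies at HZ = 250, the counter wraps every 2^32 / 250 = 17179869
 * seconds, and 17179869 / 86400 ~= 198.8 days -- hence "~198 days". */
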
void request_timer_fn(struct timer_list *t)
{
	struct drbd_device *device = from_timer(device, t, request_timer);
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */
	struct net_conf *nc;
	unsigned long oldest_submit_jif;
	unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
	unsigned long now;
	unsigned int ko_count = 0, timeout = 0;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (nc && device->state.conn >= C_WF_REPORT_PARAMS) {
		ko_count = nc->ko_count;
		timeout = nc->timeout;
	}

	if (get_ldev(device)) { /* implicit state.disk >= D_INCONSISTENT */
		dt = rcu_dereference(device->ldev->disk_conf)->disk_timeout * HZ / 10;
		put_ldev(device);
	}
	rcu_read_unlock();

	ent = timeout * HZ/10 * ko_count;
	et = min_not_zero(dt, ent);

	if (!et)
		return; /* Recurring timer stopped */

	now = jiffies;
	nt = now + et;

	spin_lock_irq(&device->resource->req_lock);
	req_read = list_first_entry_or_null(&device->pending_completion[0], struct drbd_request, req_pending_local);
	req_write = list_first_entry_or_null(&device->pending_completion[1], struct drbd_request, req_pending_local);

	/* maybe the oldest request waiting for the peer is in fact still
	 * blocking in tcp sendmsg.  That's ok, though, that's handled via the
	 * socket send timeout, requesting a ping, and bumping ko-count in
	 * we_should_drop_the_connection().
	 */

	/* check the oldest request we successfully sent,
	 * but which is still waiting for an ACK. */
	req_peer = connection->req_ack_pending;

	/* if we don't have such a request (e.g. protocol A)
	 * check the oldest request which is still waiting on its epoch
	 * closing barrier ack. */
	if (!req_peer)
		req_peer = connection->req_not_net_done;

	/* evaluate the oldest peer request only in one timer! */
	if (req_peer && req_peer->device != device)
		req_peer = NULL;

	/* do we have something to evaluate? */
	if (req_peer == NULL && req_write == NULL && req_read == NULL)
		goto out;

	oldest_submit_jif =
		(req_write && req_read)
		? ( time_before(req_write->pre_submit_jif, req_read->pre_submit_jif)
		  ? req_write->pre_submit_jif : req_read->pre_submit_jif )
		: req_write ? req_write->pre_submit_jif
		: req_read ? req_read->pre_submit_jif : now;

	if (ent && req_peer && net_timeout_reached(req_peer, connection, now, ent, ko_count, timeout))
		_conn_request_state(connection, NS(conn, C_TIMEOUT), CS_VERBOSE | CS_HARD);

	if (dt && oldest_submit_jif != now &&
	    time_after(now, oldest_submit_jif + dt) &&
	    !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) {
		drbd_warn(device, "Local backing device failed to meet the disk-timeout\n");
		__drbd_chk_io_error(device, DRBD_FORCE_DETACH);
	}

	/* Reschedule timer for the nearest not already expired timeout.
	 * Fall back to now + min(effective network timeout, disk timeout). */
	ent = (ent && req_peer && time_before(now, req_peer->pre_send_jif + ent))
		? req_peer->pre_send_jif + ent : now + et;
	dt = (dt && oldest_submit_jif != now && time_before(now, oldest_submit_jif + dt))
		? oldest_submit_jif + dt : now + et;
	nt = time_before(ent, dt) ? ent : dt;
out:
	spin_unlock_irq(&device->resource->req_lock);
	mod_timer(&device->request_timer, nt);
}
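
/* Worked example (editor's addition) for the timer re-arm at the end of
 * request_timer_fn(), with hypothetical values ent = 42s and dt = 30s
 * (converted to jiffies):
 * - oldest peer request sent 10s ago -> deadline pre_send_jif + ent,
 *   i.e. 32s from now;
 * - oldest local bio submitted 25s ago -> deadline oldest_submit_jif + dt,
 *   i.e. 5s from now.
 * nt picks the earlier of the two, so the timer fires again in 5s; a
 * deadline already in the past falls back to now + et, where
 * et = min_not_zero(dt, ent) = 30s in this example. */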