/*
   drbd_req.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_req.h"


static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);

/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
{
	generic_start_io_acct(bio_data_dir(req->master_bio), req->i.size >> 9,
			      &device->vdisk->part0);
}

/* Update disk stats when completing request upwards */
static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
{
	generic_end_io_acct(bio_data_dir(req->master_bio),
			    &device->vdisk->part0, req->start_jif);
}
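
/*
 * Note on units: req->i.size is in bytes, while the generic block layer
 * accounting helpers above want 512-byte sectors; hence the ">> 9".
 * Example: a 4096 byte request is accounted as 4096 >> 9 == 8 sectors.
 */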

static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio *bio_src)
{
	struct drbd_request *req;

	req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
	if (!req)
		return NULL;
	memset(req, 0, sizeof(*req));

	drbd_req_make_private_bio(req, bio_src);
	req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
		      | (bio_op(bio_src) == REQ_OP_WRITE_SAME ? RQ_WSAME : 0)
		      | (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0);
	req->device = device;
	req->master_bio = bio_src;
	req->epoch = 0;

	drbd_clear_interval(&req->i);
	req->i.sector = bio_src->bi_iter.bi_sector;
	req->i.size = bio_src->bi_iter.bi_size;
	req->i.local = true;
	req->i.waiting = false;

	INIT_LIST_HEAD(&req->tl_requests);
	INIT_LIST_HEAD(&req->w.list);
	INIT_LIST_HEAD(&req->req_pending_master_completion);
	INIT_LIST_HEAD(&req->req_pending_local);

	/* one reference to be put by __drbd_make_request */
	atomic_set(&req->completion_ref, 1);
	/* one kref as long as completion_ref > 0 */
	kref_init(&req->kref);
	return req;
}
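
/*
 * Lifetime sketch, as set up above: a new request starts out with
 * completion_ref == 1 and kref == 1.  Each pending part (local disk,
 * network) takes an additional completion_ref; once completion_ref
 * drops to zero, the master bio is completed, and the final kref_put()
 * frees the request via drbd_req_destroy().  mod_rq_state() below is
 * intended to be the only place that manipulates these two counters.
 */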

static void drbd_remove_request_interval(struct rb_root *root,
					 struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	struct drbd_interval *i = &req->i;

	drbd_remove_interval(root, i);

	/* Wake up any processes waiting for this request to complete. */
	if (i->waiting)
		wake_up(&device->misc_wait);
}

void drbd_req_destroy(struct kref *kref)
{
	struct drbd_request *req = container_of(kref, struct drbd_request, kref);
	struct drbd_device *device = req->device;
	const unsigned s = req->rq_state;

	if ((req->master_bio && !(s & RQ_POSTPONED)) ||
	    atomic_read(&req->completion_ref) ||
	    (s & RQ_LOCAL_PENDING) ||
	    ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
		drbd_err(device, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
			 s, atomic_read(&req->completion_ref));
		return;
	}

	/* If called from mod_rq_state (expected normal case) or
	 * drbd_send_and_submit (the less likely normal path), this holds the
	 * req_lock, and req->tl_requests will typically be on ->transfer_log,
	 * though it may still be empty (never added to the transfer log).
	 *
	 * If called from do_retry(), we do NOT hold the req_lock, but we are
	 * still allowed to unconditionally list_del(&req->tl_requests),
	 * because it will be on a local on-stack list only. */
	list_del_init(&req->tl_requests);

	/* finally remove the request from the conflict detection /
	 * block_id verification interval tree. */
	if (!drbd_interval_empty(&req->i)) {
		struct rb_root *root;

		if (s & RQ_WRITE)
			root = &device->write_requests;
		else
			root = &device->read_requests;
		drbd_remove_request_interval(root, req);
	} else if (s & (RQ_NET_MASK & ~RQ_NET_DONE) && req->i.size != 0)
		drbd_err(device, "drbd_req_destroy: Logic BUG: interval empty, but: rq_state=0x%x, sect=%llu, size=%u\n",
			s, (unsigned long long)req->i.sector, req->i.size);

	/* if it was a write, we may have to set the corresponding
	 * bit(s) out-of-sync first. If it had a local part, we need to
	 * release the reference to the activity log. */
	if (s & RQ_WRITE) {
		/* Set out-of-sync unless both OK flags are set
		 * (local only or remote failed).
		 * Other places where we set out-of-sync:
		 * READ with local io-error */

		/* There is a special case:
		 * we may notice late that IO was suspended,
		 * and postpone, or schedule for retry, a write,
		 * before it even was submitted or sent.
		 * In that case we do not want to touch the bitmap at all.
		 */
		if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
			if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
				drbd_set_out_of_sync(device, req->i.sector, req->i.size);

			if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
				drbd_set_in_sync(device, req->i.sector, req->i.size);
		}

		/* one might be tempted to move the drbd_al_complete_io
		 * to the local io completion callback drbd_request_endio.
		 * but, if this was a mirror write, we may only
		 * drbd_al_complete_io after this is RQ_NET_DONE,
		 * otherwise the extent could be dropped from the al
		 * before it has actually been written on the peer.
		 * if we crash before our peer knows about the request,
		 * but after the extent has been dropped from the al,
		 * we would forget to resync the corresponding extent.
		 */
		if (s & RQ_IN_ACT_LOG) {
			if (get_ldev_if_state(device, D_FAILED)) {
				drbd_al_complete_io(device, &req->i);
				put_ldev(device);
			} else if (__ratelimit(&drbd_ratelimit_state)) {
				drbd_warn(device, "Should have called drbd_al_complete_io(, %llu, %u), "
					  "but my Disk seems to have failed :(\n",
					  (unsigned long long) req->i.sector, req->i.size);
			}
		}
	}

	mempool_free(req, drbd_request_mempool);
}

static void wake_all_senders(struct drbd_connection *connection)
{
	wake_up(&connection->sender_work.q_wait);
}

/* must hold resource->req_lock */
void start_new_tl_epoch(struct drbd_connection *connection)
{
	/* no point closing an epoch, if it is empty, anyways. */
	if (connection->current_tle_writes == 0)
		return;

	connection->current_tle_writes = 0;
	atomic_inc(&connection->current_tle_nr);
	wake_all_senders(connection);
}
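
/*
 * A transfer log "epoch" is the set of writes between two barriers on
 * the wire.  Bumping current_tle_nr and waking the senders lets the
 * sender thread notice the change and emit a P_BARRIER before the next
 * write, so the peer can acknowledge the whole epoch at once (see the
 * BARRIER_ACKED handling in __req_mod() below).
 */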

void complete_master_bio(struct drbd_device *device,
		struct bio_and_error *m)
{
	m->bio->bi_error = m->error;
	bio_endio(m->bio);
	dec_ap_bio(device);
}


/* Helper for __req_mod().
 * Set m->bio to the master bio, if it is fit to be completed,
 * or leave it alone (it is initialized to NULL in __req_mod),
 * if it has already been completed, or cannot be completed yet.
 * If m->bio is set, the error status to be returned is placed in m->error.
 */
static
void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
{
	const unsigned s = req->rq_state;
	struct drbd_device *device = req->device;
	int error, ok;

	/* we must not complete the master bio, while it is
	 *	still being processed by _drbd_send_zc_bio (drbd_send_dblock)
	 *	not yet acknowledged by the peer
	 *	not yet completed by the local io subsystem
	 * these flags may get cleared in any order by
	 *	the worker,
	 *	the receiver,
	 *	the bio_endio completion callbacks.
	 */
	if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
	    (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
	    (s & RQ_COMPLETION_SUSP)) {
		drbd_err(device, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
		return;
	}

	if (!req->master_bio) {
		drbd_err(device, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
		return;
	}

	/*
	 * figure out whether to report success or failure.
	 *
	 * report success when at least one of the operations succeeded.
	 * or, to put it the other way around,
	 * only report failure when both operations failed.
	 *
	 * what to do about the failures is handled elsewhere.
	 * what we need to do here is just: complete the master_bio.
	 *
	 * local completion error, if any, has been stored as ERR_PTR
	 * in private_bio within drbd_request_endio.
	 */
	ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
	error = PTR_ERR(req->private_bio);

	/* Before we can signal completion to the upper layers,
	 * we may need to close the current transfer log epoch.
	 * We are within the request lock, so we can simply compare
	 * the request epoch number with the current transfer log
	 * epoch number.  If they match, increase the current_tle_nr,
	 * and reset the transfer log epoch write_cnt.
	 */
	if (op_is_write(bio_op(req->master_bio)) &&
	    req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
		start_new_tl_epoch(first_peer_device(device)->connection);

	/* Update disk stats */
	_drbd_end_io_acct(device, req);

	/* If READ failed,
	 * have it be pushed back to the retry work queue,
	 * so it will re-enter __drbd_make_request(),
	 * and be re-assigned to a suitable local or remote path,
	 * or failed if we do not have access to good data anymore.
	 *
	 * Unless it was failed early by __drbd_make_request(),
	 * because no path was available, in which case
	 * it was not even added to the transfer_log.
	 *
	 * read-ahead may fail, and will not be retried.
	 *
	 * WRITE should have used all available paths already.
	 */
	if (!ok &&
	    bio_op(req->master_bio) == REQ_OP_READ &&
	    !(req->master_bio->bi_opf & REQ_RAHEAD) &&
	    !list_empty(&req->tl_requests))
		req->rq_state |= RQ_POSTPONED;

	if (!(req->rq_state & RQ_POSTPONED)) {
		m->error = ok ? 0 : (error ?: -EIO);
		m->bio = req->master_bio;
		req->master_bio = NULL;
		/* We leave it in the tree, to be able to verify later
		 * write-acks in protocol != C during resync.
		 * But we mark it as "complete", so it won't be counted as
		 * conflict in a multi-primary setup. */
		req->i.completed = true;
	}

	if (req->i.waiting)
		wake_up(&device->misc_wait);

	/* Either we are about to complete to upper layers,
	 * or we will restart this request.
	 * In either case, the request object will be destroyed soon,
	 * so better remove it from all lists. */
	list_del_init(&req->req_pending_master_completion);
}

/* still holds resource->req_lock */
static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
{
	struct drbd_device *device = req->device;
	D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));

	if (!atomic_sub_and_test(put, &req->completion_ref))
		return 0;

	drbd_req_complete(req, m);

	if (req->rq_state & RQ_POSTPONED) {
		/* don't destroy the req object just yet,
		 * but queue it for retry */
		drbd_restart_request(req);
		return 0;
	}

	return 1;
}
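
/*
 * The helpers below maintain per-connection cached pointers to the
 * oldest request on the transfer log that is still in a given state:
 * next to be sent, waiting for an ack, or not yet done on the network.
 * The point, presumably, is that state checks such as request timeout
 * handling need not scan the whole transfer log: "set_if_null" records
 * a candidate, "advance" skips past the given request to the next one
 * still matching, or NULL if none remains.
 */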

static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_next == NULL)
		connection->req_next = req;
}

static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_next != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if (s & RQ_NET_QUEUED)
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_next = req;
}

static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_ack_pending == NULL)
		connection->req_ack_pending = req;
}

static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_ack_pending != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING))
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_ack_pending = req;
}

static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_not_net_done == NULL)
		connection->req_not_net_done = req;
}

static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_not_net_done != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE))
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_not_net_done = req;
}

/* I'd like this to be the only place that manipulates
 * req->completion_ref and req->kref. */
static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
		int clear, int set)
{
	struct drbd_device *device = req->device;
	struct drbd_peer_device *peer_device = first_peer_device(device);
	unsigned s = req->rq_state;
	int c_put = 0;

	if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP))
		set |= RQ_COMPLETION_SUSP;

	/* apply */

	req->rq_state &= ~clear;
	req->rq_state |= set;

	/* no change? */
	if (req->rq_state == s)
		return;

	/* intent: get references */

	kref_get(&req->kref);

	if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
		atomic_inc(&req->completion_ref);

	if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
		inc_ap_pending(device);
		atomic_inc(&req->completion_ref);
	}

	if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED)) {
		atomic_inc(&req->completion_ref);
		set_if_null_req_next(peer_device, req);
	}

	if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
		kref_get(&req->kref); /* wait for the DONE */

	if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT)) {
		/* potentially already completed in the ack_receiver thread */
		if (!(s & RQ_NET_DONE)) {
			atomic_add(req->i.size >> 9, &device->ap_in_flight);
			set_if_null_req_not_net_done(peer_device, req);
		}
		if (req->rq_state & RQ_NET_PENDING)
			set_if_null_req_ack_pending(peer_device, req);
	}

	if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
		atomic_inc(&req->completion_ref);

	/* progress: put references */

	if ((s & RQ_COMPLETION_SUSP) && (clear & RQ_COMPLETION_SUSP))
		++c_put;

	if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
		D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING);
		++c_put;
	}

	if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
		if (req->rq_state & RQ_LOCAL_ABORTED)
			kref_put(&req->kref, drbd_req_destroy);
		else
			++c_put;
		list_del_init(&req->req_pending_local);
	}

	if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
		dec_ap_pending(device);
		++c_put;
		req->acked_jif = jiffies;
		advance_conn_req_ack_pending(peer_device, req);
	}

	if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED)) {
		++c_put;
		advance_conn_req_next(peer_device, req);
	}

	if (!(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
		if (s & RQ_NET_SENT)
			atomic_sub(req->i.size >> 9, &device->ap_in_flight);
		if (s & RQ_EXP_BARR_ACK)
			kref_put(&req->kref, drbd_req_destroy);
		req->net_done_jif = jiffies;

		/* in ahead/behind mode, or just in case,
		 * before we finally destroy this request,
		 * the caching pointers must not reference it anymore */
		advance_conn_req_next(peer_device, req);
		advance_conn_req_ack_pending(peer_device, req);
		advance_conn_req_not_net_done(peer_device, req);
	}

	/* potentially complete and destroy */

	/* If we made progress, retry conflicting peer requests, if any. */
	if (req->i.waiting)
		wake_up(&device->misc_wait);

	if (c_put) {
		if (drbd_req_put_completion_ref(req, m, c_put))
			kref_put(&req->kref, drbd_req_destroy);
	} else {
		kref_put(&req->kref, drbd_req_destroy);
	}
}
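
/*
 * Example of the scheme above: HANDED_OVER_TO_NETWORK on a protocol C
 * write does mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT).
 * Clearing RQ_NET_QUEUED puts one completion_ref (the sender is done
 * with the request), while RQ_NET_PENDING stays set, keeping the
 * request alive until the peer's write ack arrives.
 */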

static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
{
	char b[BDEVNAME_SIZE];

	if (!__ratelimit(&drbd_ratelimit_state))
		return;

	drbd_warn(device, "local %s IO error sector %llu+%u on %s\n",
			(req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
			(unsigned long long)req->i.sector,
			req->i.size >> 9,
			bdevname(device->ldev->backing_bdev, b));
}

/* Helper for HANDED_OVER_TO_NETWORK.
 * Is this a protocol A write (neither WRITE_ACK nor RECEIVE_ACK expected)?
 * Is it also still "PENDING"?
 * --> If so, clear PENDING and set NET_OK below.
 * If it is a protocol A write, but not RQ_NET_PENDING anymore, the neg-ack
 * was faster (and we must not set RQ_NET_OK) */
static inline bool is_pending_write_protocol_A(struct drbd_request *req)
{
	return (req->rq_state &
		   (RQ_WRITE|RQ_NET_PENDING|RQ_EXP_WRITE_ACK|RQ_EXP_RECEIVE_ACK))
		== (RQ_WRITE|RQ_NET_PENDING);
}
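
/*
 * Recap of the DRBD wire protocols as they appear in rq_state:
 *   protocol A: a write counts as done once handed to the TCP stack;
 *               neither RQ_EXP_RECEIVE_ACK nor RQ_EXP_WRITE_ACK is set.
 *   protocol B: RQ_EXP_RECEIVE_ACK, the peer acks reception (P_RECV_ACK).
 *   protocol C: RQ_EXP_WRITE_ACK, the peer acks stable storage (P_WRITE_ACK).
 * The flags are assigned in the TO_BE_SENT case of __req_mod() below.
 */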

/* obviously this could be coded as many single functions
 * instead of one huge switch,
 * or by putting the code directly in the respective locations
 * (as it has been before).
 *
 * but having it this way
 *	enforces that it is all in this one place, where it is easier to audit,
 *	it makes it obvious that whatever "event" "happens" to a request should
 *	happen "atomically" within the req_lock,
 * and it enforces that we have to think in a very structured manner
 * about the "events" that may happen to a request during its life time ...
 */
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m)
{
	struct drbd_device *const device = req->device;
	struct drbd_peer_device *const peer_device = first_peer_device(device);
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	struct net_conf *nc;
	int p, rv = 0;

	if (m)
		m->bio = NULL;

	switch (what) {
	default:
		drbd_err(device, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
		break;

	/* does not happen...
	 * initialization done in drbd_req_new
	case CREATED:
		break;
		*/

	case TO_BE_SENT: /* via network */
		/* reached via __drbd_make_request
		 * and from w_read_retry_remote */
		D_ASSERT(device, !(req->rq_state & RQ_NET_MASK));
		rcu_read_lock();
		nc = rcu_dereference(connection->net_conf);
		p = nc->wire_protocol;
		rcu_read_unlock();
		req->rq_state |=
			p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
			p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
		mod_rq_state(req, m, 0, RQ_NET_PENDING);
		break;

	case TO_BE_SUBMITTED: /* locally */
		/* reached via __drbd_make_request */
		D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK));
		mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
		break;

	case COMPLETED_OK:
		if (req->rq_state & RQ_WRITE)
			device->writ_cnt += req->i.size >> 9;
		else
			device->read_cnt += req->i.size >> 9;

		mod_rq_state(req, m, RQ_LOCAL_PENDING,
				RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
		break;

	case ABORT_DISK_IO:
		mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED);
		break;

	case WRITE_COMPLETED_WITH_ERROR:
		drbd_report_io_error(device, req);
		__drbd_chk_io_error(device, DRBD_WRITE_ERROR);
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case READ_COMPLETED_WITH_ERROR:
		drbd_set_out_of_sync(device, req->i.sector, req->i.size);
		drbd_report_io_error(device, req);
		__drbd_chk_io_error(device, DRBD_READ_ERROR);
		/* fall through. */
	case READ_AHEAD_COMPLETED_WITH_ERROR:
		/* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case DISCARD_COMPLETED_NOTSUPP:
	case DISCARD_COMPLETED_WITH_ERROR:
		/* I'd rather not detach from local disk just because it
		 * failed a REQ_DISCARD. */
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case QUEUE_FOR_NET_READ:
		/* READ, and
		 * no local disk,
		 * or target area marked as invalid,
		 * or just got an io-error. */
		/* from __drbd_make_request
		 * or from bio_endio during read io-error recovery */

		/* So we can verify the handle in the answer packet.
		 * Corresponding drbd_remove_request_interval is in
		 * drbd_req_complete() */
		D_ASSERT(device, drbd_interval_empty(&req->i));
		drbd_insert_interval(&device->read_requests, &req->i);

		set_bit(UNPLUG_REMOTE, &device->flags);

		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
		req->w.cb = w_send_read_req;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case QUEUE_FOR_NET_WRITE:
		/* assert something? */
		/* from __drbd_make_request only */

		/* Corresponding drbd_remove_request_interval is in
		 * drbd_req_complete() */
		D_ASSERT(device, drbd_interval_empty(&req->i));
		drbd_insert_interval(&device->write_requests, &req->i);

		/* NOTE
		 * In case the req ended up on the transfer log before being
		 * queued on the worker, it could lead to this request being
		 * missed during cleanup after connection loss.
		 * So we have to do both operations here,
		 * within the same lock that protects the transfer log.
		 *
		 * _req_add_to_epoch(req); this has to be after the
		 * _maybe_start_new_epoch(req); which happened in
		 * __drbd_make_request, because we now may set the bit
		 * again ourselves to close the current epoch.
		 *
		 * Add req to the (now) current epoch (barrier). */

		/* otherwise we may lose an unplug, which may cause some remote
		 * io-scheduler timeout to expire, increasing maximum latency,
		 * hurting performance. */
		set_bit(UNPLUG_REMOTE, &device->flags);

		/* queue work item to send data */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
		req->w.cb = w_send_dblock;
		drbd_queue_work(&connection->sender_work,
				&req->w);

		/* close the epoch, in case it outgrew the limit */
		rcu_read_lock();
		nc = rcu_dereference(connection->net_conf);
		p = nc->max_epoch_size;
		rcu_read_unlock();
		if (connection->current_tle_writes >= p)
			start_new_tl_epoch(connection);

		break;

	case QUEUE_FOR_SEND_OOS:
		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
		req->w.cb = w_send_out_of_sync;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case READ_RETRY_REMOTE_CANCELED:
	case SEND_CANCELED:
	case SEND_FAILED:
		/* real cleanup will be done from tl_clear.  just update flags
		 * so it is no longer marked as on the worker queue */
		mod_rq_state(req, m, RQ_NET_QUEUED, 0);
		break;

	case HANDED_OVER_TO_NETWORK:
		/* assert something? */
		if (is_pending_write_protocol_A(req))
			/* this is what is dangerous about protocol A:
			 * pretend it was successfully written on the peer. */
			mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING,
						RQ_NET_SENT|RQ_NET_OK);
		else
			mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
		/* It is still not yet RQ_NET_DONE until the
		 * corresponding epoch barrier got acked as well,
		 * so we know what to dirty on connection loss. */
		break;

	case OOS_HANDED_TO_NETWORK:
		/* Was not set PENDING, no longer QUEUED, so is now DONE
		 * as far as this connection is concerned. */
		mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE);
		break;

	case CONNECTION_LOST_WHILE_PENDING:
		/* transfer log cleanup after connection loss */
		mod_rq_state(req, m,
				RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP,
				RQ_NET_DONE);
		break;

	case CONFLICT_RESOLVED:
		/* for superseded conflicting writes of multiple primaries,
		 * there is no need to keep anything in the tl, potential
		 * node crashes are covered by the activity log.
		 *
		 * If this request had been marked as RQ_POSTPONED before,
		 * it will actually not be completed, but "restarted",
		 * resubmitted from the retry worker context. */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
		break;

	case WRITE_ACKED_BY_PEER_AND_SIS:
		req->rq_state |= RQ_NET_SIS;
		/* fall through */
	case WRITE_ACKED_BY_PEER:
		/* Normal operation protocol C: successfully written on peer.
		 * During resync, even in protocol != C,
		 * we requested an explicit write ack anyways.
		 * Which means we cannot even assert anything here.
		 * Nothing more to do here.
		 * We want to keep the tl in place for all protocols, to cater
		 * for volatile write-back caches on lower level devices. */
		goto ack_common;
	case RECV_ACKED_BY_PEER:
		D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK);
		/* protocol B; pretends to be successfully written on peer.
		 * see also notes above in HANDED_OVER_TO_NETWORK about
		 * protocol != C */
	ack_common:
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
		break;

	case POSTPONE_WRITE:
		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
		/* If this node has already detected the write conflict, the
		 * worker will be waiting on misc_wait.  Wake it up once this
		 * request has completed locally.
		 */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		req->rq_state |= RQ_POSTPONED;
		if (req->i.waiting)
			wake_up(&device->misc_wait);
		/* Do not clear RQ_NET_PENDING. This request will make further
		 * progress via restart_conflicting_writes() or
		 * fail_postponed_requests(). Hopefully. */
		break;

	case NEG_ACKED:
		mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0);
		break;

	case FAIL_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;
		mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
		break;

	case RESTART_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;

		mod_rq_state(req, m,
				RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED,
				RQ_LOCAL_PENDING);

		rv = MR_READ;
		if (bio_data_dir(req->master_bio) == WRITE)
			rv = MR_WRITE;

		get_ldev(device); /* always succeeds in this call path */
		req->w.cb = w_restart_disk_io;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case RESEND:
		/* Simply complete (local only) READs. */
		if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
			mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
			break;
		}

		/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
		   before the connection loss (B&C only); only P_BARRIER_ACK
		   (or the local completion?) was missing when we suspended.
		   Throwing them out of the TL here by pretending we got a BARRIER_ACK.
		   During connection handshake, we ensure that the peer was not rebooted. */
		if (!(req->rq_state & RQ_NET_OK)) {
			/* FIXME could this possibly be a req->dw.cb == w_send_out_of_sync?
			 * in that case we must not set RQ_NET_PENDING. */

			mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
			if (req->w.cb) {
				/* w.cb expected to be w_send_dblock, or w_send_read_req */
				drbd_queue_work(&connection->sender_work,
						&req->w);
				rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
			} /* else: FIXME can this happen? */
			break;
		}
		/* else, fall through to BARRIER_ACKED */

	case BARRIER_ACKED:
		/* barrier ack for READ requests does not make sense */
		if (!(req->rq_state & RQ_WRITE))
			break;

		if (req->rq_state & RQ_NET_PENDING) {
			/* barrier came in before all requests were acked.
			 * this is bad, because if the connection is lost now,
			 * we won't be able to clean them up... */
			drbd_err(device, "FIXME (BARRIER_ACKED but pending)\n");
		}
		/* Allowed to complete requests, even while suspended.
		 * As this is called for all requests within a matching epoch,
		 * we need to filter, and only set RQ_NET_DONE for those that
		 * have actually been on the wire. */
		mod_rq_state(req, m, RQ_COMPLETION_SUSP,
				(req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0);
		break;

	case DATA_RECEIVED:
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
		break;

	case QUEUE_AS_DRBD_BARRIER:
		start_new_tl_epoch(connection);
		mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
		break;
	}

	return rv;
}

/* we may do a local read if:
 * - we are consistent (of course),
 * - or we are generally inconsistent,
 *   BUT we are still/already IN SYNC for this area.
 *   since size may be bigger than BM_BLOCK_SIZE,
 *   we may need to check several bits.
 */
static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size)
{
	unsigned long sbnr, ebnr;
	sector_t esector, nr_sectors;

	if (device->state.disk == D_UP_TO_DATE)
		return true;
	if (device->state.disk != D_INCONSISTENT)
		return false;
	esector = sector + (size >> 9) - 1;
	nr_sectors = drbd_get_capacity(device->this_bdev);
	D_ASSERT(device, sector < nr_sectors);
	D_ASSERT(device, esector < nr_sectors);

	sbnr = BM_SECT_TO_BIT(sector);
	ebnr = BM_SECT_TO_BIT(esector);

	return drbd_bm_count_bits(device, sbnr, ebnr) == 0;
}
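
/*
 * Worked example, assuming the usual 4 KiB bitmap granularity
 * (BM_BLOCK_SIZE): BM_SECT_TO_BIT(sector) is then sector >> 3, so a
 * 32 KiB read at sector 0 spans bitmap bits 0..7, and
 * drbd_bm_count_bits() must find all eight of them clean for the
 * local read to proceed.
 */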

static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
		enum drbd_read_balancing rbm)
{
	struct backing_dev_info *bdi;
	int stripe_shift;

	switch (rbm) {
	case RB_CONGESTED_REMOTE:
		bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
		return bdi_read_congested(bdi);
	case RB_LEAST_PENDING:
		return atomic_read(&device->local_cnt) >
			atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
	case RB_32K_STRIPING:  /* stripe_shift = 15 */
	case RB_64K_STRIPING:
	case RB_128K_STRIPING:
	case RB_256K_STRIPING:
	case RB_512K_STRIPING:
	case RB_1M_STRIPING:   /* stripe_shift = 20 */
		stripe_shift = (rbm - RB_32K_STRIPING + 15);
		return (sector >> (stripe_shift - 9)) & 1;
	case RB_ROUND_ROBIN:
		return test_and_change_bit(READ_BALANCE_RR, &device->flags);
	case RB_PREFER_REMOTE:
		return true;
	case RB_PREFER_LOCAL:
	default:
		return false;
	}
}
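
/*
 * Striping arithmetic: for RB_32K_STRIPING, stripe_shift is 15, so the
 * test above becomes (sector >> 6) & 1, i.e. reads alternate between
 * local and remote every 64 sectors (32 KiB).  RB_1M_STRIPING likewise
 * uses stripe_shift 20, alternating every 2048 sectors (1 MiB).
 */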

/*
 * complete_conflicting_writes  -  wait for any conflicting write requests
 *
 * The write_requests tree contains all active write requests which we
 * currently know about.  Wait for any requests to complete which conflict with
 * the new one.
 *
 * Only way out: remove the conflicting intervals from the tree.
 */
static void complete_conflicting_writes(struct drbd_request *req)
{
	DEFINE_WAIT(wait);
	struct drbd_device *device = req->device;
	struct drbd_interval *i;
	sector_t sector = req->i.sector;
	int size = req->i.size;

	for (;;) {
		drbd_for_each_overlap(i, &device->write_requests, sector, size) {
			/* Ignore, if already completed to upper layers. */
			if (i->completed)
				continue;
			/* Handle the first found overlap.  After the schedule
			 * we have to restart the tree walk. */
			break;
		}
		if (!i)	/* if any */
			break;

		/* Indicate to wake up device->misc_wait on progress. */
		prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
		i->waiting = true;
		spin_unlock_irq(&device->resource->req_lock);
		schedule();
		spin_lock_irq(&device->resource->req_lock);
	}
	finish_wait(&device->misc_wait, &wait);
}
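
/*
 * The loop above is the classic prepare_to_wait()/schedule() pattern:
 * mark the task as waiting, flag the conflicting interval so that its
 * completion path (see drbd_remove_request_interval()) knows to wake
 * device->misc_wait, drop the req_lock while sleeping, and restart the
 * tree walk from scratch after waking up, since the tree may have
 * changed in the meantime.
 */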

/* called within req_lock */
static void maybe_pull_ahead(struct drbd_device *device)
{
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct net_conf *nc;
	bool congested = false;
	enum drbd_on_congestion on_congestion;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	on_congestion = nc ? nc->on_congestion : OC_BLOCK;
	rcu_read_unlock();
	if (on_congestion == OC_BLOCK ||
	    connection->agreed_pro_version < 96)
		return;

	if (on_congestion == OC_PULL_AHEAD && device->state.conn == C_AHEAD)
		return; /* nothing to do ... */

	/* If I don't even have good local storage, we can not reasonably try
	 * to pull ahead of the peer. We also need the local reference to make
	 * sure device->act_log is there.
	 */
	if (!get_ldev_if_state(device, D_UP_TO_DATE))
		return;

	if (nc->cong_fill &&
	    atomic_read(&device->ap_in_flight) >= nc->cong_fill) {
		drbd_info(device, "Congestion-fill threshold reached\n");
		congested = true;
	}

	if (device->act_log->used >= nc->cong_extents) {
		drbd_info(device, "Congestion-extents threshold reached\n");
		congested = true;
	}

	if (congested) {
		/* start a new epoch for non-mirrored writes */
		start_new_tl_epoch(first_peer_device(device)->connection);

		if (on_congestion == OC_PULL_AHEAD)
			_drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL);
		else  /* nc->on_congestion == OC_DISCONNECT */
			_drbd_set_state(_NS(device, conn, C_DISCONNECTING), 0, NULL);
	}
	put_ldev(device);
}

/* If this returns false, and req->private_bio is still set,
 * this should be submitted locally.
 *
 * If it returns false, but req->private_bio is not set,
 * we do not have access to good data :(
 *
 * Otherwise, this destroys req->private_bio, if any,
 * and returns true.
 */
static bool do_remote_read(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	enum drbd_read_balancing rbm;

	if (req->private_bio) {
		if (!drbd_may_do_local_read(device,
					req->i.sector, req->i.size)) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
	}

	if (device->state.pdsk != D_UP_TO_DATE)
		return false;

	if (req->private_bio == NULL)
		return true;

	/* TODO: improve read balancing decisions, take into account drbd
	 * protocol, pending requests etc. */

	rcu_read_lock();
	rbm = rcu_dereference(device->ldev->disk_conf)->read_balancing;
	rcu_read_unlock();

	if (rbm == RB_PREFER_LOCAL && req->private_bio)
		return false; /* submit locally */

	if (remote_due_to_read_balancing(device, req->i.sector, rbm)) {
		if (req->private_bio) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
		return true;
	}

	return false;
}

bool drbd_should_do_remote(union drbd_dev_state s)
{
	return s.pdsk == D_UP_TO_DATE ||
		(s.pdsk >= D_INCONSISTENT &&
		 s.conn >= C_WF_BITMAP_T &&
		 s.conn < C_AHEAD);
	/* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T.
	   That is equivalent since before 96 IO was frozen in the C_WF_BITMAP*
	   states. */
}

static bool drbd_should_send_out_of_sync(union drbd_dev_state s)
{
	return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S;
	/* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary
	   since we enter state C_AHEAD only if proto >= 96 */
}

/* returns number of connections (== 1, for drbd 8.4)
 * expected to actually write this data,
 * which does NOT include those that we are L_AHEAD for. */
static int drbd_process_write_request(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	int remote, send_oos;

	remote = drbd_should_do_remote(device->state);
	send_oos = drbd_should_send_out_of_sync(device->state);

	/* Need to replicate writes.  Unless it is an empty flush,
	 * which is better mapped to a DRBD P_BARRIER packet,
	 * also for drbd wire protocol compatibility reasons.
	 * If this was a flush, just start a new epoch.
	 * Unless the current epoch was empty anyways, or we are not currently
	 * replicating, in which case there is no point. */
	if (unlikely(req->i.size == 0)) {
		/* The only size==0 bios we expect are empty flushes. */
		D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH);
		if (remote)
			_req_mod(req, QUEUE_AS_DRBD_BARRIER);
		return remote;
	}

	if (!remote && !send_oos)
		return 0;

	D_ASSERT(device, !(remote && send_oos));

	if (remote) {
		_req_mod(req, TO_BE_SENT);
		_req_mod(req, QUEUE_FOR_NET_WRITE);
	} else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size))
		_req_mod(req, QUEUE_FOR_SEND_OOS);

	return remote;
}

static void drbd_process_discard_req(struct drbd_request *req)
{
	int err = drbd_issue_discard_or_zero_out(req->device,
				req->i.sector, req->i.size >> 9, true);

	if (err)
		req->private_bio->bi_error = -EIO;
	bio_endio(req->private_bio);
}

static void
drbd_submit_req_private_bio(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	struct bio *bio = req->private_bio;
	unsigned int type;

	if (bio_op(bio) != REQ_OP_READ)
		type = DRBD_FAULT_DT_WR;
	else if (bio->bi_opf & REQ_RAHEAD)
		type = DRBD_FAULT_DT_RA;
	else
		type = DRBD_FAULT_DT_RD;

	bio->bi_bdev = device->ldev->backing_bdev;

	/* State may have changed since we grabbed our reference on the
	 * ->ldev member. Double check, and short-circuit to endio.
	 * In case the last activity log transaction failed to get on
	 * stable storage, and this is a WRITE, we may not even submit
	 * this bio. */
	if (get_ldev(device)) {
		if (drbd_insert_fault(device, type))
			bio_io_error(bio);
		else if (bio_op(bio) == REQ_OP_DISCARD)
			drbd_process_discard_req(req);
		else
			generic_make_request(bio);
		put_ldev(device);
	} else
		bio_io_error(bio);
}

static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
{
	spin_lock_irq(&device->resource->req_lock);
	list_add_tail(&req->tl_requests, &device->submit.writes);
	list_add_tail(&req->req_pending_master_completion,
			&device->pending_master_completion[1 /* WRITE */]);
	spin_unlock_irq(&device->resource->req_lock);
	queue_work(device->submit.wq, &device->submit.worker);
	/* do_submit() may sleep internally on al_wait, too */
	wake_up(&device->al_wait);
}

/* returns the new drbd_request pointer, if the caller is expected to
 * drbd_send_and_submit() it (to save latency), or NULL if we queued the
 * request on the submitter thread.
 * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
 */
static struct drbd_request *
drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
{
	const int rw = bio_data_dir(bio);
	struct drbd_request *req;

	/* allocate outside of all locks; */
	req = drbd_req_new(device, bio);
	if (!req) {
		dec_ap_bio(device);
		/* only pass the error to the upper layers.
		 * if user cannot handle io errors, that's not our business. */
		drbd_err(device, "could not kmalloc() req\n");
		bio->bi_error = -ENOMEM;
		bio_endio(bio);
		return ERR_PTR(-ENOMEM);
	}
	req->start_jif = start_jif;

	if (!get_ldev(device)) {
		bio_put(req->private_bio);
		req->private_bio = NULL;
	}

	/* Update disk stats */
	_drbd_start_io_acct(device, req);

	/* process discards always from our submitter thread;
	 * note: bio_op() returns an opcode, not a flag mask,
	 * so it must be compared, not and-ed */
	if (bio_op(bio) == REQ_OP_DISCARD)
		goto queue_for_submitter_thread;

	if (rw == WRITE && req->private_bio && req->i.size
	&& !test_bit(AL_SUSPENDED, &device->flags)) {
		if (!drbd_al_begin_io_fastpath(device, &req->i))
			goto queue_for_submitter_thread;
		req->rq_state |= RQ_IN_ACT_LOG;
		req->in_actlog_jif = jiffies;
	}
	return req;

 queue_for_submitter_thread:
	atomic_inc(&device->ap_actlog_cnt);
	drbd_queue_write(device, req);
	return NULL;
}
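
/*
 * In short: a write takes the fast path above only if its activity log
 * extent is already hot, i.e. drbd_al_begin_io_fastpath() succeeds
 * without blocking.  Everything else (discards, cold extents, suspended
 * activity log) is handed to the submitter thread, which may block on
 * an activity log transaction commit; see do_submit() below.
 */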

/* Require at least one path to current data.
 * We don't want to allow writes on C_STANDALONE D_INCONSISTENT:
 * We would not allow to read what was written,
 * we would not have bumped the data generation uuids,
 * we would cause data divergence for all the wrong reasons.
 *
 * If we don't see at least one D_UP_TO_DATE, we will fail this request,
 * which either returns EIO, or, if OND_SUSPEND_IO is set, suspends IO,
 * and queues for retry later.
 */
static bool may_do_writes(struct drbd_device *device)
{
	const union drbd_dev_state s = device->state;
	return s.disk == D_UP_TO_DATE || s.pdsk == D_UP_TO_DATE;
}

static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
{
	struct drbd_resource *resource = device->resource;
	const int rw = bio_data_dir(req->master_bio);
	struct bio_and_error m = { NULL, };
	bool no_remote = false;
	bool submit_private_bio = false;

	spin_lock_irq(&resource->req_lock);
	if (rw == WRITE) {
		/* This may temporarily give up the req_lock,
		 * but will re-acquire it before it returns here.
		 * Needs to be before the check on drbd_suspended() */
		complete_conflicting_writes(req);
		/* no more giving up req_lock from now on! */

		/* check for congestion, and potentially stop sending
		 * full data updates, but start sending "dirty bits" only. */
		maybe_pull_ahead(device);
	}


	if (drbd_suspended(device)) {
		/* push back and retry: */
		req->rq_state |= RQ_POSTPONED;
		if (req->private_bio) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
		goto out;
	}

	/* We fail READ early, if we can not serve it.
	 * We must do this before req is registered on any lists.
	 * Otherwise, drbd_req_complete() will queue failed READ for retry. */
	if (rw != WRITE) {
		if (!do_remote_read(req) && !req->private_bio)
			goto nodata;
	}

	/* which transfer log epoch does this belong to? */
	req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr);

	/* no point in adding empty flushes to the transfer log,
	 * they are mapped to drbd barriers already. */
	if (likely(req->i.size != 0)) {
		if (rw == WRITE)
			first_peer_device(device)->connection->current_tle_writes++;

		list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log);
	}

	if (rw == WRITE) {
		if (req->private_bio && !may_do_writes(device)) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
			goto nodata;
		}
		if (!drbd_process_write_request(req))
			no_remote = true;
	} else {
		/* We either have a private_bio, or we can read from remote.
		 * Otherwise we had done the goto nodata above. */
		if (req->private_bio == NULL) {
			_req_mod(req, TO_BE_SENT);
			_req_mod(req, QUEUE_FOR_NET_READ);
		} else
			no_remote = true;
	}

	/* If it took the fast path in drbd_request_prepare, add it here.
	 * The slow path has added it already. */
	if (list_empty(&req->req_pending_master_completion))
		list_add_tail(&req->req_pending_master_completion,
				&device->pending_master_completion[rw == WRITE]);
	if (req->private_bio) {
		/* needs to be marked within the same spinlock */
		req->pre_submit_jif = jiffies;
		list_add_tail(&req->req_pending_local,
			&device->pending_completion[rw == WRITE]);
		_req_mod(req, TO_BE_SUBMITTED);
		/* but we need to give up the spinlock to submit */
		submit_private_bio = true;
	} else if (no_remote) {
nodata:
		if (__ratelimit(&drbd_ratelimit_state))
			drbd_err(device, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
					(unsigned long long)req->i.sector, req->i.size >> 9);
		/* A write may have been queued for send_oos, however.
		 * So we can not simply free it, we must go through drbd_req_put_completion_ref() */
	}

out:
	if (drbd_req_put_completion_ref(req, &m, 1))
		kref_put(&req->kref, drbd_req_destroy);
	spin_unlock_irq(&resource->req_lock);

	/* Even though above is a kref_put(), this is safe.
	 * As long as we still need to submit our private bio,
	 * we hold a completion ref, and the request cannot disappear.
	 * If however this request did not even have a private bio to submit
	 * (e.g. remote read), req may already be invalid now.
	 * That's why we cannot check on req->private_bio. */
	if (submit_private_bio)
		drbd_submit_req_private_bio(req);
	if (m.bio)
		complete_master_bio(device, &m);
}

void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
{
	struct drbd_request *req = drbd_request_prepare(device, bio, start_jif);
	if (IS_ERR_OR_NULL(req))
		return;
	drbd_send_and_submit(device, req);
}

static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)
{
	struct drbd_request *req, *tmp;
	list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
		const int rw = bio_data_dir(req->master_bio);

		if (rw == WRITE /* rw != WRITE should not even end up here! */
		&& req->private_bio && req->i.size
		&& !test_bit(AL_SUSPENDED, &device->flags)) {
			if (!drbd_al_begin_io_fastpath(device, &req->i))
				continue;

			req->rq_state |= RQ_IN_ACT_LOG;
			req->in_actlog_jif = jiffies;
			atomic_dec(&device->ap_actlog_cnt);
		}

		list_del_init(&req->tl_requests);
		drbd_send_and_submit(device, req);
	}
}

static bool prepare_al_transaction_nonblock(struct drbd_device *device,
					    struct list_head *incoming,
					    struct list_head *pending,
					    struct list_head *later)
{
	struct drbd_request *req, *tmp;
	int wake = 0;
	int err;

	spin_lock_irq(&device->al_lock);
	list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
		err = drbd_al_begin_io_nonblock(device, &req->i);
		if (err == -ENOBUFS)
			break;
		if (err == -EBUSY)
			wake = 1;
		if (err)
			list_move_tail(&req->tl_requests, later);
		else
			list_move_tail(&req->tl_requests, pending);
	}
	spin_unlock_irq(&device->al_lock);
	if (wake)
		wake_up(&device->al_wait);
	return !list_empty(pending);
}
1438 | ||
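/* The AL transaction covering these requests has been committed;
 * mark them as being in the activity log and submit them. */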
void send_and_submit_pending(struct drbd_device *device, struct list_head *pending)
{
	struct drbd_request *req, *tmp;

	list_for_each_entry_safe(req, tmp, pending, tl_requests) {
		req->rq_state |= RQ_IN_ACT_LOG;
		req->in_actlog_jif = jiffies;
		atomic_dec(&device->ap_actlog_cnt);
		list_del_init(&req->tl_requests);
		drbd_send_and_submit(device, req);
	}
}

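/* Submit worker: drain device->submit.writes, activate the needed AL
 * extents (batching them into as few AL transactions as possible), and
 * submit the requests.  See the inline comments below for how starvation
 * of requests to "cold" extents is avoided. */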
void do_submit(struct work_struct *ws)
{
	struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker);
	LIST_HEAD(incoming);	/* from drbd_make_request() */
	LIST_HEAD(pending);	/* to be submitted after next AL-transaction commit */
	LIST_HEAD(busy);	/* blocked by resync requests */

	/* grab new incoming requests */
	spin_lock_irq(&device->resource->req_lock);
	list_splice_tail_init(&device->submit.writes, &incoming);
	spin_unlock_irq(&device->resource->req_lock);

	for (;;) {
		DEFINE_WAIT(wait);

		/* move used-to-be-busy back to front of incoming */
		list_splice_init(&busy, &incoming);
		submit_fast_path(device, &incoming);
		if (list_empty(&incoming))
			break;

		for (;;) {
			prepare_to_wait(&device->al_wait, &wait, TASK_UNINTERRUPTIBLE);

			list_splice_init(&busy, &incoming);
			prepare_al_transaction_nonblock(device, &incoming, &pending, &busy);
			if (!list_empty(&pending))
				break;

			schedule();

			/* If all currently "hot" activity log extents are kept busy by
			 * incoming requests, we still must not totally starve new
			 * requests to "cold" extents.
			 * Something left on &incoming means there had not been
			 * enough update slots available, and the activity log
			 * has been marked as "starving".
			 *
			 * Try again now, without looking for new requests,
			 * effectively blocking all new requests until we made
			 * at least _some_ progress with what we currently have.
			 */
			if (!list_empty(&incoming))
				continue;

			/* Nothing moved to pending, but nothing left
			 * on incoming: all moved to busy!
			 * Grab new and iterate. */
			spin_lock_irq(&device->resource->req_lock);
			list_splice_tail_init(&device->submit.writes, &incoming);
			spin_unlock_irq(&device->resource->req_lock);
		}
		finish_wait(&device->al_wait, &wait);

		/* If the transaction was full before all incoming requests
		 * had been processed, skip ahead to commit, and iterate
		 * without splicing in more incoming requests from upper layers.
		 *
		 * Else, if all incoming have been processed,
		 * they have become either "pending" (to be submitted after
		 * next transaction commit) or "busy" (blocked by resync).
		 *
		 * Maybe more was queued while we prepared the transaction?
		 * Try to stuff those into this transaction as well.
		 * Be strictly non-blocking here,
		 * we already have something to commit.
		 *
		 * Commit if we don't make any more progress.
		 */

		while (list_empty(&incoming)) {
			LIST_HEAD(more_pending);
			LIST_HEAD(more_incoming);
			bool made_progress;

			/* It is OK to look outside the lock,
			 * it's only an optimization anyway */
			if (list_empty(&device->submit.writes))
				break;

			spin_lock_irq(&device->resource->req_lock);
			list_splice_tail_init(&device->submit.writes, &more_incoming);
			spin_unlock_irq(&device->resource->req_lock);

			if (list_empty(&more_incoming))
				break;

			made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending, &busy);

			list_splice_tail_init(&more_pending, &pending);
			list_splice_tail_init(&more_incoming, &incoming);
			if (!made_progress)
				break;
		}

		drbd_al_begin_io_commit(device);
		send_and_submit_pending(device, &pending);
	}
}

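/* make_request entry point, called by the block layer for each bio
 * submitted to one of our DRBD block devices. */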
blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio)
{
	struct drbd_device *device = (struct drbd_device *) q->queuedata;
	unsigned long start_jif;

	blk_queue_split(q, &bio, q->bio_split);

	start_jif = jiffies;

	/*
	 * what we "blindly" assume:
	 */
	D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));

	inc_ap_bio(device);
	__drbd_make_request(device, bio, start_jif);
	return BLK_QC_T_NONE;
}

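/* Decide whether the oldest request waiting for the peer has exceeded the
 * effective network timeout @ent (= ko-count * timeout), while avoiding
 * false positives right after a reconnect or while an epoch closing
 * barrier ack is legitimately still outstanding. */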
static bool net_timeout_reached(struct drbd_request *net_req,
		struct drbd_connection *connection,
		unsigned long now, unsigned long ent,
		unsigned int ko_count, unsigned int timeout)
{
	struct drbd_device *device = net_req->device;

	if (!time_after(now, net_req->pre_send_jif + ent))
		return false;

	if (time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent))
		return false;

	if (net_req->rq_state & RQ_NET_PENDING) {
		drbd_warn(device, "Remote failed to finish a request within %ums > ko-count (%u) * timeout (%u * 0.1s)\n",
			jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout);
		return true;
	}

	/* We received an ACK already (or are using protocol A),
	 * but are waiting for the epoch closing barrier ack.
	 * Check if we sent the barrier already.  We should not blame the peer
	 * for being unresponsive if we did not even ask it yet. */
	if (net_req->epoch == connection->send.current_epoch_nr) {
		drbd_warn(device,
			"We did not send a P_BARRIER for %ums > ko-count (%u) * timeout (%u * 0.1s); drbd kernel thread blocked?\n",
			jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout);
		return false;
	}

	/* Worst case: we may have been blocked for whatever reason, then
	 * suddenly are able to send a lot of requests (and epoch separating
	 * barriers) in quick succession.
	 * The timestamp of the net_req may be much too old and not correspond
	 * to the sending time of the relevant unack'ed barrier packet, so it
	 * would trigger a spurious timeout.  The latest barrier packet may
	 * have a timestamp too recent to trigger the timeout, so we could
	 * potentially miss a timeout.  Right now we don't have a place to
	 * conveniently store these timestamps.
	 * But in this particular situation, the application requests are still
	 * completed to upper layers, so DRBD should still "feel" responsive.
	 * No need yet to kill this connection, it may still recover.
	 * If not, eventually we will have queued enough into the network for
	 * us to block.  From that point of view, the timestamp of the last sent
	 * barrier packet is relevant enough.
	 */
	if (time_after(now, connection->send.last_sent_barrier_jif + ent)) {
		drbd_warn(device, "Remote failed to answer a P_BARRIER (sent at %lu jif; now=%lu jif) within %ums > ko-count (%u) * timeout (%u * 0.1s)\n",
			connection->send.last_sent_barrier_jif, now,
			jiffies_to_msecs(now - connection->send.last_sent_barrier_jif), ko_count, timeout);
		return true;
	}
	return false;
}

/* A request is considered timed out, if
 * - we have some effective timeout from the configuration,
 *   with some state restrictions applied,
 * - the oldest request is waiting for a response from the network
 *   or from the local disk,
 * - the oldest request is in fact older than the effective timeout,
 * - the connection was established (or the disk was attached)
 *   for longer than the timeout already.
 * Note that for 32bit jiffies and very stable connections/disks,
 * we may have a wrap around, which is caught by
 *   !time_in_range(now, last_..._jif, last_..._jif + timeout).
 *
 * Side effect: once per 32bit wrap-around interval, which means every
 * ~198 days with 250 HZ, we have a window where the timeout would need
 * to expire twice (worst case) to become effective.  Good enough.
 */

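/* Recurring per-device timer: check the oldest network and local disk
 * requests against the effective timeouts, force C_TIMEOUT or a forced
 * detach when they expire, and re-arm for the nearest deadline that has
 * not yet expired. */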
void request_timer_fn(unsigned long data)
{
	struct drbd_device *device = (struct drbd_device *) data;
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */
	struct net_conf *nc;
	unsigned long oldest_submit_jif;
	unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
	unsigned long now;
	unsigned int ko_count = 0, timeout = 0;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (nc && device->state.conn >= C_WF_REPORT_PARAMS) {
		ko_count = nc->ko_count;
		timeout = nc->timeout;
	}

	if (get_ldev(device)) { /* implicit state.disk >= D_INCONSISTENT */
		dt = rcu_dereference(device->ldev->disk_conf)->disk_timeout * HZ / 10;
		put_ldev(device);
	}
	rcu_read_unlock();

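	/* Effective network timeout, in jiffies.  "timeout" is configured in
	 * units of 0.1 seconds.  For example (hypothetical config values):
	 * timeout=60 (6.0s) and ko_count=7 give ent = 60 * HZ/10 * 7,
	 * i.e. 42 seconds worth of jiffies. */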
	ent = timeout * HZ/10 * ko_count;
	et = min_not_zero(dt, ent);

	if (!et)
		return; /* Recurring timer stopped */

	now = jiffies;
	nt = now + et;

	spin_lock_irq(&device->resource->req_lock);
	req_read = list_first_entry_or_null(&device->pending_completion[0], struct drbd_request, req_pending_local);
	req_write = list_first_entry_or_null(&device->pending_completion[1], struct drbd_request, req_pending_local);

	/* Maybe the oldest request waiting for the peer is in fact still
	 * blocking in tcp sendmsg.  That's OK, though: that case is handled
	 * via the socket send timeout, requesting a ping, and bumping
	 * ko-count in we_should_drop_the_connection().
	 */

	/* Check the oldest request we successfully sent,
	 * but which is still waiting for an ACK. */
	req_peer = connection->req_ack_pending;

	/* If we don't have such a request (e.g. protocol A),
	 * check the oldest request which is still waiting on its epoch
	 * closing barrier ack. */
	if (!req_peer)
		req_peer = connection->req_not_net_done;

	/* evaluate the oldest peer request only in one timer! */
	if (req_peer && req_peer->device != device)
		req_peer = NULL;

	/* do we have something to evaluate? */
	if (req_peer == NULL && req_write == NULL && req_read == NULL)
		goto out;

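	/* Pick the older of the two pre_submit timestamps; if neither a read
	 * nor a write is pending locally, use "now", which disables the
	 * disk-timeout check below. */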
	oldest_submit_jif =
		(req_write && req_read)
		? ( time_before(req_write->pre_submit_jif, req_read->pre_submit_jif)
		  ? req_write->pre_submit_jif : req_read->pre_submit_jif )
		: req_write ? req_write->pre_submit_jif
		: req_read ? req_read->pre_submit_jif : now;

	if (ent && req_peer && net_timeout_reached(req_peer, connection, now, ent, ko_count, timeout))
		_conn_request_state(connection, NS(conn, C_TIMEOUT), CS_VERBOSE | CS_HARD);

	if (dt && oldest_submit_jif != now &&
	    time_after(now, oldest_submit_jif + dt) &&
	    !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) {
		drbd_warn(device, "Local backing device failed to meet the disk-timeout\n");
		__drbd_chk_io_error(device, DRBD_FORCE_DETACH);
	}

	/* Reschedule timer for the nearest not already expired timeout.
	 * Fallback to now + min(effective network timeout, disk timeout). */
	ent = (ent && req_peer && time_before(now, req_peer->pre_send_jif + ent))
		? req_peer->pre_send_jif + ent : now + et;
	dt = (dt && oldest_submit_jif != now && time_before(now, oldest_submit_jif + dt))
		? oldest_submit_jif + dt : now + et;
	nt = time_before(ent, dt) ? ent : dt;
out:
	spin_unlock_irq(&device->resource->req_lock);
	mod_timer(&device->request_timer, nt);
}