/*
   drbd_req.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_req.h"


static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size);

/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
{
	const int rw = bio_data_dir(req->master_bio);
	int cpu;
	cpu = part_stat_lock();
	part_round_stats(cpu, &mdev->vdisk->part0);
	part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
	part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], req->i.size >> 9);
	(void) cpu; /* The macro invocations above want the cpu argument, I do not like
		       the compiler warning about cpu only assigned but never used... */
	part_inc_in_flight(&mdev->vdisk->part0, rw);
	part_stat_unlock();
}

/* Update disk stats when completing request upwards */
static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
{
	int rw = bio_data_dir(req->master_bio);
	unsigned long duration = jiffies - req->start_time;
	int cpu;
	cpu = part_stat_lock();
	part_stat_add(cpu, &mdev->vdisk->part0, ticks[rw], duration);
	part_round_stats(cpu, &mdev->vdisk->part0);
	part_dec_in_flight(&mdev->vdisk->part0, rw);
	part_stat_unlock();
}

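/* A drbd_request is allocated from drbd_request_mempool and is kept alive by
 * two counters (see mod_rq_state() below):
 *   completion_ref - while non-zero, the master bio must not be completed
 *                    towards the upper layers;
 *   kref           - the object lifetime; drbd_req_destroy() runs when it
 *                    drops to zero.
 * drbd_req_new() starts out with completion_ref == 1 and one kref, both
 * owned by the submission path (__drbd_make_request()). */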
static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
					 struct bio *bio_src)
{
	struct drbd_request *req;

	req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
	if (!req)
		return NULL;

	drbd_req_make_private_bio(req, bio_src);
	req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
	req->w.mdev = mdev;
	req->master_bio = bio_src;
	req->epoch = 0;

	drbd_clear_interval(&req->i);
	req->i.sector = bio_src->bi_sector;
	req->i.size = bio_src->bi_size;
	req->i.local = true;
	req->i.waiting = false;

	INIT_LIST_HEAD(&req->tl_requests);
	INIT_LIST_HEAD(&req->w.list);

	/* one reference to be put by __drbd_make_request */
	atomic_set(&req->completion_ref, 1);
	/* one kref as long as completion_ref > 0 */
	kref_init(&req->kref);
	return req;
}

void drbd_req_destroy(struct kref *kref)
{
	struct drbd_request *req = container_of(kref, struct drbd_request, kref);
	struct drbd_conf *mdev = req->w.mdev;
	const unsigned s = req->rq_state;

	if ((req->master_bio && !(s & RQ_POSTPONED)) ||
	    atomic_read(&req->completion_ref) ||
	    (s & RQ_LOCAL_PENDING) ||
	    ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
		dev_err(DEV, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
				s, atomic_read(&req->completion_ref));
		return;
	}

	/* remove it from the transfer log.
	 * well, only if it had been there in the first
	 * place... if it had not (local only or conflicting
	 * and never sent), it should still be "empty" as
	 * initialized in drbd_req_new(), so we can list_del() it
	 * here unconditionally */
	list_del_init(&req->tl_requests);

	/* if it was a write, we may have to set the corresponding
	 * bit(s) out-of-sync first. If it had a local part, we need to
	 * release the reference to the activity log. */
	if (s & RQ_WRITE) {
		/* Set out-of-sync unless both OK flags are set
		 * (local only or remote failed).
		 * Other places where we set out-of-sync:
		 * READ with local io-error */

		/* There is a special case:
		 * we may notice late that IO was suspended,
		 * and postpone, or schedule for retry, a write,
		 * before it even was submitted or sent.
		 * In that case we do not want to touch the bitmap at all.
		 */
		if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
			if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
				drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);

			if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
				drbd_set_in_sync(mdev, req->i.sector, req->i.size);
		}

		/* one might be tempted to move the drbd_al_complete_io
		 * to the local io completion callback drbd_request_endio.
		 * but, if this was a mirror write, we may only
		 * drbd_al_complete_io after this is RQ_NET_DONE,
		 * otherwise the extent could be dropped from the al
		 * before it has actually been written on the peer.
		 * if we crash before our peer knows about the request,
		 * but after the extent has been dropped from the al,
		 * we would forget to resync the corresponding extent.
		 */
		if (s & RQ_IN_ACT_LOG) {
			if (get_ldev_if_state(mdev, D_FAILED)) {
				drbd_al_complete_io(mdev, &req->i);
				put_ldev(mdev);
			} else if (__ratelimit(&drbd_ratelimit_state)) {
				dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu, %u), "
					 "but my Disk seems to have failed :(\n",
					 (unsigned long long) req->i.sector, req->i.size);
			}
		}
	}

	mempool_free(req, drbd_request_mempool);
}

static void wake_all_senders(struct drbd_tconn *tconn) {
	wake_up(&tconn->sender_work.q_wait);
}

/* must hold resource->req_lock */
void start_new_tl_epoch(struct drbd_tconn *tconn)
{
	/* no point closing an epoch, if it is empty, anyways. */
	if (tconn->current_tle_writes == 0)
		return;

	tconn->current_tle_writes = 0;
	atomic_inc(&tconn->current_tle_nr);
	wake_all_senders(tconn);
}

void complete_master_bio(struct drbd_conf *mdev,
		struct bio_and_error *m)
{
	bio_endio(m->bio, m->error);
	dec_ap_bio(mdev);
}


static void drbd_remove_request_interval(struct rb_root *root,
					 struct drbd_request *req)
{
	struct drbd_conf *mdev = req->w.mdev;
	struct drbd_interval *i = &req->i;

	drbd_remove_interval(root, i);

	/* Wake up any processes waiting for this request to complete. */
	if (i->waiting)
		wake_up(&mdev->misc_wait);
}

/* Helper for __req_mod().
 * Set m->bio to the master bio, if it is fit to be completed,
 * or leave it alone (it is initialized to NULL in __req_mod),
 * if it has already been completed, or cannot be completed yet.
 * If m->bio is set, the error status to be returned is placed in m->error.
 */
static
void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
{
	const unsigned s = req->rq_state;
	struct drbd_conf *mdev = req->w.mdev;
	int rw;
	int error, ok;

	/* we must not complete the master bio, while it is
	 *	still being processed by _drbd_send_zc_bio (drbd_send_dblock)
	 *	not yet acknowledged by the peer
	 *	not yet completed by the local io subsystem
	 * these flags may get cleared in any order by
	 *	the worker,
	 *	the receiver,
	 *	the bio_endio completion callbacks.
	 */
	if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
	    (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
	    (s & RQ_COMPLETION_SUSP)) {
		dev_err(DEV, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
		return;
	}

	if (!req->master_bio) {
		dev_err(DEV, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
		return;
	}

	rw = bio_rw(req->master_bio);

	/*
	 * figure out whether to report success or failure.
	 *
	 * report success when at least one of the operations succeeded.
	 * or, to put it the other way,
	 * only report failure, when both operations failed.
	 *
	 * what to do about the failures is handled elsewhere.
	 * what we need to do here is just: complete the master_bio.
	 *
	 * local completion error, if any, has been stored as ERR_PTR
	 * in private_bio within drbd_request_endio.
	 */
	ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
	error = PTR_ERR(req->private_bio);

	/* remove the request from the conflict detection
	 * respective block_id verification hash */
	if (!drbd_interval_empty(&req->i)) {
		struct rb_root *root;

		if (rw == WRITE)
			root = &mdev->write_requests;
		else
			root = &mdev->read_requests;
		drbd_remove_request_interval(root, req);
	} else if (!(s & RQ_POSTPONED))
		D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);

	/* Before we can signal completion to the upper layers,
	 * we may need to close the current transfer log epoch.
	 * We are within the request lock, so we can simply compare
	 * the request epoch number with the current transfer log
	 * epoch number.  If they match, increase the current_tle_nr,
	 * and reset the transfer log epoch write_cnt.
	 */
	if (rw == WRITE &&
	    req->epoch == atomic_read(&mdev->tconn->current_tle_nr))
		start_new_tl_epoch(mdev->tconn);

	/* Update disk stats */
	_drbd_end_io_acct(mdev, req);

	/* If READ failed,
	 * have it be pushed back to the retry work queue,
	 * so it will re-enter __drbd_make_request(),
	 * and be re-assigned to a suitable local or remote path,
	 * or failed if we do not have access to good data anymore.
	 *
	 * Unless it was failed early by __drbd_make_request(),
	 * because no path was available, in which case
	 * it was not even added to the transfer_log.
	 *
	 * READA may fail, and will not be retried.
	 *
	 * WRITE should have used all available paths already.
	 */
	if (!ok && rw == READ && !list_empty(&req->tl_requests))
		req->rq_state |= RQ_POSTPONED;

	if (!(req->rq_state & RQ_POSTPONED)) {
		m->error = ok ? 0 : (error ?: -EIO);
		m->bio = req->master_bio;
		req->master_bio = NULL;
	}
}

static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
{
	struct drbd_conf *mdev = req->w.mdev;
	D_ASSERT(m || (req->rq_state & RQ_POSTPONED));

	if (!atomic_sub_and_test(put, &req->completion_ref))
		return 0;

	drbd_req_complete(req, m);

	if (req->rq_state & RQ_POSTPONED) {
		/* don't destroy the req object just yet,
		 * but queue it for retry */
		drbd_restart_request(req);
		return 0;
	}

	return 1;
}

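/* Note: mod_rq_state() applies a (clear, set) pair of RQ_* masks to
 * req->rq_state and translates the resulting transitions into reference
 * counting.  Flags that mean "someone still has to act on this request"
 * (RQ_LOCAL_PENDING, RQ_NET_PENDING, RQ_NET_QUEUED, RQ_COMPLETION_SUSP)
 * each hold a completion_ref while they are set; RQ_EXP_BARR_ACK and a
 * pending local abort each hold an extra kref.  Dropping the last
 * completion_ref completes the master bio via drbd_req_complete();
 * dropping the last kref destroys the request. */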
/* I'd like this to be the only place that manipulates
 * req->completion_ref and req->kref. */
static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
		int clear, int set)
{
	struct drbd_conf *mdev = req->w.mdev;
	unsigned s = req->rq_state;
	int c_put = 0;
	int k_put = 0;

	if (drbd_suspended(mdev) && !((s | clear) & RQ_COMPLETION_SUSP))
		set |= RQ_COMPLETION_SUSP;

	/* apply */

	req->rq_state &= ~clear;
	req->rq_state |= set;

	/* no change? */
	if (req->rq_state == s)
		return;

	/* intent: get references */

	if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
		atomic_inc(&req->completion_ref);

	if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
		inc_ap_pending(mdev);
		atomic_inc(&req->completion_ref);
	}

	if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED))
		atomic_inc(&req->completion_ref);

	if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
		kref_get(&req->kref); /* wait for the DONE */

	if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT))
		atomic_add(req->i.size >> 9, &mdev->ap_in_flight);

	if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
		atomic_inc(&req->completion_ref);

	/* progress: put references */

	if ((s & RQ_COMPLETION_SUSP) && (clear & RQ_COMPLETION_SUSP))
		++c_put;

	if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
		D_ASSERT(req->rq_state & RQ_LOCAL_PENDING);
		/* local completion may still come in later,
		 * we need to keep the req object around. */
		kref_get(&req->kref);
		++c_put;
	}

	if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
		if (req->rq_state & RQ_LOCAL_ABORTED)
			++k_put;
		else
			++c_put;
	}

	if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
		dec_ap_pending(mdev);
		++c_put;
	}

	if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED))
		++c_put;

	if ((s & RQ_EXP_BARR_ACK) && !(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
		if (req->rq_state & RQ_NET_SENT)
			atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
		++k_put;
	}

	/* potentially complete and destroy */

	if (k_put || c_put) {
		/* Completion does its own kref_put.  If we are going to
		 * kref_sub below, we need req to be still around then. */
		int at_least = k_put + !!c_put;
		int refcount = atomic_read(&req->kref.refcount);
		if (refcount < at_least)
			dev_err(DEV,
				"mod_rq_state: Logic BUG: %x -> %x: refcount = %d, should be >= %d\n",
				s, req->rq_state, refcount, at_least);
	}

	/* If we made progress, retry conflicting peer requests, if any. */
	if (req->i.waiting)
		wake_up(&mdev->misc_wait);

	if (c_put)
		k_put += drbd_req_put_completion_ref(req, m, c_put);
	if (k_put)
		kref_sub(&req->kref, k_put, drbd_req_destroy);
}

static void drbd_report_io_error(struct drbd_conf *mdev, struct drbd_request *req)
{
	char b[BDEVNAME_SIZE];

	if (!__ratelimit(&drbd_ratelimit_state))
		return;

	dev_warn(DEV, "local %s IO error sector %llu+%u on %s\n",
			(req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
			(unsigned long long)req->i.sector,
			req->i.size >> 9,
			bdevname(mdev->ldev->backing_bdev, b));
}

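/* Callers of __req_mod() hold the req_lock.  If an event makes the master
 * bio completable, __req_mod() does not call bio_endio() itself; it only
 * fills in m->bio and m->error, and the caller invokes
 * complete_master_bio() after dropping the spinlock (see
 * drbd_send_and_submit() below for the typical pattern). */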
/* obviously this could be coded as many single functions
 * instead of one huge switch,
 * or by putting the code directly in the respective locations
 * (as it has been before).
 *
 * but having it this way
 *	enforces that it is all in this one place, where it is easier to audit,
 *	it makes it obvious that whatever "event" "happens" to a request should
 *	happen "atomically" within the req_lock,
 *	and it enforces that we have to think in a very structured manner
 *	about the "events" that may happen to a request during its life time ...
 */
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m)
{
	struct drbd_conf *mdev = req->w.mdev;
	struct net_conf *nc;
	int p, rv = 0;

	if (m)
		m->bio = NULL;

	switch (what) {
	default:
		dev_err(DEV, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
		break;

	/* does not happen...
	 * initialization done in drbd_req_new
	case CREATED:
		break;
		*/

	case TO_BE_SENT: /* via network */
		/* reached via __drbd_make_request
		 * and from w_read_retry_remote */
		D_ASSERT(!(req->rq_state & RQ_NET_MASK));
		rcu_read_lock();
		nc = rcu_dereference(mdev->tconn->net_conf);
		p = nc->wire_protocol;
		rcu_read_unlock();
		req->rq_state |=
			p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
			p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
		mod_rq_state(req, m, 0, RQ_NET_PENDING);
		break;

	case TO_BE_SUBMITTED: /* locally */
		/* reached via __drbd_make_request */
		D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK));
		mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
		break;

	case COMPLETED_OK:
		if (req->rq_state & RQ_WRITE)
			mdev->writ_cnt += req->i.size >> 9;
		else
			mdev->read_cnt += req->i.size >> 9;

		mod_rq_state(req, m, RQ_LOCAL_PENDING,
				RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
		break;

	case ABORT_DISK_IO:
		mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED);
		break;

	case WRITE_COMPLETED_WITH_ERROR:
		drbd_report_io_error(mdev, req);
		__drbd_chk_io_error(mdev, DRBD_WRITE_ERROR);
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case READ_COMPLETED_WITH_ERROR:
		drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);
		drbd_report_io_error(mdev, req);
		__drbd_chk_io_error(mdev, DRBD_READ_ERROR);
		/* fall through. */
	case READ_AHEAD_COMPLETED_WITH_ERROR:
		/* it is legal to fail READA, no __drbd_chk_io_error in that case. */
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case QUEUE_FOR_NET_READ:
		/* READ or READA, and
		 * no local disk,
		 * or target area marked as invalid,
		 * or just got an io-error. */
		/* from __drbd_make_request
		 * or from bio_endio during read io-error recovery */

		/* So we can verify the handle in the answer packet.
		 * Corresponding drbd_remove_request_interval is in
		 * drbd_req_complete() */
		D_ASSERT(drbd_interval_empty(&req->i));
		drbd_insert_interval(&mdev->read_requests, &req->i);

		set_bit(UNPLUG_REMOTE, &mdev->flags);

		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0);
		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
		req->w.cb = w_send_read_req;
		drbd_queue_work(&mdev->tconn->sender_work, &req->w);
		break;

	case QUEUE_FOR_NET_WRITE:
		/* assert something? */
		/* from __drbd_make_request only */

		/* Corresponding drbd_remove_request_interval is in
		 * drbd_req_complete() */
		D_ASSERT(drbd_interval_empty(&req->i));
		drbd_insert_interval(&mdev->write_requests, &req->i);

		/* NOTE
		 * In case the req ended up on the transfer log before being
		 * queued on the worker, it could lead to this request being
		 * missed during cleanup after connection loss.
		 * So we have to do both operations here,
		 * within the same lock that protects the transfer log.
		 *
		 * _req_add_to_epoch(req); this has to be after the
		 * _maybe_start_new_epoch(req); which happened in
		 * __drbd_make_request, because we now may set the bit
		 * again ourselves to close the current epoch.
		 *
		 * Add req to the (now) current epoch (barrier). */

		/* otherwise we may lose an unplug, which may cause some remote
		 * io-scheduler timeout to expire, increasing maximum latency,
		 * hurting performance. */
		set_bit(UNPLUG_REMOTE, &mdev->flags);

		/* queue work item to send data */
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
		req->w.cb = w_send_dblock;
		drbd_queue_work(&mdev->tconn->sender_work, &req->w);

		/* close the epoch, in case it outgrew the limit */
		rcu_read_lock();
		nc = rcu_dereference(mdev->tconn->net_conf);
		p = nc->max_epoch_size;
		rcu_read_unlock();
		if (mdev->tconn->current_tle_writes >= p)
			start_new_tl_epoch(mdev->tconn);

		break;

	case QUEUE_FOR_SEND_OOS:
		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
		req->w.cb = w_send_out_of_sync;
		drbd_queue_work(&mdev->tconn->sender_work, &req->w);
		break;

	case READ_RETRY_REMOTE_CANCELED:
	case SEND_CANCELED:
	case SEND_FAILED:
		/* real cleanup will be done from tl_clear.  just update flags
		 * so it is no longer marked as on the worker queue */
		mod_rq_state(req, m, RQ_NET_QUEUED, 0);
		break;

	case HANDED_OVER_TO_NETWORK:
		/* assert something? */
		if (bio_data_dir(req->master_bio) == WRITE &&
		    !(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK))) {
			/* this is what is dangerous about protocol A:
			 * pretend it was successfully written on the peer. */
			if (req->rq_state & RQ_NET_PENDING)
				mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
			/* else: neg-ack was faster... */
			/* it is still not yet RQ_NET_DONE until the
			 * corresponding epoch barrier got acked as well,
			 * so we know what to dirty on connection loss */
		}
		mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
		break;

	case OOS_HANDED_TO_NETWORK:
		/* Was not set PENDING, no longer QUEUED, so is now DONE
		 * as far as this connection is concerned. */
		mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE);
		break;

	case CONNECTION_LOST_WHILE_PENDING:
		/* transfer log cleanup after connection loss */
		mod_rq_state(req, m,
				RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP,
				RQ_NET_DONE);
		break;

	case CONFLICT_RESOLVED:
		/* for superseded conflicting writes of multiple primaries,
		 * there is no need to keep anything in the tl, potential
		 * node crashes are covered by the activity log.
		 *
		 * If this request had been marked as RQ_POSTPONED before,
		 * it will actually not be completed, but "restarted",
		 * resubmitted from the retry worker context. */
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
		break;

	case WRITE_ACKED_BY_PEER_AND_SIS:
		req->rq_state |= RQ_NET_SIS;
	case WRITE_ACKED_BY_PEER:
		D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
		/* protocol C; successfully written on peer.
		 * Nothing more to do here.
		 * We want to keep the tl in place for all protocols, to cater
		 * for volatile write-back caches on lower level devices. */

		goto ack_common;
	case RECV_ACKED_BY_PEER:
		D_ASSERT(req->rq_state & RQ_EXP_RECEIVE_ACK);
		/* protocol B; pretends to be successfully written on peer.
		 * see also notes above in HANDED_OVER_TO_NETWORK about
		 * protocol != C */
	ack_common:
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
		break;

	case POSTPONE_WRITE:
		D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
		/* If this node has already detected the write conflict, the
		 * worker will be waiting on misc_wait.  Wake it up once this
		 * request has completed locally.
		 */
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		req->rq_state |= RQ_POSTPONED;
		if (req->i.waiting)
			wake_up(&mdev->misc_wait);
		/* Do not clear RQ_NET_PENDING. This request will make further
		 * progress via restart_conflicting_writes() or
		 * fail_postponed_requests(). Hopefully. */
		break;

	case NEG_ACKED:
		mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0);
		break;

	case FAIL_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;
		mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
		break;

	case RESTART_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;

		mod_rq_state(req, m,
				RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED,
				RQ_LOCAL_PENDING);

		rv = MR_READ;
		if (bio_data_dir(req->master_bio) == WRITE)
			rv = MR_WRITE;

		get_ldev(mdev); /* always succeeds in this call path */
		req->w.cb = w_restart_disk_io;
		drbd_queue_work(&mdev->tconn->sender_work, &req->w);
		break;

	case RESEND:
		/* Simply complete (local only) READs. */
		if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
			mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
			break;
		}

		/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
		   before the connection loss (B&C only); only P_BARRIER_ACK
		   (or the local completion?) was missing when we suspended.
		   Throwing them out of the TL here by pretending we got a BARRIER_ACK.
		   During connection handshake, we ensure that the peer was not rebooted. */
		if (!(req->rq_state & RQ_NET_OK)) {
			/* FIXME could this possibly be a req->w.cb == w_send_out_of_sync?
			 * in that case we must not set RQ_NET_PENDING. */

			mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
			if (req->w.cb) {
				drbd_queue_work(&mdev->tconn->sender_work, &req->w);
				rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
			} /* else: FIXME can this happen? */
			break;
		}
		/* else, fall through to BARRIER_ACKED */

	case BARRIER_ACKED:
		/* barrier ack for READ requests does not make sense */
		if (!(req->rq_state & RQ_WRITE))
			break;

		if (req->rq_state & RQ_NET_PENDING) {
			/* barrier came in before all requests were acked.
			 * this is bad, because if the connection is lost now,
			 * we won't be able to clean them up... */
			dev_err(DEV, "FIXME (BARRIER_ACKED but pending)\n");
		}
		/* Allowed to complete requests, even while suspended.
		 * As this is called for all requests within a matching epoch,
		 * we need to filter, and only set RQ_NET_DONE for those that
		 * have actually been on the wire. */
		mod_rq_state(req, m, RQ_COMPLETION_SUSP,
				(req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0);
		break;

	case DATA_RECEIVED:
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
		break;
	};

	return rv;
}

/* we may do a local read if:
 * - we are consistent (of course),
 * - or we are generally inconsistent,
 *   BUT we are still/already IN SYNC for this area.
 *   since size may be bigger than BM_BLOCK_SIZE,
 *   we may need to check several bits.
 */
static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size)
{
	unsigned long sbnr, ebnr;
	sector_t esector, nr_sectors;

	if (mdev->state.disk == D_UP_TO_DATE)
		return true;
	if (mdev->state.disk != D_INCONSISTENT)
		return false;
	esector = sector + (size >> 9) - 1;
	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	D_ASSERT(sector < nr_sectors);
	D_ASSERT(esector < nr_sectors);

	sbnr = BM_SECT_TO_BIT(sector);
	ebnr = BM_SECT_TO_BIT(esector);

	return drbd_bm_count_bits(mdev, sbnr, ebnr) == 0;
}

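/* remote_due_to_read_balancing() decides whether a READ that could be
 * served locally should be shipped to the peer instead.  For the striping
 * policies the decision is a pure function of the sector number:
 * stripe_shift is log2 of the stripe size in bytes (15 for 32K up to 20
 * for 1M), so (sector >> (stripe_shift - 9)) & 1 alternates between local
 * and remote once per stripe.  Example: RB_64K_STRIPING gives
 * stripe_shift = 16, and the result flips every 128 sectors (64 KiB). */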
static bool remote_due_to_read_balancing(struct drbd_conf *mdev, sector_t sector,
		enum drbd_read_balancing rbm)
{
	struct backing_dev_info *bdi;
	int stripe_shift;

	switch (rbm) {
	case RB_CONGESTED_REMOTE:
		bdi = &mdev->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
		return bdi_read_congested(bdi);
	case RB_LEAST_PENDING:
		return atomic_read(&mdev->local_cnt) >
			atomic_read(&mdev->ap_pending_cnt) + atomic_read(&mdev->rs_pending_cnt);
	case RB_32K_STRIPING:  /* stripe_shift = 15 */
	case RB_64K_STRIPING:
	case RB_128K_STRIPING:
	case RB_256K_STRIPING:
	case RB_512K_STRIPING:
	case RB_1M_STRIPING:   /* stripe_shift = 20 */
		stripe_shift = (rbm - RB_32K_STRIPING + 15);
		return (sector >> (stripe_shift - 9)) & 1;
	case RB_ROUND_ROBIN:
		return test_and_change_bit(READ_BALANCE_RR, &mdev->flags);
	case RB_PREFER_REMOTE:
		return true;
	case RB_PREFER_LOCAL:
	default:
		return false;
	}
}

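/* Note on the wait loop below: the write_requests tree is only changed
 * with the req_lock held (see __req_mod() and drbd_req_complete() above),
 * so it is safe to drop the lock while sleeping; a conflicting request
 * that makes progress sees i->waiting and wakes mdev->misc_wait (see
 * drbd_remove_request_interval() and mod_rq_state()). */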
/*
 * complete_conflicting_writes  -  wait for any conflicting write requests
 *
 * The write_requests tree contains all active write requests which we
 * currently know about.  Wait for any requests to complete which conflict with
 * the new one.
 *
 * Only way out: remove the conflicting intervals from the tree.
 */
static void complete_conflicting_writes(struct drbd_request *req)
{
	DEFINE_WAIT(wait);
	struct drbd_conf *mdev = req->w.mdev;
	struct drbd_interval *i;
	sector_t sector = req->i.sector;
	int size = req->i.size;

	i = drbd_find_overlap(&mdev->write_requests, sector, size);
	if (!i)
		return;

	for (;;) {
		prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
		i = drbd_find_overlap(&mdev->write_requests, sector, size);
		if (!i)
			break;
		/* Indicate to wake up device->misc_wait on progress. */
		i->waiting = true;
		spin_unlock_irq(&mdev->tconn->req_lock);
		schedule();
		spin_lock_irq(&mdev->tconn->req_lock);
	}
	finish_wait(&mdev->misc_wait, &wait);
}

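/* maybe_pull_ahead(): if the on-congestion policy is not "block" and the
 * peer speaks protocol version 96 or newer, compare the configured
 * thresholds (cong_fill against ap_in_flight in sectors, cong_extents
 * against the number of used activity log extents) and, once congested,
 * close the current epoch and either go Ahead (OC_PULL_AHEAD) or
 * disconnect (OC_DISCONNECT). */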
/* called within req_lock and rcu_read_lock() */
static void maybe_pull_ahead(struct drbd_conf *mdev)
{
	struct drbd_tconn *tconn = mdev->tconn;
	struct net_conf *nc;
	bool congested = false;
	enum drbd_on_congestion on_congestion;

	nc = rcu_dereference(tconn->net_conf);
	on_congestion = nc ? nc->on_congestion : OC_BLOCK;
	if (on_congestion == OC_BLOCK ||
	    tconn->agreed_pro_version < 96)
		return;

	/* If I don't even have good local storage, we can not reasonably try
	 * to pull ahead of the peer. We also need the local reference to make
	 * sure mdev->act_log is there.
	 */
	if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
		return;

	if (nc->cong_fill &&
	    atomic_read(&mdev->ap_in_flight) >= nc->cong_fill) {
		dev_info(DEV, "Congestion-fill threshold reached\n");
		congested = true;
	}

	if (mdev->act_log->used >= nc->cong_extents) {
		dev_info(DEV, "Congestion-extents threshold reached\n");
		congested = true;
	}

	if (congested) {
		/* start a new epoch for non-mirrored writes */
		start_new_tl_epoch(mdev->tconn);

		if (on_congestion == OC_PULL_AHEAD)
			_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
		else  /* nc->on_congestion == OC_DISCONNECT */
			_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
	}
	put_ldev(mdev);
}

/* If this returns false, and req->private_bio is still set,
 * this should be submitted locally.
 *
 * If it returns false, but req->private_bio is not set,
 * we do not have access to good data :(
 *
 * Otherwise, this destroys req->private_bio, if any,
 * and returns true.
 */
static bool do_remote_read(struct drbd_request *req)
{
	struct drbd_conf *mdev = req->w.mdev;
	enum drbd_read_balancing rbm;

	if (req->private_bio) {
		if (!drbd_may_do_local_read(mdev,
					req->i.sector, req->i.size)) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(mdev);
		}
	}

	if (mdev->state.pdsk != D_UP_TO_DATE)
		return false;

	if (req->private_bio == NULL)
		return true;

	/* TODO: improve read balancing decisions, take into account drbd
	 * protocol, pending requests etc. */

	rcu_read_lock();
	rbm = rcu_dereference(mdev->ldev->disk_conf)->read_balancing;
	rcu_read_unlock();

	if (rbm == RB_PREFER_LOCAL && req->private_bio)
		return false; /* submit locally */

	if (remote_due_to_read_balancing(mdev, req->i.sector, rbm)) {
		if (req->private_bio) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(mdev);
		}
		return true;
	}

	return false;
}

/* returns number of connections (== 1, for drbd 8.4)
 * expected to actually write this data,
 * which does NOT include those that we are L_AHEAD for. */
static int drbd_process_write_request(struct drbd_request *req)
{
	struct drbd_conf *mdev = req->w.mdev;
	int remote, send_oos;

	rcu_read_lock();
	remote = drbd_should_do_remote(mdev->state);
	if (remote) {
		maybe_pull_ahead(mdev);
		remote = drbd_should_do_remote(mdev->state);
	}
	send_oos = drbd_should_send_out_of_sync(mdev->state);
	rcu_read_unlock();

	/* Need to replicate writes.  Unless it is an empty flush,
	 * which is better mapped to a DRBD P_BARRIER packet,
	 * also for drbd wire protocol compatibility reasons.
	 * If this was a flush, just start a new epoch.
	 * Unless the current epoch was empty anyways, or we are not currently
	 * replicating, in which case there is no point. */
	if (unlikely(req->i.size == 0)) {
		/* The only size==0 bios we expect are empty flushes. */
		D_ASSERT(req->master_bio->bi_rw & REQ_FLUSH);
		if (remote)
			start_new_tl_epoch(mdev->tconn);
		return 0;
	}

	if (!remote && !send_oos)
		return 0;

	D_ASSERT(!(remote && send_oos));

	if (remote) {
		_req_mod(req, TO_BE_SENT);
		_req_mod(req, QUEUE_FOR_NET_WRITE);
	} else if (drbd_set_out_of_sync(mdev, req->i.sector, req->i.size))
		_req_mod(req, QUEUE_FOR_SEND_OOS);

	return remote;
}

static void
drbd_submit_req_private_bio(struct drbd_request *req)
{
	struct drbd_conf *mdev = req->w.mdev;
	struct bio *bio = req->private_bio;
	const int rw = bio_rw(bio);

	bio->bi_bdev = mdev->ldev->backing_bdev;

	/* State may have changed since we grabbed our reference on the
	 * ->ldev member. Double check, and short-circuit to endio.
	 * In case the last activity log transaction failed to get on
	 * stable storage, and this is a WRITE, we may not even submit
	 * this bio. */
	if (get_ldev(mdev)) {
		if (drbd_insert_fault(mdev,
				      rw == WRITE ? DRBD_FAULT_DT_WR
				    : rw == READ  ? DRBD_FAULT_DT_RD
				    :               DRBD_FAULT_DT_RA))
			bio_endio(bio, -EIO);
		else
			generic_make_request(bio);
		put_ldev(mdev);
	} else
		bio_endio(bio, -EIO);
}

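/* Writes that cannot get their activity log extent without blocking are
 * not submitted from the caller's context.  drbd_queue_write() parks them
 * on mdev->submit.writes and kicks the submit workqueue; the worker
 * (do_submit(), below) batches the AL transaction and then calls
 * drbd_send_and_submit() for each request. */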
static void drbd_queue_write(struct drbd_conf *mdev, struct drbd_request *req)
{
	spin_lock(&mdev->submit.lock);
	list_add_tail(&req->tl_requests, &mdev->submit.writes);
	spin_unlock(&mdev->submit.lock);
	queue_work(mdev->submit.wq, &mdev->submit.worker);
}

/* returns the new drbd_request pointer, if the caller is expected to
 * drbd_send_and_submit() it (to save latency), or NULL if we queued the
 * request on the submitter thread.
 * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
 */
struct drbd_request *
drbd_request_prepare(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
{
	const int rw = bio_data_dir(bio);
	struct drbd_request *req;

	/* allocate outside of all locks; */
	req = drbd_req_new(mdev, bio);
	if (!req) {
		dec_ap_bio(mdev);
		/* only pass the error to the upper layers.
		 * if user cannot handle io errors, that's not our business. */
		dev_err(DEV, "could not kmalloc() req\n");
		bio_endio(bio, -ENOMEM);
		return ERR_PTR(-ENOMEM);
	}
	req->start_time = start_time;

	if (!get_ldev(mdev)) {
		bio_put(req->private_bio);
		req->private_bio = NULL;
	}

	if (rw == WRITE && req->private_bio && req->i.size
	    && !test_bit(AL_SUSPENDED, &mdev->flags)) {
		if (!drbd_al_begin_io_fastpath(mdev, &req->i)) {
			drbd_queue_write(mdev, req);
			return NULL;
		}
		req->rq_state |= RQ_IN_ACT_LOG;
	}

	return req;
}

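/* drbd_send_and_submit() does the part that must happen under the
 * req_lock: wait out conflicting writes, account the request, assign it
 * to the current transfer log epoch, and queue it for the peer and/or
 * mark it for local submission.  The private bio is submitted and the
 * master bio completed only after the lock has been dropped. */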
static void drbd_send_and_submit(struct drbd_conf *mdev, struct drbd_request *req)
{
	const int rw = bio_rw(req->master_bio);
	struct bio_and_error m = { NULL, };
	bool no_remote = false;

	spin_lock_irq(&mdev->tconn->req_lock);
	if (rw == WRITE) {
		/* This may temporarily give up the req_lock,
		 * but will re-acquire it before it returns here.
		 * Needs to be before the check on drbd_suspended() */
		complete_conflicting_writes(req);
	}

	/* no more giving up req_lock from now on! */

	if (drbd_suspended(mdev)) {
		/* push back and retry: */
		req->rq_state |= RQ_POSTPONED;
		if (req->private_bio) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(mdev);
		}
		goto out;
	}

	/* Update disk stats */
	_drbd_start_io_acct(mdev, req);

	/* We fail READ/READA early, if we can not serve it.
	 * We must do this before req is registered on any lists.
	 * Otherwise, drbd_req_complete() will queue failed READ for retry. */
	if (rw != WRITE) {
		if (!do_remote_read(req) && !req->private_bio)
			goto nodata;
	}

	/* which transfer log epoch does this belong to? */
	req->epoch = atomic_read(&mdev->tconn->current_tle_nr);

	/* no point in adding empty flushes to the transfer log,
	 * they are mapped to drbd barriers already. */
	if (likely(req->i.size != 0)) {
		if (rw == WRITE)
			mdev->tconn->current_tle_writes++;

		list_add_tail(&req->tl_requests, &mdev->tconn->transfer_log);
	}

	if (rw == WRITE) {
		if (!drbd_process_write_request(req))
			no_remote = true;
	} else {
		/* We either have a private_bio, or we can read from remote.
		 * Otherwise we had done the goto nodata above. */
		if (req->private_bio == NULL) {
			_req_mod(req, TO_BE_SENT);
			_req_mod(req, QUEUE_FOR_NET_READ);
		} else
			no_remote = true;
	}

	if (req->private_bio) {
		/* needs to be marked within the same spinlock */
		_req_mod(req, TO_BE_SUBMITTED);
		/* but we need to give up the spinlock to submit */
		spin_unlock_irq(&mdev->tconn->req_lock);
		drbd_submit_req_private_bio(req);
		spin_lock_irq(&mdev->tconn->req_lock);
	} else if (no_remote) {
nodata:
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
					(unsigned long long)req->i.sector, req->i.size >> 9);
		/* A write may have been queued for send_oos, however.
		 * So we can not simply free it, we must go through drbd_req_put_completion_ref() */
	}

out:
	if (drbd_req_put_completion_ref(req, &m, 1))
		kref_put(&req->kref, drbd_req_destroy);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
}

void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
{
	struct drbd_request *req = drbd_request_prepare(mdev, bio, start_time);
	if (IS_ERR_OR_NULL(req))
		return;
	drbd_send_and_submit(mdev, req);
}

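/* submit_fast_path() runs in the submit worker: requests whose activity
 * log extents are already hot (drbd_al_begin_io_fastpath() succeeds) are
 * sent and submitted right away; everything else is left on the incoming
 * list for the blocking, batched path below. */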
static void submit_fast_path(struct drbd_conf *mdev, struct list_head *incoming)
{
	struct drbd_request *req, *tmp;
	list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
		const int rw = bio_data_dir(req->master_bio);

		if (rw == WRITE /* rw != WRITE should not even end up here! */
		    && req->private_bio && req->i.size
		    && !test_bit(AL_SUSPENDED, &mdev->flags)) {
			if (!drbd_al_begin_io_fastpath(mdev, &req->i))
				continue;

			req->rq_state |= RQ_IN_ACT_LOG;
		}

		list_del_init(&req->tl_requests);
		drbd_send_and_submit(mdev, req);
	}
}

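/* prepare_al_transaction_nonblock() moves those incoming requests that can
 * get an activity log slot without waiting onto the pending list (under
 * al_lock).  do_submit() then commits one AL transaction for the whole
 * batch via drbd_al_begin_io_commit() and submits the requests,
 * amortizing the meta data write over many requests. */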
static bool prepare_al_transaction_nonblock(struct drbd_conf *mdev,
					    struct list_head *incoming,
					    struct list_head *pending)
{
	struct drbd_request *req, *tmp;
	int wake = 0;
	int err;

	spin_lock_irq(&mdev->al_lock);
	list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
		err = drbd_al_begin_io_nonblock(mdev, &req->i);
		if (err == -EBUSY)
			wake = 1;
		if (err)
			continue;
		req->rq_state |= RQ_IN_ACT_LOG;
		list_move_tail(&req->tl_requests, pending);
	}
	spin_unlock_irq(&mdev->al_lock);
	if (wake)
		wake_up(&mdev->al_wait);

	return !list_empty(pending);
}

void do_submit(struct work_struct *ws)
{
	struct drbd_conf *mdev = container_of(ws, struct drbd_conf, submit.worker);
	LIST_HEAD(incoming);
	LIST_HEAD(pending);
	struct drbd_request *req, *tmp;

	for (;;) {
		spin_lock(&mdev->submit.lock);
		list_splice_tail_init(&mdev->submit.writes, &incoming);
		spin_unlock(&mdev->submit.lock);

		submit_fast_path(mdev, &incoming);
		if (list_empty(&incoming))
			break;

		wait_event(mdev->al_wait, prepare_al_transaction_nonblock(mdev, &incoming, &pending));
		drbd_al_begin_io_commit(mdev, false);

		list_for_each_entry_safe(req, tmp, &pending, tl_requests) {
			list_del_init(&req->tl_requests);
			drbd_send_and_submit(mdev, req);
		}
	}
}

void drbd_make_request(struct request_queue *q, struct bio *bio)
{
	struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
	unsigned long start_time;

	start_time = jiffies;

	/*
	 * what we "blindly" assume:
	 */
	D_ASSERT(IS_ALIGNED(bio->bi_size, 512));

	inc_ap_bio(mdev);
	__drbd_make_request(mdev, bio, start_time);
}

/* This is called by bio_add_page().
 *
 * q->max_hw_sectors and other global limits are already enforced there.
 *
 * We need to call down to our lower level device,
 * in case it has special restrictions.
 *
 * We also may need to enforce configured max-bio-bvecs limits.
 *
 * As long as the BIO is empty we have to allow at least one bvec,
 * regardless of size and offset, so no need to ask lower levels.
 */
int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
{
	struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
	unsigned int bio_size = bvm->bi_size;
	int limit = DRBD_MAX_BIO_SIZE;
	int backing_limit;

	if (bio_size && get_ldev(mdev)) {
		struct request_queue * const b =
			mdev->ldev->backing_bdev->bd_disk->queue;
		if (b->merge_bvec_fn) {
			backing_limit = b->merge_bvec_fn(b, bvm, bvec);
			limit = min(limit, backing_limit);
		}
		put_ldev(mdev);
	}
	return limit;
}

struct drbd_request *find_oldest_request(struct drbd_tconn *tconn)
{
	/* Walk the transfer log,
	 * and find the oldest not yet completed request */
	struct drbd_request *r;
	list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
		if (atomic_read(&r->completion_ref))
			return r;
	}
	return NULL;
}

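/* request_timer_fn() implements both the network timeout ("ent", ko-count
 * times timeout) and the local disk-timeout ("dt").  Only the oldest
 * request that still holds a completion_ref is examined; if it exceeded
 * its deadline, the connection is forced to C_TIMEOUT resp. the disk is
 * detached (DRBD_FORCE_DETACH).  The timer then re-arms itself relative
 * to that request's start_time. */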
void request_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;
	struct drbd_tconn *tconn = mdev->tconn;
	struct drbd_request *req; /* oldest request */
	struct net_conf *nc;
	unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
	unsigned long now;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	if (nc && mdev->state.conn >= C_WF_REPORT_PARAMS)
		ent = nc->timeout * HZ/10 * nc->ko_count;

	if (get_ldev(mdev)) { /* implicit state.disk >= D_INCONSISTENT */
		dt = rcu_dereference(mdev->ldev->disk_conf)->disk_timeout * HZ / 10;
		put_ldev(mdev);
	}
	rcu_read_unlock();

	et = min_not_zero(dt, ent);

	if (!et)
		return; /* Recurring timer stopped */

	now = jiffies;

	spin_lock_irq(&tconn->req_lock);
	req = find_oldest_request(tconn);
	if (!req) {
		spin_unlock_irq(&tconn->req_lock);
		mod_timer(&mdev->request_timer, now + et);
		return;
	}

	/* The request is considered timed out, if
	 * - we have some effective timeout from the configuration,
	 *   with above state restrictions applied,
	 * - the oldest request is waiting for a response from the network
	 *   resp. the local disk,
	 * - the oldest request is in fact older than the effective timeout,
	 * - the connection was established (resp. disk was attached)
	 *   for longer than the timeout already.
	 * Note that for 32bit jiffies and very stable connections/disks,
	 * we may have a wrap around, which is caught by
	 * !time_in_range(now, last_..._jif, last_..._jif + timeout).
	 *
	 * Side effect: once per 32bit wrap-around interval, which means every
	 * ~198 days with 250 HZ, we have a window where the timeout would need
	 * to expire twice (worst case) to become effective. Good enough.
	 */
	if (ent && req->rq_state & RQ_NET_PENDING &&
		 time_after(now, req->start_time + ent) &&
		!time_in_range(now, tconn->last_reconnect_jif, tconn->last_reconnect_jif + ent)) {
		dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
		_drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
	}
	if (dt && req->rq_state & RQ_LOCAL_PENDING && req->w.mdev == mdev &&
		 time_after(now, req->start_time + dt) &&
		!time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) {
		dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
		__drbd_chk_io_error(mdev, DRBD_FORCE_DETACH);
	}
	nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
	spin_unlock_irq(&tconn->req_lock);
	mod_timer(&mdev->request_timer, nt);
}