/*
   drbd_req.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_req.h"


static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);

/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
{
	const int rw = bio_data_dir(req->master_bio);
	int cpu;
	cpu = part_stat_lock();
	part_round_stats(cpu, &device->vdisk->part0);
	part_stat_inc(cpu, &device->vdisk->part0, ios[rw]);
	part_stat_add(cpu, &device->vdisk->part0, sectors[rw], req->i.size >> 9);
	(void) cpu; /* The macro invocations above want the cpu argument, I do not like
		       the compiler warning about cpu only assigned but never used... */
	part_inc_in_flight(&device->vdisk->part0, rw);
	part_stat_unlock();
}

/* Update disk stats when completing request upwards */
static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
{
	int rw = bio_data_dir(req->master_bio);
	unsigned long duration = jiffies - req->start_jif;
	int cpu;
	cpu = part_stat_lock();
	part_stat_add(cpu, &device->vdisk->part0, ticks[rw], duration);
	part_round_stats(cpu, &device->vdisk->part0);
	part_dec_in_flight(&device->vdisk->part0, rw);
	part_stat_unlock();
}

static struct drbd_request *drbd_req_new(struct drbd_device *device,
					 struct bio *bio_src)
{
	struct drbd_request *req;

	req = mempool_alloc(drbd_request_mempool, GFP_NOIO | __GFP_ZERO);
	if (!req)
		return NULL;

	drbd_req_make_private_bio(req, bio_src);
	req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
	req->device = device;
	req->master_bio = bio_src;
	req->epoch = 0;

	drbd_clear_interval(&req->i);
	req->i.sector = bio_src->bi_iter.bi_sector;
	req->i.size = bio_src->bi_iter.bi_size;
	req->i.local = true;
	req->i.waiting = false;

	INIT_LIST_HEAD(&req->tl_requests);
	INIT_LIST_HEAD(&req->w.list);
	INIT_LIST_HEAD(&req->req_pending_master_completion);
	INIT_LIST_HEAD(&req->req_pending_local);

	/* one reference to be put by __drbd_make_request */
	atomic_set(&req->completion_ref, 1);
	/* one kref as long as completion_ref > 0 */
	kref_init(&req->kref);
	return req;
}

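/* Note on request lifetime, as set up above: completion_ref counts the
 * reasons the master_bio may not be completed yet (local submission still
 * pending, network send still pending, completion suspended, ...), while
 * kref counts the reasons the request object itself must stay allocated.
 * Both are normally manipulated only via mod_rq_state() further down. */
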
static void drbd_remove_request_interval(struct rb_root *root,
					 struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	struct drbd_interval *i = &req->i;

	drbd_remove_interval(root, i);

	/* Wake up any processes waiting for this request to complete.  */
	if (i->waiting)
		wake_up(&device->misc_wait);
}

void drbd_req_destroy(struct kref *kref)
{
	struct drbd_request *req = container_of(kref, struct drbd_request, kref);
	struct drbd_device *device = req->device;
	const unsigned s = req->rq_state;

	if ((req->master_bio && !(s & RQ_POSTPONED)) ||
		atomic_read(&req->completion_ref) ||
		(s & RQ_LOCAL_PENDING) ||
		((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
		drbd_err(device, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
				s, atomic_read(&req->completion_ref));
		return;
	}

	/* If called from mod_rq_state (expected normal case) or
	 * drbd_send_and_submit (the less likely normal path), this holds the
	 * req_lock, and req->tl_requests will typically be on ->transfer_log,
	 * though it may be still empty (never added to the transfer log).
	 *
	 * If called from do_retry(), we do NOT hold the req_lock, but we are
	 * still allowed to unconditionally list_del(&req->tl_requests),
	 * because it will be on a local on-stack list only. */
	list_del_init(&req->tl_requests);

	/* finally remove the request from the conflict detection
	 * respective block_id verification interval tree. */
	if (!drbd_interval_empty(&req->i)) {
		struct rb_root *root;

		if (s & RQ_WRITE)
			root = &device->write_requests;
		else
			root = &device->read_requests;
		drbd_remove_request_interval(root, req);
	} else if (s & (RQ_NET_MASK & ~RQ_NET_DONE) && req->i.size != 0)
		drbd_err(device, "drbd_req_destroy: Logic BUG: interval empty, but: rq_state=0x%x, sect=%llu, size=%u\n",
			s, (unsigned long long)req->i.sector, req->i.size);

	/* if it was a write, we may have to set the corresponding
	 * bit(s) out-of-sync first. If it had a local part, we need to
	 * release the reference to the activity log. */
	if (s & RQ_WRITE) {
		/* Set out-of-sync unless both OK flags are set
		 * (local only or remote failed).
		 * Other places where we set out-of-sync:
		 * READ with local io-error */

		/* There is a special case:
		 * we may notice late that IO was suspended,
		 * and postpone, or schedule for retry, a write,
		 * before it even was submitted or sent.
		 * In that case we do not want to touch the bitmap at all.
		 */
		if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
			if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
				drbd_set_out_of_sync(device, req->i.sector, req->i.size);

			if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
				drbd_set_in_sync(device, req->i.sector, req->i.size);
		}

		/* one might be tempted to move the drbd_al_complete_io
		 * to the local io completion callback drbd_request_endio.
		 * but, if this was a mirror write, we may only
		 * drbd_al_complete_io after this is RQ_NET_DONE,
		 * otherwise the extent could be dropped from the al
		 * before it has actually been written on the peer.
		 * if we crash before our peer knows about the request,
		 * but after the extent has been dropped from the al,
		 * we would forget to resync the corresponding extent.
		 */
		if (s & RQ_IN_ACT_LOG) {
			if (get_ldev_if_state(device, D_FAILED)) {
				drbd_al_complete_io(device, &req->i);
				put_ldev(device);
			} else if (__ratelimit(&drbd_ratelimit_state)) {
				drbd_warn(device, "Should have called drbd_al_complete_io(, %llu, %u), "
					  "but my Disk seems to have failed :(\n",
					  (unsigned long long) req->i.sector, req->i.size);
			}
		}
	}

	mempool_free(req, drbd_request_mempool);
}

static void wake_all_senders(struct drbd_connection *connection)
{
	wake_up(&connection->sender_work.q_wait);
}

/* must hold resource->req_lock */
void start_new_tl_epoch(struct drbd_connection *connection)
{
	/* no point closing an epoch, if it is empty, anyways. */
	if (connection->current_tle_writes == 0)
		return;

	connection->current_tle_writes = 0;
	atomic_inc(&connection->current_tle_nr);
	wake_all_senders(connection);
}

void complete_master_bio(struct drbd_device *device,
		struct bio_and_error *m)
{
	bio_endio(m->bio, m->error);
	dec_ap_bio(device);
}

/* Helper for __req_mod().
 * Set m->bio to the master bio, if it is fit to be completed,
 * or leave it alone (it is initialized to NULL in __req_mod),
 * if it has already been completed, or cannot be completed yet.
 * If m->bio is set, the error status to be returned is placed in m->error.
 */
static
void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
{
	const unsigned s = req->rq_state;
	struct drbd_device *device = req->device;
	int rw;
	int error, ok;

	/* we must not complete the master bio, while it is
	 *	still being processed by _drbd_send_zc_bio (drbd_send_dblock)
	 *	not yet acknowledged by the peer
	 *	not yet completed by the local io subsystem
	 * these flags may get cleared in any order by
	 *	the worker,
	 *	the receiver,
	 *	the bio_endio completion callbacks.
	 */
	if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
	    (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
	    (s & RQ_COMPLETION_SUSP)) {
		drbd_err(device, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
		return;
	}

	if (!req->master_bio) {
		drbd_err(device, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
		return;
	}

	rw = bio_rw(req->master_bio);

	/*
	 * figure out whether to report success or failure.
	 *
	 * report success when at least one of the operations succeeded.
	 * or, to put the other way,
	 * only report failure, when both operations failed.
	 *
	 * what to do about the failures is handled elsewhere.
	 * what we need to do here is just: complete the master_bio.
	 *
	 * local completion error, if any, has been stored as ERR_PTR
	 * in private_bio within drbd_request_endio.
	 */
	ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
	error = PTR_ERR(req->private_bio);

	/* Before we can signal completion to the upper layers,
	 * we may need to close the current transfer log epoch.
	 * We are within the request lock, so we can simply compare
	 * the request epoch number with the current transfer log
	 * epoch number.  If they match, increase the current_tle_nr,
	 * and reset the transfer log epoch write_cnt.
	 */
	if (rw == WRITE &&
	    req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
		start_new_tl_epoch(first_peer_device(device)->connection);

	/* Update disk stats */
	_drbd_end_io_acct(device, req);

	/* If READ failed,
	 * have it be pushed back to the retry work queue,
	 * so it will re-enter __drbd_make_request(),
	 * and be re-assigned to a suitable local or remote path,
	 * or failed if we do not have access to good data anymore.
	 *
	 * Unless it was failed early by __drbd_make_request(),
	 * because no path was available, in which case
	 * it was not even added to the transfer_log.
	 *
	 * READA may fail, and will not be retried.
	 *
	 * WRITE should have used all available paths already.
	 */
	if (!ok && rw == READ && !list_empty(&req->tl_requests))
		req->rq_state |= RQ_POSTPONED;

	if (!(req->rq_state & RQ_POSTPONED)) {
		m->error = ok ? 0 : (error ?: -EIO);
		m->bio = req->master_bio;
		req->master_bio = NULL;
		/* We leave it in the tree, to be able to verify later
		 * write-acks in protocol != C during resync.
		 * But we mark it as "complete", so it won't be counted as
		 * conflict in a multi-primary setup. */
		req->i.completed = true;
	}

	if (req->i.waiting)
		wake_up(&device->misc_wait);

	/* Either we are about to complete to upper layers,
	 * or we will restart this request.
	 * In either case, the request object will be destroyed soon,
	 * so better remove it from all lists. */
	list_del_init(&req->req_pending_master_completion);
}

/* still holds resource->req_lock */
static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
{
	struct drbd_device *device = req->device;
	D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));

	if (!atomic_sub_and_test(put, &req->completion_ref))
		return 0;

	drbd_req_complete(req, m);

	if (req->rq_state & RQ_POSTPONED) {
		/* don't destroy the req object just yet,
		 * but queue it for retry */
		drbd_restart_request(req);
		return 0;
	}

	return 1;
}

/* I'd like this to be the only place that manipulates
 * req->completion_ref and req->kref. */
static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
		int clear, int set)
{
	struct drbd_device *device = req->device;
	unsigned s = req->rq_state;
	int c_put = 0;
	int k_put = 0;

	if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP))
		set |= RQ_COMPLETION_SUSP;

	/* apply */

	req->rq_state &= ~clear;
	req->rq_state |= set;

	/* no change? */
	if (req->rq_state == s)
		return;

	/* intent: get references */

	if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
		atomic_inc(&req->completion_ref);

	if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
		inc_ap_pending(device);
		atomic_inc(&req->completion_ref);
	}

	if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED)) {
		atomic_inc(&req->completion_ref);
	}

	if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
		kref_get(&req->kref); /* wait for the DONE */

	if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT)) {
		/* potentially already completed in the asender thread */
		if (!(s & RQ_NET_DONE))
			atomic_add(req->i.size >> 9, &device->ap_in_flight);
	}

	if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
		atomic_inc(&req->completion_ref);

	/* progress: put references */

	if ((s & RQ_COMPLETION_SUSP) && (clear & RQ_COMPLETION_SUSP))
		++c_put;

	if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
		D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING);
		/* local completion may still come in later,
		 * we need to keep the req object around. */
		kref_get(&req->kref);
		++c_put;
	}

	if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
		if (req->rq_state & RQ_LOCAL_ABORTED)
			++k_put;
		else
			++c_put;
		list_del_init(&req->req_pending_local);
	}

	if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
		dec_ap_pending(device);
		++c_put;
		req->acked_jif = jiffies;
	}

	if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED))
		++c_put;

	if (!(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
		if (s & RQ_NET_SENT)
			atomic_sub(req->i.size >> 9, &device->ap_in_flight);
		if (s & RQ_EXP_BARR_ACK)
			++k_put;
		req->net_done_jif = jiffies;
	}

	/* potentially complete and destroy */

	if (k_put || c_put) {
		/* Completion does its own kref_put.  If we are going to
		 * kref_sub below, we need req to be still around then. */
		int at_least = k_put + !!c_put;
		int refcount = atomic_read(&req->kref.refcount);
		if (refcount < at_least)
			drbd_err(device,
				"mod_rq_state: Logic BUG: %x -> %x: refcount = %d, should be >= %d\n",
				s, req->rq_state, refcount, at_least);
	}

	/* If we made progress, retry conflicting peer requests, if any. */
	if (req->i.waiting)
		wake_up(&device->misc_wait);

	if (c_put)
		k_put += drbd_req_put_completion_ref(req, m, c_put);
	if (k_put)
		kref_sub(&req->kref, k_put, drbd_req_destroy);
}

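/* Example of the clear/set protocol above: the COMPLETED_OK event in
 * __req_mod() calls mod_rq_state(req, m, RQ_LOCAL_PENDING,
 * RQ_LOCAL_COMPLETED|RQ_LOCAL_OK); clearing RQ_LOCAL_PENDING drops the
 * completion_ref that was taken when RQ_LOCAL_PENDING was first set, which
 * may in turn complete the master bio via drbd_req_put_completion_ref(). */
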
static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
{
	char b[BDEVNAME_SIZE];

	if (!__ratelimit(&drbd_ratelimit_state))
		return;

	drbd_warn(device, "local %s IO error sector %llu+%u on %s\n",
			(req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
			(unsigned long long)req->i.sector,
			req->i.size >> 9,
			bdevname(device->ldev->backing_bdev, b));
}

/* Helper for HANDED_OVER_TO_NETWORK.
 * Is this a protocol A write (neither WRITE_ACK nor RECEIVE_ACK expected)?
 * Is it also still "PENDING"?
 * --> If so, clear PENDING and set NET_OK below.
 * If it is a protocol A write, but not RQ_PENDING anymore, neg-ack was faster
 * (and we must not set RQ_NET_OK) */
static inline bool is_pending_write_protocol_A(struct drbd_request *req)
{
	return (req->rq_state &
		(RQ_WRITE|RQ_NET_PENDING|RQ_EXP_WRITE_ACK|RQ_EXP_RECEIVE_ACK))
		== (RQ_WRITE|RQ_NET_PENDING);
}

/* obviously this could be coded as many single functions
 * instead of one huge switch,
 * or by putting the code directly in the respective locations
 * (as it has been before).
 *
 * but having it this way
 *  enforces that it is all in this one place, where it is easier to audit,
 *  it makes it obvious that whatever "event" "happens" to a request should
 *  happen "atomically" within the req_lock,
 *  and it enforces that we have to think in a very structured manner
 *  about the "events" that may happen to a request during its life time ...
 */
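/* Calling convention, as used throughout this file: __req_mod() runs under
 * resource->req_lock; if it sets m->bio, the caller completes that bio with
 * complete_master_bio() after dropping the lock, as drbd_send_and_submit()
 * below does with its local struct bio_and_error. */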
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m)
{
	struct drbd_device *const device = req->device;
	struct drbd_peer_device *const peer_device = first_peer_device(device);
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	struct net_conf *nc;
	int p, rv = 0;

	if (m)
		m->bio = NULL;

	switch (what) {
	default:
		drbd_err(device, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
		break;

	/* does not happen...
	 * initialization done in drbd_req_new
	case CREATED:
		break;
		*/

	case TO_BE_SENT: /* via network */
		/* reached via __drbd_make_request
		 * and from w_read_retry_remote */
		D_ASSERT(device, !(req->rq_state & RQ_NET_MASK));
		rcu_read_lock();
		nc = rcu_dereference(connection->net_conf);
		p = nc->wire_protocol;
		rcu_read_unlock();
		req->rq_state |=
			p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
			p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
		mod_rq_state(req, m, 0, RQ_NET_PENDING);
		break;

	case TO_BE_SUBMITTED: /* locally */
		/* reached via __drbd_make_request */
		D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK));
		mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
		break;

	case COMPLETED_OK:
		if (req->rq_state & RQ_WRITE)
			device->writ_cnt += req->i.size >> 9;
		else
			device->read_cnt += req->i.size >> 9;

		mod_rq_state(req, m, RQ_LOCAL_PENDING,
				RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
		break;

	case ABORT_DISK_IO:
		mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED);
		break;

	case WRITE_COMPLETED_WITH_ERROR:
		drbd_report_io_error(device, req);
		__drbd_chk_io_error(device, DRBD_WRITE_ERROR);
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case READ_COMPLETED_WITH_ERROR:
		drbd_set_out_of_sync(device, req->i.sector, req->i.size);
		drbd_report_io_error(device, req);
		__drbd_chk_io_error(device, DRBD_READ_ERROR);
		/* fall through. */
	case READ_AHEAD_COMPLETED_WITH_ERROR:
		/* it is legal to fail READA, no __drbd_chk_io_error in that case. */
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case DISCARD_COMPLETED_NOTSUPP:
	case DISCARD_COMPLETED_WITH_ERROR:
		/* I'd rather not detach from local disk just because it
		 * failed a REQ_DISCARD. */
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case QUEUE_FOR_NET_READ:
		/* READ or READA, and
		 * no local disk,
		 * or target area marked as invalid,
		 * or just got an io-error. */
		/* from __drbd_make_request
		 * or from bio_endio during read io-error recovery */

		/* So we can verify the handle in the answer packet.
		 * Corresponding drbd_remove_request_interval is in
		 * drbd_req_complete() */
		D_ASSERT(device, drbd_interval_empty(&req->i));
		drbd_insert_interval(&device->read_requests, &req->i);

		set_bit(UNPLUG_REMOTE, &device->flags);

		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
		req->w.cb = w_send_read_req;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case QUEUE_FOR_NET_WRITE:
		/* assert something? */
		/* from __drbd_make_request only */

		/* Corresponding drbd_remove_request_interval is in
		 * drbd_req_complete() */
		D_ASSERT(device, drbd_interval_empty(&req->i));
		drbd_insert_interval(&device->write_requests, &req->i);

		/* NOTE
		 * In case the req ended up on the transfer log before being
		 * queued on the worker, it could lead to this request being
		 * missed during cleanup after connection loss.
		 * So we have to do both operations here,
		 * within the same lock that protects the transfer log.
		 *
		 * _req_add_to_epoch(req); this has to be after the
		 * _maybe_start_new_epoch(req); which happened in
		 * __drbd_make_request, because we now may set the bit
		 * again ourselves to close the current epoch.
		 *
		 * Add req to the (now) current epoch (barrier). */

		/* otherwise we may lose an unplug, which may cause some remote
		 * io-scheduler timeout to expire, increasing maximum latency,
		 * hurting performance. */
		set_bit(UNPLUG_REMOTE, &device->flags);

		/* queue work item to send data */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
		req->w.cb = w_send_dblock;
		drbd_queue_work(&connection->sender_work,
				&req->w);

		/* close the epoch, in case it outgrew the limit */
		rcu_read_lock();
		nc = rcu_dereference(connection->net_conf);
		p = nc->max_epoch_size;
		rcu_read_unlock();
		if (connection->current_tle_writes >= p)
			start_new_tl_epoch(connection);

		break;

	case QUEUE_FOR_SEND_OOS:
		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
		req->w.cb = w_send_out_of_sync;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case READ_RETRY_REMOTE_CANCELED:
	case SEND_CANCELED:
	case SEND_FAILED:
		/* real cleanup will be done from tl_clear.  just update flags
		 * so it is no longer marked as on the worker queue */
		mod_rq_state(req, m, RQ_NET_QUEUED, 0);
		break;

	case HANDED_OVER_TO_NETWORK:
		/* assert something? */
		if (is_pending_write_protocol_A(req))
			/* this is what is dangerous about protocol A:
			 * pretend it was successfully written on the peer. */
			mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING,
						RQ_NET_SENT|RQ_NET_OK);
		else
			mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
		/* It is still not yet RQ_NET_DONE until the
		 * corresponding epoch barrier got acked as well,
		 * so we know what to dirty on connection loss. */
		break;

	case OOS_HANDED_TO_NETWORK:
		/* Was not set PENDING, no longer QUEUED, so is now DONE
		 * as far as this connection is concerned. */
		mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE);
		break;

	case CONNECTION_LOST_WHILE_PENDING:
		/* transfer log cleanup after connection loss */
		mod_rq_state(req, m,
				RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP,
				RQ_NET_DONE);
		break;

	case CONFLICT_RESOLVED:
		/* for superseded conflicting writes of multiple primaries,
		 * there is no need to keep anything in the tl, potential
		 * node crashes are covered by the activity log.
		 *
		 * If this request had been marked as RQ_POSTPONED before,
		 * it will actually not be completed, but "restarted",
		 * resubmitted from the retry worker context. */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
		break;

	case WRITE_ACKED_BY_PEER_AND_SIS:
		req->rq_state |= RQ_NET_SIS;
	case WRITE_ACKED_BY_PEER:
		/* Normal operation protocol C: successfully written on peer.
		 * During resync, even in protocol != C,
		 * we requested an explicit write ack anyways.
		 * Which means we cannot even assert anything here.
		 * Nothing more to do here.
		 * We want to keep the tl in place for all protocols, to cater
		 * for volatile write-back caches on lower level devices. */
		goto ack_common;
	case RECV_ACKED_BY_PEER:
		D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK);
		/* protocol B; pretends to be successfully written on peer.
		 * see also notes above in HANDED_OVER_TO_NETWORK about
		 * protocol != C */
	ack_common:
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
		break;

	case POSTPONE_WRITE:
		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
		/* If this node has already detected the write conflict, the
		 * worker will be waiting on misc_wait.  Wake it up once this
		 * request has completed locally.
		 */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		req->rq_state |= RQ_POSTPONED;
		if (req->i.waiting)
			wake_up(&device->misc_wait);
		/* Do not clear RQ_NET_PENDING. This request will make further
		 * progress via restart_conflicting_writes() or
		 * fail_postponed_requests(). Hopefully. */
		break;

	case NEG_ACKED:
		mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0);
		break;

	case FAIL_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;
		mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
		break;

	case RESTART_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;

		mod_rq_state(req, m,
				RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED,
				RQ_LOCAL_PENDING);

		rv = MR_READ;
		if (bio_data_dir(req->master_bio) == WRITE)
			rv = MR_WRITE;

		get_ldev(device); /* always succeeds in this call path */
		req->w.cb = w_restart_disk_io;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case RESEND:
		/* Simply complete (local only) READs. */
		if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
			mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
			break;
		}

		/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
		   before the connection loss (B&C only); only P_BARRIER_ACK
		   (or the local completion?) was missing when we suspended.
		   Throwing them out of the TL here by pretending we got a BARRIER_ACK.
		   During connection handshake, we ensure that the peer was not rebooted. */
		if (!(req->rq_state & RQ_NET_OK)) {
			/* FIXME could this possibly be a req->dw.cb == w_send_out_of_sync?
			 * in that case we must not set RQ_NET_PENDING. */

			mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
			if (req->w.cb) {
				/* w.cb expected to be w_send_dblock, or w_send_read_req */
				drbd_queue_work(&connection->sender_work,
						&req->w);
				rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
			} /* else: FIXME can this happen? */
			break;
		}
		/* else, fall through to BARRIER_ACKED */

	case BARRIER_ACKED:
		/* barrier ack for READ requests does not make sense */
		if (!(req->rq_state & RQ_WRITE))
			break;

		if (req->rq_state & RQ_NET_PENDING) {
			/* barrier came in before all requests were acked.
			 * this is bad, because if the connection is lost now,
			 * we won't be able to clean them up... */
			drbd_err(device, "FIXME (BARRIER_ACKED but pending)\n");
		}
		/* Allowed to complete requests, even while suspended.
		 * As this is called for all requests within a matching epoch,
		 * we need to filter, and only set RQ_NET_DONE for those that
		 * have actually been on the wire. */
		mod_rq_state(req, m, RQ_COMPLETION_SUSP,
				(req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0);
		break;

	case DATA_RECEIVED:
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
		break;

	case QUEUE_AS_DRBD_BARRIER:
		start_new_tl_epoch(connection);
		mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
		break;
	};

	return rv;
}

/* we may do a local read if:
 * - we are consistent (of course),
 * - or we are generally inconsistent,
 *   BUT we are still/already IN SYNC for this area.
 *   since size may be bigger than BM_BLOCK_SIZE,
 *   we may need to check several bits.
 */
static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size)
{
	unsigned long sbnr, ebnr;
	sector_t esector, nr_sectors;

	if (device->state.disk == D_UP_TO_DATE)
		return true;
	if (device->state.disk != D_INCONSISTENT)
		return false;
	esector = sector + (size >> 9) - 1;
	nr_sectors = drbd_get_capacity(device->this_bdev);
	D_ASSERT(device, sector < nr_sectors);
	D_ASSERT(device, esector < nr_sectors);

	sbnr = BM_SECT_TO_BIT(sector);
	ebnr = BM_SECT_TO_BIT(esector);

	return drbd_bm_count_bits(device, sbnr, ebnr) == 0;
}
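
/* Example for the bitmap check above: assuming the usual 4 KiB per-bit
 * bitmap granularity (BM_BLOCK_SIZE), a 16 KiB read spans four bitmap bits;
 * drbd_bm_count_bits() must find all of them clear (in sync) before the
 * read is served locally on an otherwise D_INCONSISTENT disk. */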

static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
					 enum drbd_read_balancing rbm)
{
	struct backing_dev_info *bdi;
	int stripe_shift;

	switch (rbm) {
	case RB_CONGESTED_REMOTE:
		bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
		return bdi_read_congested(bdi);
	case RB_LEAST_PENDING:
		return atomic_read(&device->local_cnt) >
			atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
	case RB_32K_STRIPING:  /* stripe_shift = 15 */
	case RB_64K_STRIPING:
	case RB_128K_STRIPING:
	case RB_256K_STRIPING:
	case RB_512K_STRIPING:
	case RB_1M_STRIPING:   /* stripe_shift = 20 */
		stripe_shift = (rbm - RB_32K_STRIPING + 15);
		return (sector >> (stripe_shift - 9)) & 1;
	case RB_ROUND_ROBIN:
		return test_and_change_bit(READ_BALANCE_RR, &device->flags);
	case RB_PREFER_REMOTE:
		return true;
	case RB_PREFER_LOCAL:
	default:
		return false;
	}
}
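
/* Worked example for the striping modes above: RB_32K_STRIPING yields
 * stripe_shift = 15, i.e. 2^15 bytes = 32 KiB = 64 sectors per stripe, so
 * (sector >> 6) & 1 alternates between local (0) and remote (1) every
 * 64 sectors; RB_1M_STRIPING does the same with 2048-sector stripes. */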

/*
 * complete_conflicting_writes  -  wait for any conflicting write requests
 *
 * The write_requests tree contains all active write requests which we
 * currently know about.  Wait for any requests to complete which conflict with
 * the new one.
 *
 * Only way out: remove the conflicting intervals from the tree.
 */
static void complete_conflicting_writes(struct drbd_request *req)
{
	DEFINE_WAIT(wait);
	struct drbd_device *device = req->device;
	struct drbd_interval *i;
	sector_t sector = req->i.sector;
	int size = req->i.size;

	i = drbd_find_overlap(&device->write_requests, sector, size);
	if (!i)
		return;

	for (;;) {
		prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
		i = drbd_find_overlap(&device->write_requests, sector, size);
		if (!i)
			break;
		/* Indicate to wake up device->misc_wait on progress.  */
		i->waiting = true;
		spin_unlock_irq(&device->resource->req_lock);
		schedule();
		spin_lock_irq(&device->resource->req_lock);
	}
	finish_wait(&device->misc_wait, &wait);
}

/* called within req_lock and rcu_read_lock() */
static void maybe_pull_ahead(struct drbd_device *device)
{
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct net_conf *nc;
	bool congested = false;
	enum drbd_on_congestion on_congestion;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	on_congestion = nc ? nc->on_congestion : OC_BLOCK;
	rcu_read_unlock();
	if (on_congestion == OC_BLOCK ||
	    connection->agreed_pro_version < 96)
		return;

	if (on_congestion == OC_PULL_AHEAD && device->state.conn == C_AHEAD)
		return; /* nothing to do ... */

	/* If I don't even have good local storage, we can not reasonably try
	 * to pull ahead of the peer. We also need the local reference to make
	 * sure device->act_log is there.
	 */
	if (!get_ldev_if_state(device, D_UP_TO_DATE))
		return;

	if (nc->cong_fill &&
	    atomic_read(&device->ap_in_flight) >= nc->cong_fill) {
		drbd_info(device, "Congestion-fill threshold reached\n");
		congested = true;
	}

	if (device->act_log->used >= nc->cong_extents) {
		drbd_info(device, "Congestion-extents threshold reached\n");
		congested = true;
	}

	if (congested) {
		/* start a new epoch for non-mirrored writes */
		start_new_tl_epoch(first_peer_device(device)->connection);

		if (on_congestion == OC_PULL_AHEAD)
			_drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL);
		else  /*nc->on_congestion == OC_DISCONNECT */
			_drbd_set_state(_NS(device, conn, C_DISCONNECTING), 0, NULL);
	}
	put_ldev(device);
}

/* If this returns false, and req->private_bio is still set,
 * this should be submitted locally.
 *
 * If it returns false, but req->private_bio is not set,
 * we do not have access to good data :(
 *
 * Otherwise, this destroys req->private_bio, if any,
 * and returns true.
 */
static bool do_remote_read(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	enum drbd_read_balancing rbm;

	if (req->private_bio) {
		if (!drbd_may_do_local_read(device,
					req->i.sector, req->i.size)) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
	}

	if (device->state.pdsk != D_UP_TO_DATE)
		return false;

	if (req->private_bio == NULL)
		return true;

	/* TODO: improve read balancing decisions, take into account drbd
	 * protocol, pending requests etc. */

	rcu_read_lock();
	rbm = rcu_dereference(device->ldev->disk_conf)->read_balancing;
	rcu_read_unlock();

	if (rbm == RB_PREFER_LOCAL && req->private_bio)
		return false; /* submit locally */

	if (remote_due_to_read_balancing(device, req->i.sector, rbm)) {
		if (req->private_bio) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
		return true;
	}

	return false;
}

/* returns number of connections (== 1, for drbd 8.4)
 * expected to actually write this data,
 * which does NOT include those that we are L_AHEAD for. */
static int drbd_process_write_request(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	int remote, send_oos;

	remote = drbd_should_do_remote(device->state);
	send_oos = drbd_should_send_out_of_sync(device->state);

	/* Need to replicate writes.  Unless it is an empty flush,
	 * which is better mapped to a DRBD P_BARRIER packet,
	 * also for drbd wire protocol compatibility reasons.
	 * If this was a flush, just start a new epoch.
	 * Unless the current epoch was empty anyways, or we are not currently
	 * replicating, in which case there is no point. */
	if (unlikely(req->i.size == 0)) {
		/* The only size==0 bios we expect are empty flushes. */
		D_ASSERT(device, req->master_bio->bi_rw & REQ_FLUSH);
		if (remote)
			_req_mod(req, QUEUE_AS_DRBD_BARRIER);
		return remote;
	}

	if (!remote && !send_oos)
		return 0;

	D_ASSERT(device, !(remote && send_oos));

	if (remote) {
		_req_mod(req, TO_BE_SENT);
		_req_mod(req, QUEUE_FOR_NET_WRITE);
	} else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size))
		_req_mod(req, QUEUE_FOR_SEND_OOS);

	return remote;
}

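/* Note on the two write paths above: "remote" queues the data itself for
 * the peer (QUEUE_FOR_NET_WRITE), while the send_oos path only marks the
 * block out of sync via drbd_set_out_of_sync() and queues a
 * QUEUE_FOR_SEND_OOS notification, so the peer can resync it later
 * (used while we are ahead of the peer and not replicating data). */
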
static void
drbd_submit_req_private_bio(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	struct bio *bio = req->private_bio;
	const int rw = bio_rw(bio);

	bio->bi_bdev = device->ldev->backing_bdev;

	/* State may have changed since we grabbed our reference on the
	 * ->ldev member. Double check, and short-circuit to endio.
	 * In case the last activity log transaction failed to get on
	 * stable storage, and this is a WRITE, we may not even submit
	 * this bio. */
	if (get_ldev(device)) {
		req->pre_submit_jif = jiffies;
		if (drbd_insert_fault(device,
				      rw == WRITE ? DRBD_FAULT_DT_WR
				    : rw == READ  ? DRBD_FAULT_DT_RD
				    :               DRBD_FAULT_DT_RA))
			bio_endio(bio, -EIO);
		else
			generic_make_request(bio);
		put_ldev(device);
	} else
		bio_endio(bio, -EIO);
}

static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
{
	spin_lock_irq(&device->resource->req_lock);
	list_add_tail(&req->tl_requests, &device->submit.writes);
	list_add_tail(&req->req_pending_master_completion,
			&device->pending_master_completion[1 /* WRITE */]);
	spin_unlock_irq(&device->resource->req_lock);
	queue_work(device->submit.wq, &device->submit.worker);
}

/* returns the new drbd_request pointer, if the caller is expected to
 * drbd_send_and_submit() it (to save latency), or NULL if we queued the
 * request on the submitter thread.
 * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
 */
static struct drbd_request *
drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
{
	const int rw = bio_data_dir(bio);
	struct drbd_request *req;

	/* allocate outside of all locks; */
	req = drbd_req_new(device, bio);
	if (!req) {
		dec_ap_bio(device);
		/* only pass the error to the upper layers.
		 * if user cannot handle io errors, that's not our business. */
		drbd_err(device, "could not kmalloc() req\n");
		bio_endio(bio, -ENOMEM);
		return ERR_PTR(-ENOMEM);
	}
	req->start_jif = start_jif;

	if (!get_ldev(device)) {
		bio_put(req->private_bio);
		req->private_bio = NULL;
	}

	/* Update disk stats */
	_drbd_start_io_acct(device, req);

	if (rw == WRITE && req->private_bio && req->i.size
	&& !test_bit(AL_SUSPENDED, &device->flags)) {
		if (!drbd_al_begin_io_fastpath(device, &req->i)) {
			drbd_queue_write(device, req);
			return NULL;
		}
		req->rq_state |= RQ_IN_ACT_LOG;
		req->in_actlog_jif = jiffies;
	}

	return req;
}

static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
{
	struct drbd_resource *resource = device->resource;
	const int rw = bio_rw(req->master_bio);
	struct bio_and_error m = { NULL, };
	bool no_remote = false;
	bool submit_private_bio = false;

	spin_lock_irq(&resource->req_lock);
	if (rw == WRITE) {
		/* This may temporarily give up the req_lock,
		 * but will re-acquire it before it returns here.
		 * Needs to be before the check on drbd_suspended() */
		complete_conflicting_writes(req);
		/* no more giving up req_lock from now on! */

		/* check for congestion, and potentially stop sending
		 * full data updates, but start sending "dirty bits" only. */
		maybe_pull_ahead(device);
	}


	if (drbd_suspended(device)) {
		/* push back and retry: */
		req->rq_state |= RQ_POSTPONED;
		if (req->private_bio) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
		goto out;
	}

	/* We fail READ/READA early, if we can not serve it.
	 * We must do this before req is registered on any lists.
	 * Otherwise, drbd_req_complete() will queue failed READ for retry. */
	if (rw != WRITE) {
		if (!do_remote_read(req) && !req->private_bio)
			goto nodata;
	}

	/* which transfer log epoch does this belong to? */
	req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr);

	/* no point in adding empty flushes to the transfer log,
	 * they are mapped to drbd barriers already. */
	if (likely(req->i.size!=0)) {
		if (rw == WRITE)
			first_peer_device(device)->connection->current_tle_writes++;

		list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log);
	}

	if (rw == WRITE) {
		if (!drbd_process_write_request(req))
			no_remote = true;
	} else {
		/* We either have a private_bio, or we can read from remote.
		 * Otherwise we had done the goto nodata above. */
		if (req->private_bio == NULL) {
			_req_mod(req, TO_BE_SENT);
			_req_mod(req, QUEUE_FOR_NET_READ);
		} else
			no_remote = true;
	}

	/* If it took the fast path in drbd_request_prepare, add it here.
	 * The slow path has added it already. */
	if (list_empty(&req->req_pending_master_completion))
		list_add_tail(&req->req_pending_master_completion,
			&device->pending_master_completion[rw == WRITE]);
	if (req->private_bio) {
		/* needs to be marked within the same spinlock */
		list_add_tail(&req->req_pending_local,
			&device->pending_completion[rw == WRITE]);
		_req_mod(req, TO_BE_SUBMITTED);
		/* but we need to give up the spinlock to submit */
		submit_private_bio = true;
	} else if (no_remote) {
nodata:
		if (__ratelimit(&drbd_ratelimit_state))
			drbd_err(device, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
					(unsigned long long)req->i.sector, req->i.size >> 9);
		/* A write may have been queued for send_oos, however.
		 * So we can not simply free it, we must go through drbd_req_put_completion_ref() */
	}

out:
	if (drbd_req_put_completion_ref(req, &m, 1))
		kref_put(&req->kref, drbd_req_destroy);
	spin_unlock_irq(&resource->req_lock);

	/* Even though above is a kref_put(), this is safe.
	 * As long as we still need to submit our private bio,
	 * we hold a completion ref, and the request cannot disappear.
	 * If however this request did not even have a private bio to submit
	 * (e.g. remote read), req may already be invalid now.
	 * That's why we cannot check on req->private_bio. */
	if (submit_private_bio)
		drbd_submit_req_private_bio(req);
	if (m.bio)
		complete_master_bio(device, &m);
}

void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
{
	struct drbd_request *req = drbd_request_prepare(device, bio, start_jif);
	if (IS_ERR_OR_NULL(req))
		return;
	drbd_send_and_submit(device, req);
}

b30ab791 | 1249 | static void submit_fast_path(struct drbd_device *device, struct list_head *incoming) |
113fef9e | 1250 | { |
08a1ddab LE |
1251 | struct drbd_request *req, *tmp; |
1252 | list_for_each_entry_safe(req, tmp, incoming, tl_requests) { | |
1253 | const int rw = bio_data_dir(req->master_bio); | |
113fef9e | 1254 | |
08a1ddab LE |
1255 | if (rw == WRITE /* rw != WRITE should not even end up here! */ |
1256 | && req->private_bio && req->i.size | |
b30ab791 AG |
1257 | && !test_bit(AL_SUSPENDED, &device->flags)) { |
1258 | if (!drbd_al_begin_io_fastpath(device, &req->i)) | |
08a1ddab LE |
1259 | continue; |
1260 | ||
1261 | req->rq_state |= RQ_IN_ACT_LOG; | |
e5f891b2 | 1262 | req->in_actlog_jif = jiffies; |
08a1ddab LE |
1263 | } |
1264 | ||
1265 | list_del_init(&req->tl_requests); | |
b30ab791 | 1266 | drbd_send_and_submit(device, req); |
113fef9e | 1267 | } |
113fef9e LE |
1268 | } |
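For context on submit_fast_path(): a write whose activity-log extent is already hot can be accounted in the AL without an on-disk transaction and be submitted right away; everything else stays on the incoming list for the non-blocking transaction path below. A rough sketch of that fastpath idea follows, using a hypothetical cache abstraction; al_cache, al_extent_is_active() and al_extent_get() are illustrative names, not the real AL code.

	/* Hypothetical sketch of the fastpath check: if the extent covering the
	 * request is already active, only a reference is taken and no metadata
	 * transaction is needed. */
	static bool al_fastpath_sketch(struct al_cache *al, unsigned int extent_nr)
	{
		bool hot;

		spin_lock(&al->lock);
		hot = al_extent_is_active(al, extent_nr);	/* already in the AL? */
		if (hot)
			al_extent_get(al, extent_nr);		/* just bump a refcount */
		spin_unlock(&al->lock);

		return hot;	/* false: caller must queue a real AL transaction */
	}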
1269 | ||
b30ab791 | 1270 | static bool prepare_al_transaction_nonblock(struct drbd_device *device, |
08a1ddab LE |
1271 | struct list_head *incoming, |
1272 | struct list_head *pending) | |
1273 | { | |
1274 | struct drbd_request *req, *tmp; | |
1275 | int wake = 0; | |
1276 | int err; | |
1277 | ||
b30ab791 | 1278 | spin_lock_irq(&device->al_lock); |
08a1ddab | 1279 | list_for_each_entry_safe(req, tmp, incoming, tl_requests) { |
b30ab791 | 1280 | err = drbd_al_begin_io_nonblock(device, &req->i); |
08a1ddab LE |
1281 | if (err == -EBUSY) |
1282 | wake = 1; | |
1283 | if (err) | |
1284 | continue; | |
08a1ddab LE |
1285 | list_move_tail(&req->tl_requests, pending); |
1286 | } | |
b30ab791 | 1287 | spin_unlock_irq(&device->al_lock); |
08a1ddab | 1288 | if (wake) |
b30ab791 | 1289 | wake_up(&device->al_wait); |
08a1ddab LE |
1290 | |
1291 | return !list_empty(pending); | |
1292 | } | |
113fef9e LE |
1293 | |
1294 | void do_submit(struct work_struct *ws) | |
1295 | { | |
b30ab791 | 1296 | struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker); |
08a1ddab LE |
1297 | LIST_HEAD(incoming); |
1298 | LIST_HEAD(pending); | |
113fef9e LE |
1299 | struct drbd_request *req, *tmp; |
1300 | ||
08a1ddab | 1301 | for (;;) { |
844a6ae7 | 1302 | spin_lock_irq(&device->resource->req_lock); |
b30ab791 | 1303 | list_splice_tail_init(&device->submit.writes, &incoming); |
844a6ae7 | 1304 | spin_unlock_irq(&device->resource->req_lock); |
113fef9e | 1305 | |
b30ab791 | 1306 | submit_fast_path(device, &incoming); |
08a1ddab LE |
1307 | if (list_empty(&incoming)) |
1308 | break; | |
1309 | ||
e4d7d6f4 | 1310 | skip_fast_path: |
b30ab791 | 1311 | wait_event(device->al_wait, prepare_al_transaction_nonblock(device, &incoming, &pending)); |
45ad07b3 LE |
1312 | /* Maybe more was queued while we were preparing the transaction? |
1313 | * Try to stuff them into this transaction as well. | |
1314 | * Be strictly non-blocking here, no wait_event, we already | |
1315 | * have something to commit. | |
1316 | * Stop if we don't make any more progress. | |
1317 | */ | |
1318 | for (;;) { | |
1319 | LIST_HEAD(more_pending); | |
1320 | LIST_HEAD(more_incoming); | |
1321 | bool made_progress; | |
1322 | ||
1323 | /* It is ok to look outside the lock, | |
1324 | * it's only an optimization anyway */ |
b30ab791 | 1325 | if (list_empty(&device->submit.writes)) |
45ad07b3 LE |
1326 | break; |
1327 | ||
844a6ae7 | 1328 | spin_lock_irq(&device->resource->req_lock); |
b30ab791 | 1329 | list_splice_tail_init(&device->submit.writes, &more_incoming); |
844a6ae7 | 1330 | spin_unlock_irq(&device->resource->req_lock); |
45ad07b3 LE |
1331 | |
1332 | if (list_empty(&more_incoming)) | |
1333 | break; | |
1334 | ||
b30ab791 | 1335 | made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending); |
45ad07b3 LE |
1336 | |
1337 | list_splice_tail_init(&more_pending, &pending); | |
1338 | list_splice_tail_init(&more_incoming, &incoming); | |
1339 | ||
1340 | if (!made_progress) | |
1341 | break; | |
1342 | } | |
4dd726f0 | 1343 | drbd_al_begin_io_commit(device); |
08a1ddab LE |
1344 | |
1345 | list_for_each_entry_safe(req, tmp, &pending, tl_requests) { | |
e5f891b2 LE |
1346 | req->rq_state |= RQ_IN_ACT_LOG; |
1347 | req->in_actlog_jif = jiffies; | |
08a1ddab | 1348 | list_del_init(&req->tl_requests); |
b30ab791 | 1349 | drbd_send_and_submit(device, req); |
08a1ddab | 1350 | } |
e4d7d6f4 LE |
1351 | |
1352 | /* If all currently hot activity log extents are kept busy by | |
1353 | * incoming requests, we still must not totally starve new | |
1354 | * requests to cold extents. In that case, prepare one request | |
1355 | * in blocking mode. */ | |
1356 | list_for_each_entry_safe(req, tmp, &incoming, tl_requests) { | |
e5f891b2 | 1357 | bool was_cold; |
e4d7d6f4 | 1358 | list_del_init(&req->tl_requests); |
e5f891b2 LE |
1359 | was_cold = drbd_al_begin_io_prepare(device, &req->i); |
1360 | if (!was_cold) { | |
1361 | req->rq_state |= RQ_IN_ACT_LOG; | |
1362 | req->in_actlog_jif = jiffies; | |
e4d7d6f4 LE |
1363 | /* Corresponding extent was hot after all? */ |
1364 | drbd_send_and_submit(device, req); | |
1365 | } else { | |
1366 | /* Found a request to a cold extent. | |
1367 | * Put on "pending" list, | |
1368 | * and try to cumulate with more. */ | |
1369 | list_add(&req->tl_requests, &pending); | |
1370 | goto skip_fast_path; | |
1371 | } | |
1372 | } | |
113fef9e LE |
1373 | } |
1374 | } | |
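Summarizing do_submit() above: it drains device->submit.writes, submits what the fastpath can handle, stages the rest into one activity-log transaction without blocking, opportunistically folds in newly queued writes while progress is still made, commits the transaction once, and finally prepares a single request to a cold extent in blocking mode so such requests cannot be starved forever. A compressed sketch of that batching loop; all helper names are illustrative, not DRBD functions.

	/* Sketch of the batching strategy: amortize one metadata (AL) commit
	 * over as many queued writes as possible. */
	for (;;) {
		pull_queued_writes(&incoming);		/* splice from the shared queue */
		submit_already_hot(&incoming);		/* fastpath, no transaction */
		if (list_empty(&incoming))
			break;

		/* Block until at least one request fits into the open transaction. */
		wait_event(al_wait, stage_nonblocking(&incoming, &pending));

		/* Keep folding in freshly queued writes while we still make progress. */
		while (new_writes_queued() &&
		       stage_nonblocking(&incoming, &pending))
			;

		commit_al_transaction();		/* one metadata write for the batch */
		submit_all(&pending);
	}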
1375 | ||
5a7bbad2 | 1376 | void drbd_make_request(struct request_queue *q, struct bio *bio) |
b411b363 | 1377 | { |
b30ab791 | 1378 | struct drbd_device *device = (struct drbd_device *) q->queuedata; |
e5f891b2 | 1379 | unsigned long start_jif; |
b411b363 | 1380 | |
e5f891b2 | 1381 | start_jif = jiffies; |
aeda1cd6 | 1382 | |
b411b363 PR |
1383 | /* |
1384 | * what we "blindly" assume: | |
1385 | */ | |
0b0ba1ef | 1386 | D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512)); |
b411b363 | 1387 | |
b30ab791 | 1388 | inc_ap_bio(device); |
e5f891b2 | 1389 | __drbd_make_request(device, bio, start_jif); |
b411b363 PR |
1390 | } |
1391 | ||
23361cf3 LE |
1392 | /* This is called by bio_add_page(). |
1393 | * | |
1394 | * q->max_hw_sectors and other global limits are already enforced there. | |
b411b363 | 1395 | * |
23361cf3 LE |
1396 | * We need to call down to our lower level device, |
1397 | * in case it has special restrictions. | |
1398 | * | |
1399 | * We also may need to enforce configured max-bio-bvecs limits. | |
b411b363 PR |
1400 | * |
1401 | * As long as the BIO is empty we have to allow at least one bvec, | |
23361cf3 | 1402 | * regardless of size and offset, so no need to ask lower levels. |
b411b363 PR |
1403 | */ |
1404 | int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec) | |
1405 | { | |
b30ab791 | 1406 | struct drbd_device *device = (struct drbd_device *) q->queuedata; |
b411b363 | 1407 | unsigned int bio_size = bvm->bi_size; |
23361cf3 LE |
1408 | int limit = DRBD_MAX_BIO_SIZE; |
1409 | int backing_limit; | |
1410 | ||
b30ab791 | 1411 | if (bio_size && get_ldev(device)) { |
35f47ef1 | 1412 | unsigned int max_hw_sectors = queue_max_hw_sectors(q); |
b411b363 | 1413 | struct request_queue * const b = |
b30ab791 | 1414 | device->ldev->backing_bdev->bd_disk->queue; |
a1c88d0d | 1415 | if (b->merge_bvec_fn) { |
b411b363 PR |
1416 | backing_limit = b->merge_bvec_fn(b, bvm, bvec); |
1417 | limit = min(limit, backing_limit); | |
1418 | } | |
b30ab791 | 1419 | put_ldev(device); |
35f47ef1 LE |
1420 | if ((limit >> 9) > max_hw_sectors) |
1421 | limit = max_hw_sectors << 9; | |
b411b363 PR |
1422 | } |
1423 | return limit; | |
1424 | } | |
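To make the arithmetic in drbd_merge_bvec() concrete: the limit starts at DRBD_MAX_BIO_SIZE, is reduced to the backing device's own merge_bvec result, and is finally clamped to max_hw_sectors expressed in bytes (a sector being 512 bytes, hence the shifts by 9). The following user-space sketch performs the same computation; the 1 MiB value for DRBD_MAX_BIO_SIZE and the example numbers are assumptions for illustration.

	#include <stdio.h>

	/* Assumed for this example; matches common DRBD builds. */
	#define DRBD_MAX_BIO_SIZE (1U << 20)		/* 1 MiB */

	int main(void)
	{
		unsigned int limit = DRBD_MAX_BIO_SIZE;	/* start at the DRBD maximum */
		unsigned int backing_limit = 128 << 10;	/* backing device allows 128 KiB */
		unsigned int max_hw_sectors = 512;	/* 512 sectors = 256 KiB */

		if (backing_limit < limit)		/* min(limit, backing_limit) */
			limit = backing_limit;
		if ((limit >> 9) > max_hw_sectors)	/* clamp to hardware limit */
			limit = max_hw_sectors << 9;

		printf("merge limit: %u bytes\n", limit);	/* prints 131072 */
		return 0;
	}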
7fde2be9 | 1425 | |
08535466 LE |
1426 | static void find_oldest_requests( |
1427 | struct drbd_connection *connection, | |
1428 | struct drbd_device *device, | |
1429 | struct drbd_request **oldest_req_waiting_for_peer, | |
1430 | struct drbd_request **oldest_req_waiting_for_disk) | |
b6dd1a89 | 1431 | { |
b6dd1a89 | 1432 | struct drbd_request *r; |
08535466 LE |
1433 | *oldest_req_waiting_for_peer = NULL; |
1434 | *oldest_req_waiting_for_disk = NULL; | |
bde89a9e | 1435 | list_for_each_entry(r, &connection->transfer_log, tl_requests) { |
08535466 LE |
1436 | const unsigned s = r->rq_state; |
1437 | if (!*oldest_req_waiting_for_peer | |
1438 | && ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) | |
1439 | *oldest_req_waiting_for_peer = r; | |
1440 | ||
1441 | if (!*oldest_req_waiting_for_disk | |
1442 | && (s & RQ_LOCAL_PENDING) && r->device == device) | |
1443 | *oldest_req_waiting_for_disk = r; | |
1444 | ||
1445 | if (*oldest_req_waiting_for_peer && *oldest_req_waiting_for_disk) | |
1446 | break; | |
b6dd1a89 | 1447 | } |
b6dd1a89 LE |
1448 | } |
1449 | ||
7fde2be9 PR |
1450 | void request_timer_fn(unsigned long data) |
1451 | { | |
b30ab791 | 1452 | struct drbd_device *device = (struct drbd_device *) data; |
a6b32bc3 | 1453 | struct drbd_connection *connection = first_peer_device(device)->connection; |
08535466 | 1454 | struct drbd_request *req_disk, *req_peer; /* oldest request */ |
44ed167d | 1455 | struct net_conf *nc; |
dfa8bedb | 1456 | unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */ |
ba280c09 | 1457 | unsigned long now; |
7fde2be9 | 1458 | |
44ed167d | 1459 | rcu_read_lock(); |
bde89a9e | 1460 | nc = rcu_dereference(connection->net_conf); |
b30ab791 | 1461 | if (nc && device->state.conn >= C_WF_REPORT_PARAMS) |
07be15b1 | 1462 | ent = nc->timeout * HZ/10 * nc->ko_count; |
cdfda633 | 1463 | |
b30ab791 AG |
1464 | if (get_ldev(device)) { /* implicit state.disk >= D_INCONSISTENT */ |
1465 | dt = rcu_dereference(device->ldev->disk_conf)->disk_timeout * HZ / 10; | |
1466 | put_ldev(device); | |
dfa8bedb | 1467 | } |
44ed167d | 1468 | rcu_read_unlock(); |
7fde2be9 | 1469 | |
dfa8bedb PR |
1470 | et = min_not_zero(dt, ent); |
1471 | ||
ba280c09 | 1472 | if (!et) |
7fde2be9 PR |
1473 | return; /* Recurring timer stopped */ |
1474 | ||
ba280c09 LE |
1475 | now = jiffies; |
1476 | ||
0500813f | 1477 | spin_lock_irq(&device->resource->req_lock); |
08535466 LE |
1478 | find_oldest_requests(connection, device, &req_peer, &req_disk); |
1479 | if (req_peer == NULL && req_disk == NULL) { | |
0500813f | 1480 | spin_unlock_irq(&device->resource->req_lock); |
b30ab791 | 1481 | mod_timer(&device->request_timer, now + et); |
7fde2be9 PR |
1482 | return; |
1483 | } | |
1484 | ||
ba280c09 LE |
1485 | /* The request is considered timed out if |
1486 | * - we have some effective timeout from the configuration, | |
1487 | * with above state restrictions applied, | |
1488 | * - the oldest request is waiting for a response from the network | |
1489 | * (or from the local disk, respectively), | |
1490 | * - the oldest request is in fact older than the effective timeout, | |
1491 | * - the connection was established (or the disk attached, respectively) | |
1492 | * for longer than the timeout already. | |
1493 | * Note that for 32bit jiffies and very stable connections/disks, | |
1494 | * we may have a wrap-around, which is caught by | |
1495 | * !time_in_range(now, last_..._jif, last_..._jif + timeout). | |
1496 | * | |
1497 | * Side effect: once per 32bit wrap-around interval, which means every | |
1498 | * ~198 days with 250 HZ, we have a window where the timeout would need | |
1499 | * to expire twice (worst case) to become effective. Good enough. | |
1500 | */ | |
08535466 | 1501 | if (ent && req_peer && |
e5f891b2 | 1502 | time_after(now, req_peer->start_jif + ent) && |
bde89a9e | 1503 | !time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent)) { |
d0180171 | 1504 | drbd_warn(device, "Remote failed to finish a request within ko-count * timeout\n"); |
b30ab791 | 1505 | _drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL); |
7fde2be9 | 1506 | } |
08535466 | 1507 | if (dt && req_disk && |
e5f891b2 | 1508 | time_after(now, req_disk->start_jif + dt) && |
b30ab791 | 1509 | !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) { |
d0180171 | 1510 | drbd_warn(device, "Local backing device failed to meet the disk-timeout\n"); |
b30ab791 | 1511 | __drbd_chk_io_error(device, DRBD_FORCE_DETACH); |
dfa8bedb | 1512 | } |
08535466 LE |
1513 | |
1514 | /* Reschedule the timer for the nearest timeout that has not yet expired. | |
1515 | * Fall back to now + min(effective network timeout, disk timeout). */ | |
e5f891b2 LE |
1516 | ent = (ent && req_peer && time_before(now, req_peer->start_jif + ent)) |
1517 | ? req_peer->start_jif + ent : now + et; | |
1518 | dt = (dt && req_disk && time_before(now, req_disk->start_jif + dt)) | |
1519 | ? req_disk->start_jif + dt : now + et; | |
08535466 | 1520 | nt = time_before(ent, dt) ? ent : dt; |
0500813f | 1521 | spin_unlock_irq(&connection->resource->req_lock); |
b30ab791 | 1522 | mod_timer(&device->request_timer, nt); |
7fde2be9 | 1523 | } |
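The timeouts in request_timer_fn() come from configuration values given in tenths of a second, which is what the `* HZ/10` factors convert into jiffies; ko-count then multiplies the network timeout, and min_not_zero() picks the smaller non-zero of the two as the rescheduling interval. With the commonly used defaults of timeout=60 and ko-count=7 (assumed here), the effective network timeout works out to 42 seconds. A small user-space sketch of the conversion; HZ and the configuration values are example assumptions.

	#include <stdio.h>

	#define HZ 250	/* assumed tick rate for this example */

	int main(void)
	{
		/* drbd.conf values are in tenths of a second; 60 and 7 are the
		 * commonly used defaults, assumed here for illustration. */
		unsigned int timeout_ds      = 60;	/* "timeout 60;"  -> 6.0 s */
		unsigned int ko_count        = 7;	/* "ko-count 7;" */
		unsigned int disk_timeout_ds = 0;	/* 0: no separate disk timeout */

		unsigned long ent = (unsigned long)timeout_ds * HZ / 10 * ko_count;
		unsigned long dt  = (unsigned long)disk_timeout_ds * HZ / 10;

		printf("network timeout: %lu jiffies (%lu s)\n", ent, ent / HZ); /* 10500, 42 */
		printf("disk timeout:    %lu jiffies (disabled if 0)\n", dt);
		return 0;
	}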