/*
   drbd_req.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_req.h"


static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size);

/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	int cpu;
	cpu = part_stat_lock();
	part_round_stats(cpu, &mdev->vdisk->part0);
	part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
	part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
	(void) cpu; /* The macro invocations above want the cpu argument, I do not like
		       the compiler warning about cpu only assigned but never used... */
	part_inc_in_flight(&mdev->vdisk->part0, rw);
	part_stat_unlock();
}

/* Update disk stats when completing request upwards */
static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
{
	int rw = bio_data_dir(req->master_bio);
	unsigned long duration = jiffies - req->start_time;
	int cpu;
	cpu = part_stat_lock();
	part_stat_add(cpu, &mdev->vdisk->part0, ticks[rw], duration);
	part_round_stats(cpu, &mdev->vdisk->part0);
	part_dec_in_flight(&mdev->vdisk->part0, rw);
	part_stat_unlock();
}

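/* Allocate a drbd_request for @bio_src from the request mempool, set up its
 * private bio via drbd_req_make_private_bio(), record the affected sector
 * interval, and initialize the transfer-log and work list heads. */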
static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
					 struct bio *bio_src)
{
	struct drbd_request *req;

	req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
	if (!req)
		return NULL;

	drbd_req_make_private_bio(req, bio_src);
	req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
	req->w.mdev = mdev;
	req->master_bio = bio_src;
	req->epoch = 0;

	drbd_clear_interval(&req->i);
	req->i.sector = bio_src->bi_sector;
	req->i.size = bio_src->bi_size;
	req->i.local = true;
	req->i.waiting = false;

	INIT_LIST_HEAD(&req->tl_requests);
	INIT_LIST_HEAD(&req->w.list);

	return req;
}

static void drbd_req_free(struct drbd_request *req)
{
	mempool_free(req, drbd_request_mempool);
}

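/* Final cleanup of a request that is no longer referenced by either the
 * local disk or the network: remove it from the transfer log, update the
 * bitmap (out-of-sync/in-sync) for writes, release the activity-log
 * reference if it held one, and free it (or restart it if postponed). */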
/* rw is bio_data_dir(), only READ or WRITE */
static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const int rw)
{
	const unsigned long s = req->rq_state;

	/* remove it from the transfer log.
	 * well, only if it had been there in the first
	 * place... if it had not (local only or conflicting
	 * and never sent), it should still be "empty" as
	 * initialized in drbd_req_new(), so we can list_del() it
	 * here unconditionally */
	list_del_init(&req->tl_requests);

	/* if it was a write, we may have to set the corresponding
	 * bit(s) out-of-sync first. If it had a local part, we need to
	 * release the reference to the activity log. */
	if (rw == WRITE) {
		/* Set out-of-sync unless both OK flags are set
		 * (local only or remote failed).
		 * Other places where we set out-of-sync:
		 * READ with local io-error */
		if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
			drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);

		if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
			drbd_set_in_sync(mdev, req->i.sector, req->i.size);

		/* one might be tempted to move the drbd_al_complete_io
		 * to the local io completion callback drbd_request_endio.
		 * but, if this was a mirror write, we may only
		 * drbd_al_complete_io after this is RQ_NET_DONE,
		 * otherwise the extent could be dropped from the al
		 * before it has actually been written on the peer.
		 * if we crash before our peer knows about the request,
		 * but after the extent has been dropped from the al,
		 * we would forget to resync the corresponding extent.
		 */
		if (s & RQ_LOCAL_MASK) {
			if (get_ldev_if_state(mdev, D_FAILED)) {
				if (s & RQ_IN_ACT_LOG)
					drbd_al_complete_io(mdev, &req->i);
				put_ldev(mdev);
			} else if (__ratelimit(&drbd_ratelimit_state)) {
				dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu, %u), "
					 "but my Disk seems to have failed :(\n",
					 (unsigned long long) req->i.sector, req->i.size);
			}
		}
	}

	if (s & RQ_POSTPONED)
		drbd_restart_write(req);
	else
		drbd_req_free(req);
}

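/* Queue the work item that sends the P_BARRIER closing the current epoch.
 * Called with the req_lock held; the CREATE_BARRIER bit guarantees the
 * barrier for an epoch is only queued once. */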
static void queue_barrier(struct drbd_conf *mdev)
{
	struct drbd_tl_epoch *b;
	struct drbd_tconn *tconn = mdev->tconn;

	/* We are within the req_lock. Once we queued the barrier for sending,
	 * we set the CREATE_BARRIER bit. It is cleared as soon as a new
	 * barrier/epoch object is added. This is the only place this bit is
	 * set. It indicates that the barrier for this epoch is already queued,
	 * and no new epoch has been created yet. */
	if (test_bit(CREATE_BARRIER, &tconn->flags))
		return;

	b = tconn->newest_tle;
	b->w.cb = w_send_barrier;
	b->w.mdev = mdev;
	/* inc_ap_pending done here, so we won't
	 * get imbalanced on connection loss.
	 * dec_ap_pending will be done in got_BarrierAck
	 * or (on connection loss) in tl_clear. */
	inc_ap_pending(mdev);
	drbd_queue_work(&tconn->data.work, &b->w);
	set_bit(CREATE_BARRIER, &tconn->flags);
}

static void _about_to_complete_local_write(struct drbd_conf *mdev,
					   struct drbd_request *req)
{
	const unsigned long s = req->rq_state;

	/* Before we can signal completion to the upper layers,
	 * we may need to close the current epoch.
	 * We can skip this, if this request has not even been sent, because we
	 * did not have a fully established connection yet/anymore, during
	 * bitmap exchange, or while we are C_AHEAD due to congestion policy.
	 */
	if (mdev->state.conn >= C_CONNECTED &&
	    (s & RQ_NET_SENT) != 0 &&
	    req->epoch == mdev->tconn->newest_tle->br_number)
		queue_barrier(mdev);
}

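/* Hand the completed master bio back to the upper layers and drop the
 * application-bio reference that was taken in drbd_make_request(). */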
void complete_master_bio(struct drbd_conf *mdev,
		struct bio_and_error *m)
{
	bio_endio(m->bio, m->error);
	dec_ap_bio(mdev);
}


static void drbd_remove_request_interval(struct rb_root *root,
					 struct drbd_request *req)
{
	struct drbd_conf *mdev = req->w.mdev;
	struct drbd_interval *i = &req->i;

	drbd_remove_interval(root, i);

	/* Wake up any processes waiting for this request to complete. */
	if (i->waiting)
		wake_up(&mdev->misc_wait);
}

/* Helper for __req_mod().
 * Set m->bio to the master bio, if it is fit to be completed,
 * or leave it alone (it is initialized to NULL in __req_mod),
 * if it has already been completed, or cannot be completed yet.
 * If m->bio is set, the error status to be returned is placed in m->error.
 */
void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
{
	const unsigned long s = req->rq_state;
	struct drbd_conf *mdev = req->w.mdev;
	int rw = req->rq_state & RQ_WRITE ? WRITE : READ;

	/* we must not complete the master bio, while it is
	 *	still being processed by _drbd_send_zc_bio (drbd_send_dblock)
	 *	not yet acknowledged by the peer
	 *	not yet completed by the local io subsystem
	 * these flags may get cleared in any order by
	 *	the worker,
	 *	the receiver,
	 *	the bio_endio completion callbacks.
	 */
	if (s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED))
		return;
	if (req->i.waiting) {
		/* Retry all conflicting peer requests. */
		wake_up(&mdev->misc_wait);
	}
	if (s & RQ_NET_QUEUED)
		return;
	if (s & RQ_NET_PENDING)
		return;

	if (req->master_bio) {
		/* this is DATA_RECEIVED (remote read)
		 * or protocol C P_WRITE_ACK
		 * or protocol B P_RECV_ACK
		 * or protocol A "HANDED_OVER_TO_NETWORK" (SendAck)
		 * or canceled or failed,
		 * or killed from the transfer log due to connection loss.
		 */

		/*
		 * figure out whether to report success or failure.
		 *
		 * report success when at least one of the operations succeeded.
		 * or, to put the other way,
		 * only report failure, when both operations failed.
		 *
		 * what to do about the failures is handled elsewhere.
		 * what we need to do here is just: complete the master_bio.
		 *
		 * local completion error, if any, has been stored as ERR_PTR
		 * in private_bio within drbd_request_endio.
		 */
		int ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
		int error = PTR_ERR(req->private_bio);

		/* remove the request from the conflict detection
		 * respective block_id verification hash */
		if (!drbd_interval_empty(&req->i)) {
			struct rb_root *root;

			if (rw == WRITE)
				root = &mdev->write_requests;
			else
				root = &mdev->read_requests;
			drbd_remove_request_interval(root, req);
		} else if (!(s & RQ_POSTPONED))
			D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);

		/* for writes we need to do some extra housekeeping */
		if (rw == WRITE)
			_about_to_complete_local_write(mdev, req);

		/* Update disk stats */
		_drbd_end_io_acct(mdev, req);

		if (!(s & RQ_POSTPONED)) {
			m->error = ok ? 0 : (error ?: -EIO);
			m->bio = req->master_bio;
			req->master_bio = NULL;
		} else {
			/* Assert that this will be _req_is_done()
			 * with this very invocation. */
			/* FIXME:
			 * what about (RQ_LOCAL_PENDING | RQ_LOCAL_ABORTED)?
			 */
			D_ASSERT(!(s & RQ_LOCAL_PENDING));
			D_ASSERT(s & RQ_NET_DONE);
		}
	}

	if (s & RQ_LOCAL_PENDING)
		return;

	if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE)) {
		/* this is disconnected (local only) operation,
		 * or protocol A, B, or C P_BARRIER_ACK,
		 * or killed from the transfer log due to connection loss. */
		_req_is_done(mdev, req, rw);
	}
	/* else: network part and not DONE yet. that is
	 * protocol A, B, or C, barrier ack still pending... */
}

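/* Like _req_may_be_done(), but only act while I/O is not suspended;
 * while suspended, completion is deferred until I/O resumes. */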
static void _req_may_be_done_not_susp(struct drbd_request *req, struct bio_and_error *m)
{
	struct drbd_conf *mdev = req->w.mdev;

	if (!drbd_suspended(mdev))
		_req_may_be_done(req, m);
}

/* obviously this could be coded as many single functions
 * instead of one huge switch,
 * or by putting the code directly in the respective locations
 * (as it has been before).
 *
 * but having it this way
 *	enforces that it is all in this one place, where it is easier to audit,
 *	it makes it obvious that whatever "event" "happens" to a request should
 *	happen "atomically" within the req_lock,
 *	and it enforces that we have to think in a very structured manner
 *	about the "events" that may happen to a request during its life time ...
 */
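/* Callers hold the req_lock.  If the request became ready for completion,
 * m->bio and m->error are filled in here, and the master bio is then
 * completed outside the lock.  A minimal usage sketch (the locked wrappers
 * in drbd_req.h follow essentially this pattern):
 *
 *	struct bio_and_error m;
 *
 *	spin_lock_irq(&mdev->tconn->req_lock);
 *	__req_mod(req, COMPLETED_OK, &m);
 *	spin_unlock_irq(&mdev->tconn->req_lock);
 *	if (m.bio)
 *		complete_master_bio(mdev, &m);
 */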
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m)
{
	struct drbd_conf *mdev = req->w.mdev;
	struct net_conf *nc;
	int p, rv = 0;

	if (m)
		m->bio = NULL;

	switch (what) {
	default:
		dev_err(DEV, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
		break;

	/* does not happen...
	 * initialization done in drbd_req_new
	case CREATED:
		break;
		*/

	case TO_BE_SENT: /* via network */
		/* reached via __drbd_make_request
		 * and from w_read_retry_remote */
		D_ASSERT(!(req->rq_state & RQ_NET_MASK));
		req->rq_state |= RQ_NET_PENDING;
		rcu_read_lock();
		nc = rcu_dereference(mdev->tconn->net_conf);
		p = nc->wire_protocol;
		rcu_read_unlock();
		req->rq_state |=
			p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
			p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
		inc_ap_pending(mdev);
		break;

	case TO_BE_SUBMITTED: /* locally */
		/* reached via __drbd_make_request */
		D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK));
		req->rq_state |= RQ_LOCAL_PENDING;
		break;

	case COMPLETED_OK:
		if (req->rq_state & RQ_WRITE)
			mdev->writ_cnt += req->i.size >> 9;
		else
			mdev->read_cnt += req->i.size >> 9;

		req->rq_state |= (RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
		req->rq_state &= ~RQ_LOCAL_PENDING;

		_req_may_be_done_not_susp(req, m);
		put_ldev(mdev);
		break;

	case ABORT_DISK_IO:
		req->rq_state |= RQ_LOCAL_ABORTED;
		if (req->rq_state & RQ_WRITE)
			_req_may_be_done_not_susp(req, m);
		else
			goto goto_queue_for_net_read;
		break;

	case WRITE_COMPLETED_WITH_ERROR:
		req->rq_state |= RQ_LOCAL_COMPLETED;
		req->rq_state &= ~RQ_LOCAL_PENDING;

		__drbd_chk_io_error(mdev, false);
		_req_may_be_done_not_susp(req, m);
		put_ldev(mdev);
		break;

	case READ_AHEAD_COMPLETED_WITH_ERROR:
		/* it is legal to fail READA */
		req->rq_state |= RQ_LOCAL_COMPLETED;
		req->rq_state &= ~RQ_LOCAL_PENDING;
		_req_may_be_done_not_susp(req, m);
		put_ldev(mdev);
		break;

	case READ_COMPLETED_WITH_ERROR:
		drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);

		req->rq_state |= RQ_LOCAL_COMPLETED;
		req->rq_state &= ~RQ_LOCAL_PENDING;

		D_ASSERT(!(req->rq_state & RQ_NET_MASK));

		__drbd_chk_io_error(mdev, false);
		put_ldev(mdev);

	goto_queue_for_net_read:

		/* no point in retrying if there is no good remote data,
		 * or we have no connection. */
		if (mdev->state.pdsk != D_UP_TO_DATE) {
			_req_may_be_done_not_susp(req, m);
			break;
		}

		/* _req_mod(req,TO_BE_SENT); oops, recursion... */
		req->rq_state |= RQ_NET_PENDING;
		inc_ap_pending(mdev);
		/* fall through: _req_mod(req,QUEUE_FOR_NET_READ); */

	case QUEUE_FOR_NET_READ:
		/* READ or READA, and
		 * no local disk,
		 * or target area marked as invalid,
		 * or just got an io-error. */
		/* from __drbd_make_request
		 * or from bio_endio during read io-error recovery */

		/* so we can verify the handle in the answer packet
		 * corresponding hlist_del is in _req_may_be_done() */
		D_ASSERT(drbd_interval_empty(&req->i));
		drbd_insert_interval(&mdev->read_requests, &req->i);

		set_bit(UNPLUG_REMOTE, &mdev->flags);

		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		req->rq_state |= RQ_NET_QUEUED;
		req->w.cb = (req->rq_state & RQ_LOCAL_MASK)
			? w_read_retry_remote
			: w_send_read_req;
		drbd_queue_work(&mdev->tconn->data.work, &req->w);
		break;

	case QUEUE_FOR_NET_WRITE:
		/* assert something? */
		/* from __drbd_make_request only */

		/* corresponding hlist_del is in _req_may_be_done() */
		D_ASSERT(drbd_interval_empty(&req->i));
		drbd_insert_interval(&mdev->write_requests, &req->i);

		/* NOTE
		 * In case the req ended up on the transfer log before being
		 * queued on the worker, it could lead to this request being
		 * missed during cleanup after connection loss.
		 * So we have to do both operations here,
		 * within the same lock that protects the transfer log.
		 *
		 * _req_add_to_epoch(req); this has to be after the
		 * _maybe_start_new_epoch(req); which happened in
		 * __drbd_make_request, because we now may set the bit
		 * again ourselves to close the current epoch.
		 *
		 * Add req to the (now) current epoch (barrier). */

		/* otherwise we may lose an unplug, which may cause some remote
		 * io-scheduler timeout to expire, increasing maximum latency,
		 * hurting performance. */
		set_bit(UNPLUG_REMOTE, &mdev->flags);

		/* see __drbd_make_request,
		 * just after it grabs the req_lock */
		D_ASSERT(test_bit(CREATE_BARRIER, &mdev->tconn->flags) == 0);

		req->epoch = mdev->tconn->newest_tle->br_number;

		/* increment size of current epoch */
		mdev->tconn->newest_tle->n_writes++;

		/* queue work item to send data */
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		req->rq_state |= RQ_NET_QUEUED;
		req->w.cb = w_send_dblock;
		drbd_queue_work(&mdev->tconn->data.work, &req->w);

		/* close the epoch, in case it outgrew the limit */
		rcu_read_lock();
		nc = rcu_dereference(mdev->tconn->net_conf);
		p = nc->max_epoch_size;
		rcu_read_unlock();
		if (mdev->tconn->newest_tle->n_writes >= p)
			queue_barrier(mdev);

		break;

	case QUEUE_FOR_SEND_OOS:
		req->rq_state |= RQ_NET_QUEUED;
		req->w.cb = w_send_out_of_sync;
		drbd_queue_work(&mdev->tconn->data.work, &req->w);
		break;

	case OOS_HANDED_TO_NETWORK:
		/* actually the same */
	case SEND_CANCELED:
		/* treat it the same */
	case SEND_FAILED:
		/* real cleanup will be done from tl_clear.  just update flags
		 * so it is no longer marked as on the worker queue */
		req->rq_state &= ~RQ_NET_QUEUED;
		/* if we did it right, tl_clear should be scheduled only after
		 * this, so this should not be necessary! */
		_req_may_be_done_not_susp(req, m);
		break;

	case HANDED_OVER_TO_NETWORK:
		/* assert something? */
		if (bio_data_dir(req->master_bio) == WRITE)
			atomic_add(req->i.size >> 9, &mdev->ap_in_flight);

		if (bio_data_dir(req->master_bio) == WRITE &&
		    !(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK))) {
			/* this is what is dangerous about protocol A:
			 * pretend it was successfully written on the peer. */
			if (req->rq_state & RQ_NET_PENDING) {
				dec_ap_pending(mdev);
				req->rq_state &= ~RQ_NET_PENDING;
				req->rq_state |= RQ_NET_OK;
			} /* else: neg-ack was faster... */
			/* it is still not yet RQ_NET_DONE until the
			 * corresponding epoch barrier got acked as well,
			 * so we know what to dirty on connection loss */
		}
		req->rq_state &= ~RQ_NET_QUEUED;
		req->rq_state |= RQ_NET_SENT;
		/* because _drbd_send_zc_bio could sleep, and may want to
		 * dereference the bio even after the "WRITE_ACKED_BY_PEER" and
		 * "COMPLETED_OK" events came in, once we return from
		 * _drbd_send_zc_bio (drbd_send_dblock), we have to check
		 * whether it is done already, and end it. */
		_req_may_be_done_not_susp(req, m);
		break;

	case READ_RETRY_REMOTE_CANCELED:
		req->rq_state &= ~RQ_NET_QUEUED;
		/* fall through, in case we raced with drbd_disconnect */
	case CONNECTION_LOST_WHILE_PENDING:
		/* transfer log cleanup after connection loss */
		/* assert something? */
		if (req->rq_state & RQ_NET_PENDING)
			dec_ap_pending(mdev);

		p = !(req->rq_state & RQ_WRITE) && req->rq_state & RQ_NET_PENDING;

		req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
		req->rq_state |= RQ_NET_DONE;
		if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE)
			atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);

		/* if it is still queued, we may not complete it here.
		 * it will be canceled soon. */
		if (!(req->rq_state & RQ_NET_QUEUED)) {
			if (p)
				goto goto_read_retry_local;
			_req_may_be_done(req, m); /* Allowed while state.susp */
		}
		break;

	case WRITE_ACKED_BY_PEER_AND_SIS:
		req->rq_state |= RQ_NET_SIS;
	case DISCARD_WRITE:
		/* for discarded conflicting writes of multiple primaries,
		 * there is no need to keep anything in the tl, potential
		 * node crashes are covered by the activity log. */
		req->rq_state |= RQ_NET_DONE;
		/* fall through */
	case WRITE_ACKED_BY_PEER:
		D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
		/* protocol C; successfully written on peer.
		 * Nothing to do here.
		 * We want to keep the tl in place for all protocols, to cater
		 * for volatile write-back caches on lower level devices.
		 *
		 * A barrier request is expected to have forced all prior
		 * requests onto stable storage, so completion of a barrier
		 * request could set NET_DONE right here, and not wait for the
		 * P_BARRIER_ACK, but that is an unnecessary optimization. */

		goto ack_common;
		/* this makes it effectively the same as for: */
	case RECV_ACKED_BY_PEER:
		D_ASSERT(req->rq_state & RQ_EXP_RECEIVE_ACK);
		/* protocol B; pretends to be successfully written on peer.
		 * see also notes above in HANDED_OVER_TO_NETWORK about
		 * protocol != C */
	ack_common:
		req->rq_state |= RQ_NET_OK;
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		dec_ap_pending(mdev);
		atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
		req->rq_state &= ~RQ_NET_PENDING;
		_req_may_be_done_not_susp(req, m);
		break;

	case POSTPONE_WRITE:
		D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
		/* If this node has already detected the write conflict, the
		 * worker will be waiting on misc_wait.  Wake it up once this
		 * request has completed locally.
		 */
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		req->rq_state |= RQ_POSTPONED;
		_req_may_be_done_not_susp(req, m);
		break;

	case NEG_ACKED:
		/* assert something? */
		if (req->rq_state & RQ_NET_PENDING) {
			dec_ap_pending(mdev);
			if (req->rq_state & RQ_WRITE)
				atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
		}
		req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);

		req->rq_state |= RQ_NET_DONE;

		if (!(req->rq_state & RQ_WRITE))
			goto goto_read_retry_local;

		_req_may_be_done_not_susp(req, m);
		/* else: done by HANDED_OVER_TO_NETWORK */
		break;

	goto_read_retry_local:
		if (!drbd_may_do_local_read(mdev, req->i.sector, req->i.size)) {
			_req_may_be_done_not_susp(req, m);
			break;
		}
		D_ASSERT(!(req->rq_state & RQ_LOCAL_PENDING));
		req->rq_state |= RQ_LOCAL_PENDING;

		get_ldev(mdev);
		req->w.cb = w_restart_disk_io;
		drbd_queue_work(&mdev->tconn->data.work, &req->w);
		break;

	case FAIL_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;

		_req_may_be_done(req, m); /* Allowed while state.susp */
		break;

	case RESTART_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;

		req->rq_state &= ~RQ_LOCAL_COMPLETED;

		rv = MR_READ;
		if (bio_data_dir(req->master_bio) == WRITE)
			rv = MR_WRITE;

		get_ldev(mdev);
		req->w.cb = w_restart_disk_io;
		drbd_queue_work(&mdev->tconn->data.work, &req->w);
		break;

	case RESEND:
		/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
		   before the connection loss (B&C only); only P_BARRIER_ACK was missing.
		   Throwing them out of the TL here by pretending we got a BARRIER_ACK.
		   We ensure that the peer was not rebooted */
		if (!(req->rq_state & RQ_NET_OK)) {
			if (req->w.cb) {
				drbd_queue_work(&mdev->tconn->data.work, &req->w);
				rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
			}
			break;
		}
		/* else, fall through to BARRIER_ACKED */

	case BARRIER_ACKED:
		if (!(req->rq_state & RQ_WRITE))
			break;

		if (req->rq_state & RQ_NET_PENDING) {
			/* barrier came in before all requests were acked.
			 * this is bad, because if the connection is lost now,
			 * we won't be able to clean them up... */
			dev_err(DEV, "FIXME (BARRIER_ACKED but pending)\n");
			list_move(&req->tl_requests, &mdev->tconn->out_of_sequence_requests);
		}
		if ((req->rq_state & RQ_NET_MASK) != 0) {
			req->rq_state |= RQ_NET_DONE;
			if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)))
				atomic_sub(req->i.size>>9, &mdev->ap_in_flight);
		}
		_req_may_be_done(req, m); /* Allowed while state.susp */
		break;

	case DATA_RECEIVED:
		D_ASSERT(req->rq_state & RQ_NET_PENDING);
		dec_ap_pending(mdev);
		req->rq_state &= ~RQ_NET_PENDING;
		req->rq_state |= (RQ_NET_OK|RQ_NET_DONE);
		_req_may_be_done_not_susp(req, m);
		break;
	};

	return rv;
}

/* we may do a local read if:
 * - we are consistent (of course),
 * - or we are generally inconsistent,
 *   BUT we are still/already IN SYNC for this area.
 *   since size may be bigger than BM_BLOCK_SIZE,
 *   we may need to check several bits.
 */
static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size)
{
	unsigned long sbnr, ebnr;
	sector_t esector, nr_sectors;

	if (mdev->state.disk == D_UP_TO_DATE)
		return true;
	if (mdev->state.disk != D_INCONSISTENT)
		return false;
	esector = sector + (size >> 9) - 1;
	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	D_ASSERT(sector < nr_sectors);
	D_ASSERT(esector < nr_sectors);

	sbnr = BM_SECT_TO_BIT(sector);
	ebnr = BM_SECT_TO_BIT(esector);

	return drbd_bm_count_bits(mdev, sbnr, ebnr) == 0;
}

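/* Decide whether a read should be sent to the peer rather than served from
 * the local backing device, according to the configured read-balancing
 * policy: backing device congestion, pending request counts, striping by
 * sector, round robin, or a fixed local/remote preference. */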
static bool remote_due_to_read_balancing(struct drbd_conf *mdev, sector_t sector)
{
	enum drbd_read_balancing rbm;
	struct backing_dev_info *bdi;
	int stripe_shift;

	if (mdev->state.pdsk < D_UP_TO_DATE)
		return false;

	rcu_read_lock();
	rbm = rcu_dereference(mdev->ldev->disk_conf)->read_balancing;
	rcu_read_unlock();

	switch (rbm) {
	case RB_CONGESTED_REMOTE:
		bdi = &mdev->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
		return bdi_read_congested(bdi);
	case RB_LEAST_PENDING:
		return atomic_read(&mdev->local_cnt) >
			atomic_read(&mdev->ap_pending_cnt) + atomic_read(&mdev->rs_pending_cnt);
	case RB_32K_STRIPING:  /* stripe_shift = 15 */
	case RB_64K_STRIPING:
	case RB_128K_STRIPING:
	case RB_256K_STRIPING:
	case RB_512K_STRIPING:
	case RB_1M_STRIPING:  /* stripe_shift = 20 */
		stripe_shift = (rbm - RB_32K_STRIPING + 15);
		return (sector >> (stripe_shift - 9)) & 1;
	case RB_ROUND_ROBIN:
		return test_and_change_bit(READ_BALANCE_RR, &mdev->flags);
	case RB_PREFER_REMOTE:
		return true;
	case RB_PREFER_LOCAL:
	default:
		return false;
	}
}

/*
 * complete_conflicting_writes  -  wait for any conflicting write requests
 *
 * The write_requests tree contains all active write requests which we
 * currently know about.  Wait for any requests to complete which conflict with
 * the new one.
 */
static int complete_conflicting_writes(struct drbd_conf *mdev,
				       sector_t sector, int size)
{
	for(;;) {
		struct drbd_interval *i;
		int err;

		i = drbd_find_overlap(&mdev->write_requests, sector, size);
		if (!i)
			return 0;
		err = drbd_wait_misc(mdev, i);
		if (err)
			return err;
	}
}

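/* Core of the make_request path: allocate a drbd_request for @bio, decide
 * whether to serve it locally, remotely, or both, hook it into the current
 * epoch under the req_lock, queue the network work items, and finally
 * submit the private bio to the backing device if there is a local part.
 * Returns 0 when the bio has been taken care of (or failed), and 1 when
 * the caller should retry because I/O is currently suspended. */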
int __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
{
	const int rw = bio_rw(bio);
	const int size = bio->bi_size;
	const sector_t sector = bio->bi_sector;
	struct drbd_tl_epoch *b = NULL;
	struct drbd_request *req;
	struct net_conf *nc;
	int local, remote, send_oos = 0;
	int err;
	int ret = 0;
	union drbd_dev_state s;

	/* allocate outside of all locks; */
	req = drbd_req_new(mdev, bio);
	if (!req) {
		dec_ap_bio(mdev);
		/* only pass the error to the upper layers.
		 * if user cannot handle io errors, that's not our business. */
		dev_err(DEV, "could not kmalloc() req\n");
		bio_endio(bio, -ENOMEM);
		return 0;
	}
	req->start_time = start_time;

	local = get_ldev(mdev);
	if (!local) {
		bio_put(req->private_bio); /* or we get a bio leak */
		req->private_bio = NULL;
	}
	if (rw == WRITE) {
		remote = 1;
	} else {
		/* READ || READA */
		if (local) {
			if (!drbd_may_do_local_read(mdev, sector, size) ||
			    remote_due_to_read_balancing(mdev, sector)) {
				/* we could kick the syncer to
				 * sync this extent asap, wait for
				 * it, then continue locally.
				 * Or just issue the request remotely.
				 */
				local = 0;
				bio_put(req->private_bio);
				req->private_bio = NULL;
				put_ldev(mdev);
			}
		}
		remote = !local && mdev->state.pdsk >= D_UP_TO_DATE;
	}

	/* If we have a disk, but a READA request is mapped to remote,
	 * we are R_PRIMARY, D_INCONSISTENT, SyncTarget.
	 * Just fail that READA request right here.
	 *
	 * THINK: maybe fail all READA when not local?
	 *        or make this configurable...
	 *        if network is slow, READA won't do any good.
	 */
	if (rw == READA && mdev->state.disk >= D_INCONSISTENT && !local) {
		err = -EWOULDBLOCK;
		goto fail_and_free_req;
	}

	/* For WRITES going to the local disk, grab a reference on the target
	 * extent.  This waits for any resync activity in the corresponding
	 * resync extent to finish, and, if necessary, pulls in the target
	 * extent into the activity log, which involves further disk io because
	 * of transactional on-disk meta data updates. */
	if (rw == WRITE && local && !test_bit(AL_SUSPENDED, &mdev->flags)) {
		req->rq_state |= RQ_IN_ACT_LOG;
		drbd_al_begin_io(mdev, &req->i);
	}

	s = mdev->state;
	remote = remote && drbd_should_do_remote(s);
	send_oos = rw == WRITE && drbd_should_send_out_of_sync(s);
	D_ASSERT(!(remote && send_oos));

	if (!(local || remote) && !drbd_suspended(mdev)) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
		err = -EIO;
		goto fail_free_complete;
	}

	/* For WRITE request, we have to make sure that we have an
	 * unused_spare_tle, in case we need to start a new epoch.
	 * I try to be smart and avoid to pre-allocate always "just in case",
	 * but there is a race between testing the bit and pointer outside the
	 * spinlock, and grabbing the spinlock.
	 * if we lost that race, we retry. */
	if (rw == WRITE && (remote || send_oos) &&
	    mdev->tconn->unused_spare_tle == NULL &&
	    test_bit(CREATE_BARRIER, &mdev->tconn->flags)) {
allocate_barrier:
		b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO);
		if (!b) {
			dev_err(DEV, "Failed to alloc barrier.\n");
			err = -ENOMEM;
			goto fail_free_complete;
		}
	}

	/* GOOD, everything prepared, grab the spin_lock */
	spin_lock_irq(&mdev->tconn->req_lock);

	if (rw == WRITE) {
		err = complete_conflicting_writes(mdev, sector, size);
		if (err) {
			if (err != -ERESTARTSYS)
				_conn_request_state(mdev->tconn,
						    NS(conn, C_TIMEOUT),
						    CS_HARD);
			spin_unlock_irq(&mdev->tconn->req_lock);
			err = -EIO;
			goto fail_free_complete;
		}
	}

	if (drbd_suspended(mdev)) {
		/* If we got suspended, use the retry mechanism in
		   drbd_make_request() to restart processing of this
		   bio. In the next call to drbd_make_request
		   we sleep in inc_ap_bio() */
		ret = 1;
		spin_unlock_irq(&mdev->tconn->req_lock);
		goto fail_free_complete;
	}

	if (remote || send_oos) {
		remote = drbd_should_do_remote(mdev->state);
		send_oos = rw == WRITE && drbd_should_send_out_of_sync(mdev->state);
		D_ASSERT(!(remote && send_oos));

		if (!(remote || send_oos))
			dev_warn(DEV, "lost connection while grabbing the req_lock!\n");
		if (!(local || remote)) {
			dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
			spin_unlock_irq(&mdev->tconn->req_lock);
			err = -EIO;
			goto fail_free_complete;
		}
	}

	if (b && mdev->tconn->unused_spare_tle == NULL) {
		mdev->tconn->unused_spare_tle = b;
		b = NULL;
	}
	if (rw == WRITE && (remote || send_oos) &&
	    mdev->tconn->unused_spare_tle == NULL &&
	    test_bit(CREATE_BARRIER, &mdev->tconn->flags)) {
		/* someone closed the current epoch
		 * while we were grabbing the spinlock */
		spin_unlock_irq(&mdev->tconn->req_lock);
		goto allocate_barrier;
	}


	/* Update disk stats */
	_drbd_start_io_acct(mdev, req, bio);

	/* _maybe_start_new_epoch(mdev);
	 * If we need to generate a write barrier packet, we have to add the
	 * new epoch (barrier) object, and queue the barrier packet for sending,
	 * and queue the req's data after it _within the same lock_, otherwise
	 * we have race conditions where the reorder domains could be mixed up.
	 *
	 * Even read requests may start a new epoch and queue the corresponding
	 * barrier packet.  To get the write ordering right, we only have to
	 * make sure that, if this is a write request and it triggered a
	 * barrier packet, this request is queued within the same spinlock. */
	if ((remote || send_oos) && mdev->tconn->unused_spare_tle &&
	    test_and_clear_bit(CREATE_BARRIER, &mdev->tconn->flags)) {
		_tl_add_barrier(mdev->tconn, mdev->tconn->unused_spare_tle);
		mdev->tconn->unused_spare_tle = NULL;
	} else {
		D_ASSERT(!(remote && rw == WRITE &&
			   test_bit(CREATE_BARRIER, &mdev->tconn->flags)));
	}

	/* NOTE
	 * Actually, 'local' may be wrong here already, since we may have failed
	 * to write to the meta data, and may become wrong anytime because of
	 * local io-error for some other request, which would lead to us
	 * "detaching" the local disk.
	 *
	 * 'remote' may become wrong any time because the network could fail.
	 *
	 * This is a harmless race condition, though, since it is handled
	 * correctly at the appropriate places; so it just defers the failure
	 * of the respective operation.
	 */

	/* mark them early for readability.
	 * this just sets some state flags. */
	if (remote)
		_req_mod(req, TO_BE_SENT);
	if (local)
		_req_mod(req, TO_BE_SUBMITTED);

	list_add_tail(&req->tl_requests, &mdev->tconn->newest_tle->requests);

	/* NOTE remote first: to get the concurrent write detection right,
	 * we must register the request before start of local IO. */
	if (remote) {
		/* either WRITE and C_CONNECTED,
		 * or READ, and no local disk,
		 * or READ, but not in sync.
		 */
		_req_mod(req, (rw == WRITE)
				? QUEUE_FOR_NET_WRITE
				: QUEUE_FOR_NET_READ);
	}
	if (send_oos && drbd_set_out_of_sync(mdev, sector, size))
		_req_mod(req, QUEUE_FOR_SEND_OOS);

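	/* Congestion policy: if on-congestion is not "block" and the peer
	 * speaks protocol 96 or newer, check the configured fill level and
	 * activity-log usage; once a threshold is reached, close the epoch
	 * and go to Ahead or Disconnecting. */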
	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	if (remote &&
	    nc->on_congestion != OC_BLOCK && mdev->tconn->agreed_pro_version >= 96) {
		int congested = 0;

		if (nc->cong_fill &&
		    atomic_read(&mdev->ap_in_flight) >= nc->cong_fill) {
			dev_info(DEV, "Congestion-fill threshold reached\n");
			congested = 1;
		}

		if (mdev->act_log->used >= nc->cong_extents) {
			dev_info(DEV, "Congestion-extents threshold reached\n");
			congested = 1;
		}

		if (congested) {
			queue_barrier(mdev); /* last barrier, after mirrored writes */

			if (nc->on_congestion == OC_PULL_AHEAD)
				_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
			else  /*nc->on_congestion == OC_DISCONNECT */
				_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
		}
	}
	rcu_read_unlock();

	spin_unlock_irq(&mdev->tconn->req_lock);
	kfree(b); /* if someone else has beaten us to it... */

	if (local) {
		req->private_bio->bi_bdev = mdev->ldev->backing_bdev;

		/* State may have changed since we grabbed our reference on the
		 * mdev->ldev member. Double check, and short-circuit to endio.
		 * In case the last activity log transaction failed to get on
		 * stable storage, and this is a WRITE, we may not even submit
		 * this bio. */
		if (get_ldev(mdev)) {
			if (drbd_insert_fault(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
					      : rw == READ ? DRBD_FAULT_DT_RD
					      :              DRBD_FAULT_DT_RA))
				bio_endio(req->private_bio, -EIO);
			else
				generic_make_request(req->private_bio);
			put_ldev(mdev);
		} else
			bio_endio(req->private_bio, -EIO);
	}

	return 0;

fail_free_complete:
	if (req->rq_state & RQ_IN_ACT_LOG)
		drbd_al_complete_io(mdev, &req->i);
fail_and_free_req:
	if (local) {
		bio_put(req->private_bio);
		req->private_bio = NULL;
		put_ldev(mdev);
	}
	if (!ret)
		bio_endio(bio, err);

	drbd_req_free(req);
	dec_ap_bio(mdev);
	kfree(b);

	return ret;
}

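/* make_request function of the DRBD block device: grab an application-bio
 * reference and hand the bio to __drbd_make_request(), retrying for as long
 * as it asks us to (i.e. while I/O is suspended). */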
int drbd_make_request(struct request_queue *q, struct bio *bio)
{
	struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
	unsigned long start_time;

	start_time = jiffies;

	/*
	 * what we "blindly" assume:
	 */
	D_ASSERT(bio->bi_size > 0);
	D_ASSERT(IS_ALIGNED(bio->bi_size, 512));

	do {
		inc_ap_bio(mdev);
	} while (__drbd_make_request(mdev, bio, start_time));

	return 0;
}

/* This is called by bio_add_page().
 *
 * q->max_hw_sectors and other global limits are already enforced there.
 *
 * We need to call down to our lower level device,
 * in case it has special restrictions.
 *
 * We also may need to enforce configured max-bio-bvecs limits.
 *
 * As long as the BIO is empty we have to allow at least one bvec,
 * regardless of size and offset, so no need to ask lower levels.
 */
int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
{
	struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
	unsigned int bio_size = bvm->bi_size;
	int limit = DRBD_MAX_BIO_SIZE;
	int backing_limit;

	if (bio_size && get_ldev(mdev)) {
		struct request_queue * const b =
			mdev->ldev->backing_bdev->bd_disk->queue;
		if (b->merge_bvec_fn) {
			backing_limit = b->merge_bvec_fn(b, bvm, bvec);
			limit = min(limit, backing_limit);
		}
		put_ldev(mdev);
	}
	return limit;
}

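/* Per-device request timeout timer: if the oldest request in the transfer
 * log has been waiting for the peer longer than ko-count * timeout, force
 * the connection into C_TIMEOUT; if it has been pending on the local disk
 * longer than disk-timeout, treat that as a local I/O error.  The timer
 * then re-arms itself relative to the oldest request. */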
void request_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;
	struct drbd_tconn *tconn = mdev->tconn;
	struct drbd_request *req; /* oldest request */
	struct list_head *le;
	struct net_conf *nc;
	unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	ent = nc ? nc->timeout * HZ/10 * nc->ko_count : 0;

	if (get_ldev(mdev)) {
		dt = rcu_dereference(mdev->ldev->disk_conf)->disk_timeout * HZ / 10;
		put_ldev(mdev);
	}
	rcu_read_unlock();

	et = min_not_zero(dt, ent);

	if (!et || (mdev->state.conn < C_WF_REPORT_PARAMS && mdev->state.disk <= D_FAILED))
		return; /* Recurring timer stopped */

	spin_lock_irq(&tconn->req_lock);
	le = &tconn->oldest_tle->requests;
	if (list_empty(le)) {
		spin_unlock_irq(&tconn->req_lock);
		mod_timer(&mdev->request_timer, jiffies + et);
		return;
	}

	le = le->prev;
	req = list_entry(le, struct drbd_request, tl_requests);
	if (ent && req->rq_state & RQ_NET_PENDING) {
		if (time_is_before_eq_jiffies(req->start_time + ent)) {
			dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
			_drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
		}
	}
	if (dt && req->rq_state & RQ_LOCAL_PENDING && req->w.mdev == mdev) {
		if (time_is_before_eq_jiffies(req->start_time + dt)) {
			dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
			__drbd_chk_io_error(mdev, 1);
		}
	}
	nt = (time_is_before_eq_jiffies(req->start_time + et) ? jiffies : req->start_time) + et;
	spin_unlock_irq(&tconn->req_lock);
	mod_timer(&mdev->request_timer, nt);
}