/*
   drbd_worker.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

#include "drbd_int.h"
#include "drbd_req.h"

static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel);
static int w_make_resync_request(struct drbd_conf *mdev,
				 struct drbd_work *w, int cancel);


/* endio handlers:
 *   drbd_md_io_complete (defined here)
 *   drbd_endio_pri (defined here)
 *   drbd_endio_sec (defined here)
 *   bm_async_io_complete (defined in drbd_bitmap.c)
 *
 * For all these callbacks, note the following:
 * The callbacks will be called in irq context by the IDE drivers,
 * and in Softirqs/Tasklets/BH context by the SCSI drivers.
 * Try to get the locking right :)
 *
 */

/* About the global_state_lock
   Each state transition on a device holds a read lock. In case we have
   to evaluate the sync-after dependencies, we grab a write lock, because
   we need stable states on all devices for that.  */
rwlock_t global_state_lock;

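/* A minimal usage sketch of that convention (see resume_next_sg(),
 * suspend_other_sg() and drbd_alter_sa() below for the actual write-side
 * users in this file):
 *
 *	read_lock(&global_state_lock);
 *	...transition the state of a single device...
 *	read_unlock(&global_state_lock);
 *
 *	write_lock_irq(&global_state_lock);
 *	...walk the sync-after dependencies of all devices...
 *	write_unlock_irq(&global_state_lock);
 */
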
/* used for synchronous meta data and bitmap IO
 * submitted by drbd_md_sync_page_io()
 */
void drbd_md_io_complete(struct bio *bio, int error)
{
	struct drbd_md_io *md_io;

	md_io = (struct drbd_md_io *)bio->bi_private;
	md_io->error = error;

	complete(&md_io->event);
}

/* reads on behalf of the partner,
 * "submitted" by the receiver
 */
void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
	unsigned long flags = 0;
	struct drbd_conf *mdev = peer_req->mdev;

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	mdev->read_cnt += peer_req->i.size >> 9;
	list_del(&peer_req->w.list);
	if (list_empty(&mdev->read_ee))
		wake_up(&mdev->ee_wait);
	if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
		__drbd_chk_io_error(mdev, false);
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	drbd_queue_work(&mdev->tconn->data.work, &peer_req->w);
	put_ldev(mdev);
}

/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver, final stage.  */
static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
	unsigned long flags = 0;
	struct drbd_conf *mdev = peer_req->mdev;
	sector_t e_sector;
	int do_wake;
	u64 block_id;
	int do_al_complete_io;

	/* after we moved peer_req to done_ee,
	 * we may no longer access it,
	 * it may be freed/reused already!
	 * (as soon as we release the req_lock) */
	e_sector = peer_req->i.sector;
	do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
	block_id = peer_req->block_id;

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	mdev->writ_cnt += peer_req->i.size >> 9;
	list_del(&peer_req->w.list); /* has been on active_ee or sync_ee */
	list_add_tail(&peer_req->w.list, &mdev->done_ee);

	/*
	 * Do not remove from the write_requests tree here: we did not send the
	 * Ack yet and did not wake possibly waiting conflicting requests.
	 * Removed from the tree from "drbd_process_done_ee" within the
	 * appropriate w.cb (e_end_block/e_end_resync_block) or from
	 * _drbd_clear_done_ee.
	 */

	do_wake = list_empty(block_id == ID_SYNCER ? &mdev->sync_ee : &mdev->active_ee);

	if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
		__drbd_chk_io_error(mdev, false);
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	if (block_id == ID_SYNCER)
		drbd_rs_complete_io(mdev, e_sector);

	if (do_wake)
		wake_up(&mdev->ee_wait);

	if (do_al_complete_io)
		drbd_al_complete_io(mdev, e_sector);

	wake_asender(mdev);
	put_ldev(mdev);
}

/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver.
 */
void drbd_endio_sec(struct bio *bio, int error)
{
	struct drbd_peer_request *peer_req = bio->bi_private;
	struct drbd_conf *mdev = peer_req->mdev;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);
	int is_write = bio_data_dir(bio) == WRITE;

	if (error && __ratelimit(&drbd_ratelimit_state))
		dev_warn(DEV, "%s: error=%d s=%llus\n",
				is_write ? "write" : "read", error,
				(unsigned long long)peer_req->i.sector);
	if (!error && !uptodate) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
					is_write ? "write" : "read",
					(unsigned long long)peer_req->i.sector);
		/* strange behavior of some lower level drivers...
		 * fail the request by clearing the uptodate flag,
		 * but do not return any error?! */
		error = -EIO;
	}

	if (error)
		set_bit(__EE_WAS_ERROR, &peer_req->flags);

	bio_put(bio); /* no need for the bio anymore */
	if (atomic_dec_and_test(&peer_req->pending_bios)) {
		if (is_write)
			drbd_endio_write_sec_final(peer_req);
		else
			drbd_endio_read_sec_final(peer_req);
	}
}

/* read, readA or write requests on R_PRIMARY coming from drbd_make_request
 */
void drbd_endio_pri(struct bio *bio, int error)
{
	unsigned long flags;
	struct drbd_request *req = bio->bi_private;
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;
	enum drbd_req_event what;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);

	if (!error && !uptodate) {
		dev_warn(DEV, "p %s: setting error to -EIO\n",
			 bio_data_dir(bio) == WRITE ? "write" : "read");
		/* strange behavior of some lower level drivers...
		 * fail the request by clearing the uptodate flag,
		 * but do not return any error?! */
		error = -EIO;
	}

	/* to avoid recursion in __req_mod */
	if (unlikely(error)) {
		what = (bio_data_dir(bio) == WRITE)
			? WRITE_COMPLETED_WITH_ERROR
			: (bio_rw(bio) == READ)
			  ? READ_COMPLETED_WITH_ERROR
			  : READ_AHEAD_COMPLETED_WITH_ERROR;
	} else
		what = COMPLETED_OK;

	bio_put(req->private_bio);
	req->private_bio = ERR_PTR(error);

	/* not req_mod(), we need irqsave here! */
	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	__req_mod(req, what, &m);
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	if (m.bio)
		complete_master_bio(mdev, &m);
}

int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);

	/* We should not detach for read io-error,
	 * but try to WRITE the P_DATA_REPLY to the failed location,
	 * to give the disk the chance to relocate that block */

	spin_lock_irq(&mdev->tconn->req_lock);
	if (cancel || mdev->state.pdsk != D_UP_TO_DATE) {
		_req_mod(req, READ_RETRY_REMOTE_CANCELED);
		spin_unlock_irq(&mdev->tconn->req_lock);
		return 1;
	}
	spin_unlock_irq(&mdev->tconn->req_lock);

	return w_send_read_req(mdev, w, 0);
}

void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm,
		  struct drbd_peer_request *peer_req, void *digest)
{
	struct hash_desc desc;
	struct scatterlist sg;
	struct page *page = peer_req->pages;
	struct page *tmp;
	unsigned len;

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_table(&sg, 1);
	crypto_hash_init(&desc);

	while ((tmp = page_chain_next(page))) {
		/* all but the last page will be fully used */
		sg_set_page(&sg, page, PAGE_SIZE, 0);
		crypto_hash_update(&desc, &sg, sg.length);
		page = tmp;
	}
	/* and now the last, possibly only partially used page */
	len = peer_req->i.size & (PAGE_SIZE - 1);
	sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
	crypto_hash_update(&desc, &sg, sg.length);
	crypto_hash_final(&desc, digest);
}

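/* Worked example for the tail handling above, assuming PAGE_SIZE is 4096:
 * for peer_req->i.size == 9216 (9 KiB) the page chain holds three pages;
 * the first two are hashed with the full PAGE_SIZE, and the last with
 * len = 9216 & 4095 = 1024 bytes.  When the size is an exact multiple of
 * PAGE_SIZE, len is 0 and the "len ?: PAGE_SIZE" idiom hashes the full
 * last page instead of a zero-length one. */
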
void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
{
	struct hash_desc desc;
	struct scatterlist sg;
	struct bio_vec *bvec;
	int i;

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_table(&sg, 1);
	crypto_hash_init(&desc);

	__bio_for_each_segment(bvec, bio, i, 0) {
		sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
		crypto_hash_update(&desc, &sg, sg.length);
	}
	crypto_hash_final(&desc, digest);
}

/* TODO merge common code with w_e_end_ov_req */
int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	int digest_size;
	void *digest;
	int ok = 1;

	if (unlikely(cancel))
		goto out;

	if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
		goto out;

	digest_size = crypto_hash_digestsize(mdev->csums_tfm);
	digest = kmalloc(digest_size, GFP_NOIO);
	if (digest) {
		sector_t sector = peer_req->i.sector;
		unsigned int size = peer_req->i.size;
		drbd_csum_ee(mdev, mdev->csums_tfm, peer_req, digest);
		/* Free peer_req and pages before send.
		 * In case we block on congestion, we could otherwise run into
		 * some distributed deadlock, if the other side blocks on
		 * congestion as well, because our receiver blocks in
		 * drbd_pp_alloc due to pp_in_use > max_buffers. */
		drbd_free_ee(mdev, peer_req);
		peer_req = NULL;
		inc_rs_pending(mdev);
		ok = drbd_send_drequest_csum(mdev, sector, size,
					     digest, digest_size,
					     P_CSUM_RS_REQUEST);
		kfree(digest);
	} else {
		dev_err(DEV, "kmalloc() of digest failed.\n");
		ok = 0;
	}

out:
	if (peer_req)
		drbd_free_ee(mdev, peer_req);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
	return ok;
}

#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
{
	struct drbd_peer_request *peer_req;

	if (!get_ldev(mdev))
		return -EIO;

	if (drbd_rs_should_slow_down(mdev, sector))
		goto defer;

	/* GFP_TRY, because if there is no memory available right now, this may
	 * be rescheduled for later. It is "only" background resync, after all. */
	peer_req = drbd_alloc_ee(mdev, ID_SYNCER /* unused */, sector, size, GFP_TRY);
	if (!peer_req)
		goto defer;

	peer_req->w.cb = w_e_send_csum;
	spin_lock_irq(&mdev->tconn->req_lock);
	list_add(&peer_req->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	atomic_add(size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
		return 0;

	/* If it failed because of ENOMEM, retry should help.  If it failed
	 * because bio_add_page failed (probably broken lower level driver),
	 * retry may or may not help.
	 * If it does not, you may need to force disconnect. */
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&peer_req->w.list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	drbd_free_ee(mdev, peer_req);
defer:
	put_ldev(mdev);
	return -EAGAIN;
}

int w_resync_timer(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	switch (mdev->state.conn) {
	case C_VERIFY_S:
		w_make_ov_request(mdev, w, cancel);
		break;
	case C_SYNC_TARGET:
		w_make_resync_request(mdev, w, cancel);
		break;
	}

	return 1;
}

void resync_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;

	if (list_empty(&mdev->resync_work.list))
		drbd_queue_work(&mdev->tconn->data.work, &mdev->resync_work);
}

static void fifo_set(struct fifo_buffer *fb, int value)
{
	int i;

	for (i = 0; i < fb->size; i++)
		fb->values[i] = value;
}

static int fifo_push(struct fifo_buffer *fb, int value)
{
	int ov;

	ov = fb->values[fb->head_index];
	fb->values[fb->head_index++] = value;

	if (fb->head_index >= fb->size)
		fb->head_index = 0;

	return ov;
}

static void fifo_add_val(struct fifo_buffer *fb, int value)
{
	int i;

	for (i = 0; i < fb->size; i++)
		fb->values[i] += value;
}

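/* These helpers implement the planning buffer used by drbd_rs_controller()
 * below.  A sketch of the intended semantics, assuming fb->size == 3 and an
 * all-zero buffer to start with:
 *
 *	fifo_add_val(fb, 100);       values are now { 100, 100, 100 }
 *	ov = fifo_push(fb, 0);       returns 100, stores 0, advances head
 *
 * A correction added via fifo_add_val() is thus spread over fb->size future
 * steps, and each fifo_push(fb, 0) pops the share planned for the current
 * step.  The buffer acts as a fixed-length delay line, not as a queue. */
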
static int drbd_rs_controller(struct drbd_conf *mdev)
{
	unsigned int sect_in;  /* Number of sectors that came in since the last turn */
	unsigned int want;     /* The number of sectors we want in the proxy */
	int req_sect; /* Number of sectors to request in this turn */
	int correction; /* Number of sectors more we need in the proxy */
	int cps; /* correction per invocation of drbd_rs_controller() */
	int steps; /* Number of time steps to plan ahead */
	int curr_corr;
	int max_sect;

	sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
	mdev->rs_in_flight -= sect_in;

	spin_lock(&mdev->peer_seq_lock); /* get an atomic view on mdev->rs_plan_s */

	steps = mdev->rs_plan_s.size; /* (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ; */

	if (mdev->rs_in_flight + sect_in == 0) { /* At start of resync */
		want = ((mdev->sync_conf.rate * 2 * SLEEP_TIME) / HZ) * steps;
	} else { /* normal path */
		want = mdev->sync_conf.c_fill_target ? mdev->sync_conf.c_fill_target :
			sect_in * mdev->sync_conf.c_delay_target * HZ / (SLEEP_TIME * 10);
	}

	correction = want - mdev->rs_in_flight - mdev->rs_planed;

	/* Plan ahead */
	cps = correction / steps;
	fifo_add_val(&mdev->rs_plan_s, cps);
	mdev->rs_planed += cps * steps;

	/* What we do in this step */
	curr_corr = fifo_push(&mdev->rs_plan_s, 0);
	spin_unlock(&mdev->peer_seq_lock);
	mdev->rs_planed -= curr_corr;

	req_sect = sect_in + curr_corr;
	if (req_sect < 0)
		req_sect = 0;

	max_sect = (mdev->sync_conf.c_max_rate * 2 * SLEEP_TIME) / HZ;
	if (req_sect > max_sect)
		req_sect = max_sect;

	/*
	dev_warn(DEV, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
		 sect_in, mdev->rs_in_flight, want, correction,
		 steps, cps, mdev->rs_planed, curr_corr, req_sect);
	*/

	return req_sect;
}

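/* A rough worked example of one controller step, assuming SLEEP_TIME is
 * HZ/10 (100ms), c_plan_ahead = 20 (so steps = 20) and c_fill_target = 0:
 * with sect_in = 1000 sectors during the last 100ms and c_delay_target = 5
 * (0.5s), the HZ factors cancel and want = 1000 * 5 = 5000 sectors in
 * flight.  With rs_in_flight = 3000 and rs_planed = 0, correction = 2000,
 * so cps = 100 is added to each of the 20 plan slots, and the slot popped
 * for this step yields curr_corr = 100, giving req_sect = 1000 + 100 = 1100.
 * Over the following steps the plan drains until "want" is reached. */
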
static int drbd_rs_number_requests(struct drbd_conf *mdev)
{
	int number;
	if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
		number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
		mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
	} else {
		mdev->c_sync_rate = mdev->sync_conf.rate;
		number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
	}

	/* ignore the amount of pending requests, the resync controller should
	 * throttle down to incoming reply rate soon enough anyways. */
	return number;
}

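/* Unit check for the fixed-rate branch above, assuming SLEEP_TIME is HZ/10
 * and BM_BLOCK_SIZE is 4 KiB, with the rate configured in KiB/s as usual
 * for DRBD: with sync_conf.rate = 400, number = (HZ/10) * 400 / (4 * HZ)
 * = 10 requests of one bitmap block (4 KiB) each per 100ms slot, which is
 * exactly 400 KiB/s.  The controller branch converts the other way round,
 * from sectors to bitmap blocks, via ">> (BM_BLOCK_SHIFT - 9)". */
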
static int w_make_resync_request(struct drbd_conf *mdev,
				 struct drbd_work *w, int cancel)
{
	unsigned long bit;
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	int max_bio_size;
	int number, rollback_i, size;
	int align, queued, sndbuf;
	int i = 0;

	if (unlikely(cancel))
		return 1;

	if (mdev->rs_total == 0) {
		/* empty resync? */
		drbd_resync_finished(mdev);
		return 1;
	}

	if (!get_ldev(mdev)) {
		/* Since we only need to access mdev->rsync a
		   get_ldev_if_state(mdev,D_FAILED) would be sufficient, but
		   to continue resync with a broken disk makes no sense at
		   all */
		dev_err(DEV, "Disk broke down during resync!\n");
		return 1;
	}

	max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
	number = drbd_rs_number_requests(mdev);
	if (number == 0)
		goto requeue;

	for (i = 0; i < number; i++) {
		/* Stop generating RS requests, when half of the send buffer is filled */
		mutex_lock(&mdev->tconn->data.mutex);
		if (mdev->tconn->data.socket) {
			queued = mdev->tconn->data.socket->sk->sk_wmem_queued;
			sndbuf = mdev->tconn->data.socket->sk->sk_sndbuf;
		} else {
			queued = 1;
			sndbuf = 0;
		}
		mutex_unlock(&mdev->tconn->data.mutex);
		if (queued > sndbuf / 2)
			goto requeue;

next_sector:
		size = BM_BLOCK_SIZE;
		bit  = drbd_bm_find_next(mdev, mdev->bm_resync_fo);

		if (bit == DRBD_END_OF_BITMAP) {
			mdev->bm_resync_fo = drbd_bm_bits(mdev);
			put_ldev(mdev);
			return 1;
		}

		sector = BM_BIT_TO_SECT(bit);

		if (drbd_rs_should_slow_down(mdev, sector) ||
		    drbd_try_rs_begin_io(mdev, sector)) {
			mdev->bm_resync_fo = bit;
			goto requeue;
		}
		mdev->bm_resync_fo = bit + 1;

		if (unlikely(drbd_bm_test_bit(mdev, bit) == 0)) {
			drbd_rs_complete_io(mdev, sector);
			goto next_sector;
		}

#if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE
		/* try to find some adjacent bits.
		 * we stop if we have already the maximum req size.
		 *
		 * Additionally always align bigger requests, in order to
		 * be prepared for all stripe sizes of software RAIDs.
		 */
		align = 1;
		rollback_i = i;
		for (;;) {
			if (size + BM_BLOCK_SIZE > max_bio_size)
				break;

			/* Be always aligned */
			if (sector & ((1<<(align+3))-1))
				break;

			/* do not cross extent boundaries */
			if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0)
				break;
			/* now, is it actually dirty, after all?
			 * caution, drbd_bm_test_bit is tri-state for some
			 * obscure reason; ( b == 0 ) would get the out-of-band
			 * only accidentally right because of the "oddly sized"
			 * adjustment below */
			if (drbd_bm_test_bit(mdev, bit+1) != 1)
				break;
			bit++;
			size += BM_BLOCK_SIZE;
			if ((BM_BLOCK_SIZE << align) <= size)
				align++;
			i++;
		}
		/* if we merged some,
		 * reset the offset to start the next drbd_bm_find_next from */
		if (size > BM_BLOCK_SIZE)
			mdev->bm_resync_fo = bit + 1;
#endif

		/* adjust very last sectors, in case we are oddly sized */
		if (sector + (size>>9) > capacity)
			size = (capacity-sector)<<9;
		if (mdev->tconn->agreed_pro_version >= 89 && mdev->csums_tfm) {
			switch (read_for_csum(mdev, sector, size)) {
			case -EIO: /* Disk failure */
				put_ldev(mdev);
				return 0;
			case -EAGAIN: /* allocation failed, or ldev busy */
				drbd_rs_complete_io(mdev, sector);
				mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
				i = rollback_i;
				goto requeue;
			case 0:
				/* everything ok */
				break;
			default:
				BUG();
			}
		} else {
			inc_rs_pending(mdev);
			if (!drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
						sector, size, ID_SYNCER)) {
				dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
				dec_rs_pending(mdev);
				put_ldev(mdev);
				return 0;
			}
		}
	}

	if (mdev->bm_resync_fo >= drbd_bm_bits(mdev)) {
		/* last syncer _request_ was sent,
		 * but the P_RS_DATA_REPLY not yet received.  sync will end (and
		 * next sync group will resume), as soon as we receive the last
		 * resync data block, and the last bit is cleared.
		 * until then resync "work" is "inactive" ...
		 */
		put_ldev(mdev);
		return 1;
	}

requeue:
	mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
	mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
	put_ldev(mdev);
	return 1;
}

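/* Worked example for the merge loop in w_make_resync_request() above,
 * assuming BM_BLOCK_SIZE is 4 KiB (8 sectors): with align = 1 the mask
 * (1<<(align+3))-1 requires 16-sector (8 KiB) alignment before a second
 * block may be merged; once size reaches 8 KiB, align becomes 2 and a
 * 32-sector (16 KiB) boundary is required to grow further.  A request
 * starting at sector 16 (8 KiB aligned, but not 16 KiB aligned) therefore
 * merges one extra block and stops at 8 KiB: 16 & 15 == 0 passes, then
 * 16 & 31 == 16 breaks the loop.  The net effect is that requests stay
 * roughly aligned to their own size, which suits RAID stripe layouts. */
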
static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	int number, i, size;
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);

	if (unlikely(cancel))
		return 1;

	number = drbd_rs_number_requests(mdev);

	sector = mdev->ov_position;
	for (i = 0; i < number; i++) {
		if (sector >= capacity) {
			return 1;
		}

		size = BM_BLOCK_SIZE;

		if (drbd_rs_should_slow_down(mdev, sector) ||
		    drbd_try_rs_begin_io(mdev, sector)) {
			mdev->ov_position = sector;
			goto requeue;
		}

		if (sector + (size>>9) > capacity)
			size = (capacity-sector)<<9;

		inc_rs_pending(mdev);
		if (!drbd_send_ov_request(mdev, sector, size)) {
			dec_rs_pending(mdev);
			return 0;
		}
		sector += BM_SECT_PER_BIT;
	}
	mdev->ov_position = sector;

requeue:
	mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
	mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
	return 1;
}


void start_resync_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;

	drbd_queue_work(&mdev->tconn->data.work, &mdev->start_resync_work);
}

int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) {
		dev_warn(DEV, "w_start_resync later...\n");
		mdev->start_resync_timer.expires = jiffies + HZ/10;
		add_timer(&mdev->start_resync_timer);
		return 1;
	}

	drbd_start_resync(mdev, C_SYNC_SOURCE);
	clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags);
	return 1;
}

int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	kfree(w);
	ov_oos_print(mdev);
	drbd_resync_finished(mdev);

	return 1;
}

static int w_resync_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	kfree(w);

	drbd_resync_finished(mdev);

	return 1;
}

static void ping_peer(struct drbd_conf *mdev)
{
	clear_bit(GOT_PING_ACK, &mdev->flags);
	request_ping(mdev);
	wait_event(mdev->misc_wait,
		   test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED);
}

int drbd_resync_finished(struct drbd_conf *mdev)
{
	unsigned long db, dt, dbdt;
	unsigned long n_oos;
	union drbd_state os, ns;
	struct drbd_work *w;
	char *khelper_cmd = NULL;
	int verify_done = 0;

	/* Remove all elements from the resync LRU. Since future actions
	 * might set bits in the (main) bitmap, then the entries in the
	 * resync LRU would be wrong. */
	if (drbd_rs_del_all(mdev)) {
		/* In case this is not possible now, most probably because
		 * there are P_RS_DATA_REPLY Packets lingering on the worker's
		 * queue (or even the read operations for those packets
		 * is not finished by now). Retry in 100ms. */

		schedule_timeout_interruptible(HZ / 10);
		w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
		if (w) {
			w->cb = w_resync_finished;
			drbd_queue_work(&mdev->tconn->data.work, w);
			return 1;
		}
		dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
	}

	dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
	if (dt <= 0)
		dt = 1;
	db = mdev->rs_total;
	dbdt = Bit2KB(db/dt);
	mdev->rs_paused /= HZ;

	if (!get_ldev(mdev))
		goto out;

	ping_peer(mdev);

	spin_lock_irq(&mdev->tconn->req_lock);
	os = mdev->state;

	verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);

	/* This protects us against multiple calls (that can happen in the presence
	   of application IO), and against connectivity loss just before we arrive here. */
	if (os.conn <= C_CONNECTED)
		goto out_unlock;

	ns = os;
	ns.conn = C_CONNECTED;

	dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
	     verify_done ? "Online verify " : "Resync",
	     dt + mdev->rs_paused, mdev->rs_paused, dbdt);

	n_oos = drbd_bm_total_weight(mdev);

	if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) {
		if (n_oos) {
			dev_alert(DEV, "Online verify found %lu %dk blocks out of sync!\n",
			      n_oos, Bit2KB(1));
			khelper_cmd = "out-of-sync";
		}
	} else {
		D_ASSERT((n_oos - mdev->rs_failed) == 0);

		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
			khelper_cmd = "after-resync-target";

		if (mdev->csums_tfm && mdev->rs_total) {
			const unsigned long s = mdev->rs_same_csum;
			const unsigned long t = mdev->rs_total;
			const int ratio =
				(t == 0)     ? 0 :
			(t < 100000) ? ((s*100)/t) : (s/(t/100));
			dev_info(DEV, "%u %% had equal checksums, eliminated: %luK; "
			     "transferred %luK total %luK\n",
			     ratio,
			     Bit2KB(mdev->rs_same_csum),
			     Bit2KB(mdev->rs_total - mdev->rs_same_csum),
			     Bit2KB(mdev->rs_total));
		}
	}

	if (mdev->rs_failed) {
		dev_info(DEV, "            %lu failed blocks\n", mdev->rs_failed);

		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
			ns.disk = D_INCONSISTENT;
			ns.pdsk = D_UP_TO_DATE;
		} else {
			ns.disk = D_UP_TO_DATE;
			ns.pdsk = D_INCONSISTENT;
		}
	} else {
		ns.disk = D_UP_TO_DATE;
		ns.pdsk = D_UP_TO_DATE;

		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
			if (mdev->p_uuid) {
				int i;
				for (i = UI_BITMAP ; i <= UI_HISTORY_END ; i++)
					_drbd_uuid_set(mdev, i, mdev->p_uuid[i]);
				drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_CURRENT]);
				_drbd_uuid_set(mdev, UI_CURRENT, mdev->p_uuid[UI_CURRENT]);
			} else {
				dev_err(DEV, "mdev->p_uuid is NULL! BUG\n");
			}
		}

		if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) {
			/* for verify runs, we don't update uuids here,
			 * so there would be nothing to report. */
			drbd_uuid_set_bm(mdev, 0UL);
			drbd_print_uuids(mdev, "updated UUIDs");
			if (mdev->p_uuid) {
				/* Now the two UUID sets are equal, update what we
				 * know of the peer. */
				int i;
				for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
					mdev->p_uuid[i] = mdev->ldev->md.uuid[i];
			}
		}
	}

	_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
out_unlock:
	spin_unlock_irq(&mdev->tconn->req_lock);
	put_ldev(mdev);
out:
	mdev->rs_total  = 0;
	mdev->rs_failed = 0;
	mdev->rs_paused = 0;
	if (verify_done)
		mdev->ov_start_sector = 0;

	drbd_md_sync(mdev);

	if (khelper_cmd)
		drbd_khelper(mdev, khelper_cmd);

	return 1;
}

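/* Note on the checksum ratio computed above: the two branches avoid
 * overflow on 32-bit.  For t < 100000, the precise (s*100)/t is used
 * safely; for larger t, s/(t/100) gives the same percentage with slightly
 * less precision, but without the risk of s*100 wrapping an unsigned long.
 * E.g. s = 5*10^7, t = 10^8: s*100 = 5*10^9 would wrap on 32-bit, while
 * s/(t/100) = 5*10^7 / 10^6 = 50 (%). */
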
/* helper */
static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
{
	if (drbd_ee_has_active_page(peer_req)) {
		/* This might happen if sendpage() has not finished */
		int i = (peer_req->i.size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		atomic_add(i, &mdev->pp_in_use_by_net);
		atomic_sub(i, &mdev->pp_in_use);
		spin_lock_irq(&mdev->tconn->req_lock);
		list_add_tail(&peer_req->w.list, &mdev->net_ee);
		spin_unlock_irq(&mdev->tconn->req_lock);
		wake_up(&drbd_pp_wait);
	} else
		drbd_free_ee(mdev, peer_req);
}

/**
 * w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	int ok;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, peer_req);
		dec_unacked(mdev);
		return 1;
	}

	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		ok = drbd_send_block(mdev, P_DATA_REPLY, peer_req);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
			    (unsigned long long)peer_req->i.sector);

		ok = drbd_send_ack(mdev, P_NEG_DREPLY, peer_req);
	}

	dec_unacked(mdev);

	move_to_net_ee_or_free(mdev, peer_req);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_block() failed\n");
	return ok;
}

/**
 * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	int ok;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, peer_req);
		dec_unacked(mdev);
		return 1;
	}

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, peer_req->i.sector);
		put_ldev(mdev);
	}

	if (mdev->state.conn == C_AHEAD) {
		ok = drbd_send_ack(mdev, P_RS_CANCEL, peer_req);
	} else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
			inc_rs_pending(mdev);
			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
		} else {
			if (__ratelimit(&drbd_ratelimit_state))
				dev_err(DEV, "Not sending RSDataReply, "
				    "partner DISKLESS!\n");
			ok = 1;
		}
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
			    (unsigned long long)peer_req->i.sector);

		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);

		/* update resync data with failure */
		drbd_rs_failed_io(mdev, peer_req->i.sector, peer_req->i.size);
	}

	dec_unacked(mdev);

	move_to_net_ee_or_free(mdev, peer_req);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_block() failed\n");
	return ok;
}

int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	struct digest_info *di;
	int digest_size;
	void *digest = NULL;
	int ok, eq = 0;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, peer_req);
		dec_unacked(mdev);
		return 1;
	}

	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, peer_req->i.sector);
		put_ldev(mdev);
	}

	di = peer_req->digest;

	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		/* quick hack to try to avoid a race against reconfiguration.
		 * a real fix would be much more involved,
		 * introducing more locking mechanisms */
		if (mdev->csums_tfm) {
			digest_size = crypto_hash_digestsize(mdev->csums_tfm);
			D_ASSERT(digest_size == di->digest_size);
			digest = kmalloc(digest_size, GFP_NOIO);
		}
		if (digest) {
			drbd_csum_ee(mdev, mdev->csums_tfm, peer_req, digest);
			eq = !memcmp(digest, di->digest, digest_size);
			kfree(digest);
		}

		if (eq) {
			drbd_set_in_sync(mdev, peer_req->i.sector, peer_req->i.size);
			/* rs_same_csums unit is BM_BLOCK_SIZE */
			mdev->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
			ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, peer_req);
		} else {
			inc_rs_pending(mdev);
			peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
			peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
			kfree(di);
			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
		}
	} else {
		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
	}

	dec_unacked(mdev);
	move_to_net_ee_or_free(mdev, peer_req);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_block/ack() failed\n");
	return ok;
}

/* TODO merge common code with w_e_send_csum */
int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	sector_t sector = peer_req->i.sector;
	unsigned int size = peer_req->i.size;
	int digest_size;
	void *digest;
	int ok = 1;

	if (unlikely(cancel))
		goto out;

	digest_size = crypto_hash_digestsize(mdev->verify_tfm);
	digest = kmalloc(digest_size, GFP_NOIO);
	if (!digest) {
		ok = 0;	/* terminate the connection in case the allocation failed */
		goto out;
	}

	if (likely(!(peer_req->flags & EE_WAS_ERROR)))
		drbd_csum_ee(mdev, mdev->verify_tfm, peer_req, digest);
	else
		memset(digest, 0, digest_size);

	/* Free peer_req and pages before send.
	 * In case we block on congestion, we could otherwise run into
	 * some distributed deadlock, if the other side blocks on
	 * congestion as well, because our receiver blocks in
	 * drbd_pp_alloc due to pp_in_use > max_buffers. */
	drbd_free_ee(mdev, peer_req);
	peer_req = NULL;
	inc_rs_pending(mdev);
	ok = drbd_send_drequest_csum(mdev, sector, size,
				     digest, digest_size,
				     P_OV_REPLY);
	if (!ok)
		dec_rs_pending(mdev);
	kfree(digest);

out:
	if (peer_req)
		drbd_free_ee(mdev, peer_req);
	dec_unacked(mdev);
	return ok;
}

void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
{
	if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) {
		mdev->ov_last_oos_size += size>>9;
	} else {
		mdev->ov_last_oos_start = sector;
		mdev->ov_last_oos_size = size>>9;
	}
	drbd_set_out_of_sync(mdev, sector, size);
}

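/* Coalescing example for the run tracking above: two adjacent 4 KiB
 * out-of-sync blocks reported at sectors 1000 and 1008 end up as a single
 * run, since 1000 + (4096>>9) == 1008; ov_last_oos_start stays 1000 and
 * ov_last_oos_size grows to 16 sectors.  A report at sector 1024 would
 * instead start a new run. */
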
int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	struct digest_info *di;
	void *digest;
	sector_t sector = peer_req->i.sector;
	unsigned int size = peer_req->i.size;
	int digest_size;
	int ok, eq = 0;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, peer_req);
		dec_unacked(mdev);
		return 1;
	}

	/* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
	 * the resync lru has been cleaned up already */
	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, peer_req->i.sector);
		put_ldev(mdev);
	}

	di = peer_req->digest;

	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		digest_size = crypto_hash_digestsize(mdev->verify_tfm);
		digest = kmalloc(digest_size, GFP_NOIO);
		if (digest) {
			drbd_csum_ee(mdev, mdev->verify_tfm, peer_req, digest);

			D_ASSERT(digest_size == di->digest_size);
			eq = !memcmp(digest, di->digest, digest_size);
			kfree(digest);
		}
	}

	/* Free peer_req and pages before send.
	 * In case we block on congestion, we could otherwise run into
	 * some distributed deadlock, if the other side blocks on
	 * congestion as well, because our receiver blocks in
	 * drbd_pp_alloc due to pp_in_use > max_buffers. */
	drbd_free_ee(mdev, peer_req);
	if (!eq)
		drbd_ov_oos_found(mdev, sector, size);
	else
		ov_oos_print(mdev);

	ok = drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size,
			      eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);

	dec_unacked(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		ov_oos_print(mdev);
		drbd_resync_finished(mdev);
	}

	return ok;
}

int w_prev_work_done(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);
	complete(&b->done);
	return 1;
}

int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_tl_epoch *b = container_of(w, struct drbd_tl_epoch, w);
	struct p_barrier *p = &mdev->tconn->data.sbuf.barrier;
	int ok = 1;

	/* really avoid racing with tl_clear.  w.cb may have been referenced
	 * just before it was reassigned and re-queued, so double check that.
	 * actually, this race was harmless, since we only try to send the
	 * barrier packet here, and otherwise do nothing with the object.
	 * but compare with the head of w_clear_epoch */
	spin_lock_irq(&mdev->tconn->req_lock);
	if (w->cb != w_send_barrier || mdev->state.conn < C_CONNECTED)
		cancel = 1;
	spin_unlock_irq(&mdev->tconn->req_lock);
	if (cancel)
		return 1;

	if (!drbd_get_data_sock(mdev))
		return 0;
	p->barrier = b->br_number;
	/* inc_ap_pending was done where this was queued.
	 * dec_ap_pending will be done in got_BarrierAck
	 * or (on connection loss) in w_clear_epoch.  */
	ok = _drbd_send_cmd(mdev, mdev->tconn->data.socket, P_BARRIER,
			    &p->head, sizeof(*p), 0);
	drbd_put_data_sock(mdev);

	return ok;
}

int w_send_write_hint(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	if (cancel)
		return 1;
	return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE);
}

int w_send_oos(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);
	int ok;

	if (unlikely(cancel)) {
		req_mod(req, SEND_CANCELED);
		return 1;
	}

	ok = drbd_send_oos(mdev, req);
	req_mod(req, OOS_HANDED_TO_NETWORK);

	return ok;
}

/**
 * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_send_dblock(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);
	int ok;

	if (unlikely(cancel)) {
		req_mod(req, SEND_CANCELED);
		return 1;
	}

	ok = drbd_send_dblock(mdev, req);
	req_mod(req, ok ? HANDED_OVER_TO_NETWORK : SEND_FAILED);

	return ok;
}

/**
 * w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_send_read_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);
	int ok;

	if (unlikely(cancel)) {
		req_mod(req, SEND_CANCELED);
		return 1;
	}

	ok = drbd_send_drequest(mdev, P_DATA_REQUEST, req->i.sector, req->i.size,
				(unsigned long)req);

	if (!ok) {
		/* ?? we set C_TIMEOUT or C_BROKEN_PIPE in drbd_send();
		 * so this is probably redundant */
		if (mdev->state.conn >= C_CONNECTED)
			drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
	}
	req_mod(req, ok ? HANDED_OVER_TO_NETWORK : SEND_FAILED);

	return ok;
}

int w_restart_disk_io(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);

	if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
		drbd_al_begin_io(mdev, req->i.sector);
	/* Calling drbd_al_begin_io() out of the worker might deadlock
	   theoretically. Practically it cannot deadlock, since this is
	   only used when unfreezing IOs. All the extents of the requests
	   that made it into the TL are already active */

	drbd_req_make_private_bio(req, req->master_bio);
	req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
	generic_make_request(req->private_bio);

	return 1;
}

static int _drbd_may_sync_now(struct drbd_conf *mdev)
{
	struct drbd_conf *odev = mdev;

	while (1) {
		if (odev->sync_conf.after == -1)
			return 1;
		odev = minor_to_mdev(odev->sync_conf.after);
		if (!expect(odev))
			return 1;
		if ((odev->state.conn >= C_SYNC_SOURCE &&
		     odev->state.conn <= C_PAUSED_SYNC_T) ||
		    odev->state.aftr_isp || odev->state.peer_isp ||
		    odev->state.user_isp)
			return 0;
	}
}

/**
 * _drbd_pause_after() - Pause resync on all devices that may not resync now
 * @mdev:	DRBD device.
 *
 * Called from process context only (admin command and after_state_ch).
 */
static int _drbd_pause_after(struct drbd_conf *mdev)
{
	struct drbd_conf *odev;
	int i, rv = 0;

	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev)
			continue;
		if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
			continue;
		if (!_drbd_may_sync_now(odev))
			rv |= (__drbd_set_state(_NS(odev, aftr_isp, 1), CS_HARD, NULL)
			       != SS_NOTHING_TO_DO);
	}

	return rv;
}

/**
 * _drbd_resume_next() - Resume resync on all devices that may resync now
 * @mdev:	DRBD device.
 *
 * Called from process context only (admin command and worker).
 */
static int _drbd_resume_next(struct drbd_conf *mdev)
{
	struct drbd_conf *odev;
	int i, rv = 0;

	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev)
			continue;
		if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
			continue;
		if (odev->state.aftr_isp) {
			if (_drbd_may_sync_now(odev))
				rv |= (__drbd_set_state(_NS(odev, aftr_isp, 0),
							CS_HARD, NULL)
				       != SS_NOTHING_TO_DO) ;
		}
	}
	return rv;
}

void resume_next_sg(struct drbd_conf *mdev)
{
	write_lock_irq(&global_state_lock);
	_drbd_resume_next(mdev);
	write_unlock_irq(&global_state_lock);
}

void suspend_other_sg(struct drbd_conf *mdev)
{
	write_lock_irq(&global_state_lock);
	_drbd_pause_after(mdev);
	write_unlock_irq(&global_state_lock);
}

static int sync_after_error(struct drbd_conf *mdev, int o_minor)
{
	struct drbd_conf *odev;

	if (o_minor == -1)
		return NO_ERROR;
	if (o_minor < -1 || minor_to_mdev(o_minor) == NULL)
		return ERR_SYNC_AFTER;

	/* check for loops */
	odev = minor_to_mdev(o_minor);
	while (1) {
		if (odev == mdev)
			return ERR_SYNC_AFTER_CYCLE;

		/* dependency chain ends here, no cycles. */
		if (odev->sync_conf.after == -1)
			return NO_ERROR;

		/* follow the dependency chain */
		odev = minor_to_mdev(odev->sync_conf.after);
	}
}

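/* Dependency chain example for the loop check above: with minor 1 set to
 * sync after minor 0, and minor 2 after minor 1, sync_after_error() called
 * for minor 2 with o_minor 1 walks 1 -> 0 and returns NO_ERROR once it
 * sees after == -1.  Asking to make minor 0 sync after minor 2 would
 * revisit the starting device on the walk 2 -> 1 -> 0 and is refused
 * with ERR_SYNC_AFTER_CYCLE. */
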
int drbd_alter_sa(struct drbd_conf *mdev, int na)
{
	int changes;
	int retcode;

	write_lock_irq(&global_state_lock);
	retcode = sync_after_error(mdev, na);
	if (retcode == NO_ERROR) {
		mdev->sync_conf.after = na;
		do {
			changes  = _drbd_pause_after(mdev);
			changes |= _drbd_resume_next(mdev);
		} while (changes);
	}
	write_unlock_irq(&global_state_lock);
	return retcode;
}

9bd28d3c LE |
1454 | void drbd_rs_controller_reset(struct drbd_conf *mdev) |
1455 | { | |
1456 | atomic_set(&mdev->rs_sect_in, 0); | |
1457 | atomic_set(&mdev->rs_sect_ev, 0); | |
1458 | mdev->rs_in_flight = 0; | |
1459 | mdev->rs_planed = 0; | |
1460 | spin_lock(&mdev->peer_seq_lock); | |
1461 | fifo_set(&mdev->rs_plan_s, 0); | |
1462 | spin_unlock(&mdev->peer_seq_lock); | |
1463 | } | |
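
/* This resets the input of the dynamic resync-rate controller: no
 * sectors in flight, no recent events, and an emptied plan fifo.
 * rs_plan_s is accessed under peer_seq_lock, hence the locking here. */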

/**
 * drbd_start_resync() - Start the resync process
 * @mdev:	DRBD device.
 * @side:	Either C_SYNC_SOURCE or C_SYNC_TARGET
 *
 * This function might bring you directly into one of the
 * C_PAUSED_SYNC_* states.
 */
void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
{
	union drbd_state ns;
	int r;

	if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn < C_AHEAD) {
		dev_err(DEV, "Resync already running!\n");
		return;
	}

	if (mdev->state.conn < C_AHEAD) {
		/* In case a previous resync run was aborted by an IO error/detach on the peer. */
		drbd_rs_cancel_all(mdev);
		/* This should be done when we abort the resync.  We definitely do not
		   want to have this for connections going back and forth between
		   Ahead/Behind and SyncSource/SyncTarget. */
	}

	if (side == C_SYNC_TARGET) {
		/* Since application IO was locked out during C_WF_BITMAP_T and
		   C_WF_SYNC_UUID we are still unmodified.  Before going to
		   C_SYNC_TARGET we check whether we are about to make our data
		   inconsistent, and give the before-resync-target handler a
		   chance to veto the resync. */
		r = drbd_khelper(mdev, "before-resync-target");
		r = (r >> 8) & 0xff;
		if (r > 0) {
			dev_info(DEV, "before-resync-target handler returned %d, "
				 "dropping connection.\n", r);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			return;
		}
	} else /* C_SYNC_SOURCE */ {
		r = drbd_khelper(mdev, "before-resync-source");
		r = (r >> 8) & 0xff;
		if (r > 0) {
			if (r == 3) {
				dev_info(DEV, "before-resync-source handler returned %d, "
					 "ignoring. Old userland tools?\n", r);
			} else {
				dev_info(DEV, "before-resync-source handler returned %d, "
					 "dropping connection.\n", r);
				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
				return;
			}
		}
	}
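
	/* In both branches drbd_khelper() returns a wait()-style status;
	 * (r >> 8) & 0xff extracts the handler's exit code.  Exit code 0
	 * lets the resync proceed; anything else drops the connection,
	 * except exit code 3 from before-resync-source, which is only
	 * logged for compatibility with old userland tools. */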

	drbd_state_lock(mdev);

	if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
		drbd_state_unlock(mdev);
		return;
	}

	write_lock_irq(&global_state_lock);
	ns = mdev->state;

	ns.aftr_isp = !_drbd_may_sync_now(mdev);

	ns.conn = side;

	if (side == C_SYNC_TARGET)
		ns.disk = D_INCONSISTENT;
	else /* side == C_SYNC_SOURCE */
		ns.pdsk = D_INCONSISTENT;

	r = __drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	ns = mdev->state;

	if (ns.conn < C_CONNECTED)
		r = SS_UNKNOWN_ERROR;

	if (r == SS_SUCCESS) {
		unsigned long tw = drbd_bm_total_weight(mdev);
		unsigned long now = jiffies;
		int i;

		mdev->rs_failed = 0;
		mdev->rs_paused = 0;
		mdev->rs_same_csum = 0;
		mdev->rs_last_events = 0;
		mdev->rs_last_sect_ev = 0;
		mdev->rs_total = tw;
		mdev->rs_start = now;
		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
			mdev->rs_mark_left[i] = tw;
			mdev->rs_mark_time[i] = now;
		}
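		/* rs_mark_left[]/rs_mark_time[] feed the sync speed estimate
		 * shown in /proc/drbd; seeding all DRBD_SYNC_MARKS entries
		 * with the full bitmap weight at the same timestamp gives the
		 * first estimate a sane baseline. */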
		_drbd_pause_after(mdev);
	}
	write_unlock_irq(&global_state_lock);

	if (r == SS_SUCCESS) {
		dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
		     drbd_conn_str(ns.conn),
		     (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
		     (unsigned long) mdev->rs_total);
		if (side == C_SYNC_TARGET)
			mdev->bm_resync_fo = 0;

		/* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid
		 * with w_send_oos, or the sync target will get confused as to
		 * how many bits to resync.  We cannot always defer it, though:
		 * for an empty resync with protocol < 95 we call
		 * drbd_resync_finished from right here, so the sync uuid has
		 * to be generated and sent first.  Hence: do it here for
		 * protocol < 96, and from after_state_ch otherwise. */
		if (side == C_SYNC_SOURCE && mdev->tconn->agreed_pro_version < 96)
			drbd_gen_and_send_sync_uuid(mdev);

		if (mdev->tconn->agreed_pro_version < 95 && mdev->rs_total == 0) {
			/* This still has a race (about when exactly the peers
			 * detect connection loss) that can lead to a full sync
			 * on next handshake. In 8.3.9 we fixed this with explicit
			 * resync-finished notifications, but the fix
			 * introduces a protocol change. Sleeping for some
			 * time longer than the ping interval + timeout on the
			 * SyncSource, to give the SyncTarget the chance to
			 * detect connection loss, then waiting for a ping
			 * response (implicit in drbd_resync_finished) reduces
			 * the race considerably, but does not solve it. */
			if (side == C_SYNC_SOURCE)
				schedule_timeout_interruptible(
					mdev->tconn->net_conf->ping_int * HZ +
					mdev->tconn->net_conf->ping_timeo*HZ/9);
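			/* Units above: ping_int is in seconds, ping_timeo in
			 * tenths of a second; *HZ/10 would match the timeout
			 * exactly, so /9 errs on the long side. */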
			drbd_resync_finished(mdev);
		}

		drbd_rs_controller_reset(mdev);
		/* ns.conn may already be != mdev->state.conn,
		 * we may have been paused in between, or become paused until
		 * the timer triggers.
		 * No matter, that is handled in resync_timer_fn() */
		if (ns.conn == C_SYNC_TARGET)
			mod_timer(&mdev->resync_timer, jiffies);

		drbd_md_sync(mdev);
	}
	put_ldev(mdev);
	drbd_state_unlock(mdev);
}

int drbd_worker(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct drbd_work *w = NULL;
	LIST_HEAD(work_list);
	int intr = 0, i;

	sprintf(current->comm, "drbd%d_worker", mdev_to_minor(mdev));

	while (get_t_state(thi) == RUNNING) {
		drbd_thread_current_set_cpu(mdev, thi);

		if (down_trylock(&mdev->tconn->data.work.s)) {
			mutex_lock(&mdev->tconn->data.mutex);
			if (mdev->tconn->data.socket && !mdev->tconn->net_conf->no_cork)
				drbd_tcp_uncork(mdev->tconn->data.socket);
			mutex_unlock(&mdev->tconn->data.mutex);

			intr = down_interruptible(&mdev->tconn->data.work.s);

			mutex_lock(&mdev->tconn->data.mutex);
			if (mdev->tconn->data.socket && !mdev->tconn->net_conf->no_cork)
				drbd_tcp_cork(mdev->tconn->data.socket);
			mutex_unlock(&mdev->tconn->data.mutex);
		}
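
		/* The trylock/cork dance above: when no work is pending we are
		 * about to sleep on the work semaphore, so uncork the data
		 * socket first to flush everything batched so far to the peer;
		 * once down_interruptible() returns, cork it again so the
		 * sends of the upcoming work items get coalesced into fewer
		 * packets. */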

		if (intr) {
			D_ASSERT(intr == -EINTR);
			flush_signals(current);
			if (!expect(get_t_state(thi) != RUNNING))
				continue;
			break;
		}
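
		/* A signal is only expected as part of a thread state change.
		 * If we got one while still RUNNING, expect() complains and we
		 * simply retry; otherwise we fall through and terminate. */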

		if (get_t_state(thi) != RUNNING)
			break;
		/* With this break, we have done a down() but not consumed
		   the entry from the list. The cleanup code takes care of
		   this...   */

		w = NULL;
		spin_lock_irq(&mdev->tconn->data.work.q_lock);
		if (!expect(!list_empty(&mdev->tconn->data.work.q))) {
			/* something terribly wrong in our logic.
			 * we were able to down() the semaphore,
			 * but the list is empty... doh.
			 *
			 * what is the best thing to do now?
			 * try again from scratch, restarting the receiver,
			 * asender, whatnot? could break even more ugly,
			 * e.g. when we are primary, but no good local data.
			 *
			 * I'll try to get away just starting over this loop.
			 */
			spin_unlock_irq(&mdev->tconn->data.work.q_lock);
			continue;
		}
		w = list_entry(mdev->tconn->data.work.q.next, struct drbd_work, list);
		list_del_init(&w->list);
		spin_unlock_irq(&mdev->tconn->data.work.q_lock);

		if (!w->cb(mdev, w, mdev->state.conn < C_CONNECTED)) {
			/* dev_warn(DEV, "worker: a callback failed! \n"); */
			if (mdev->state.conn >= C_CONNECTED)
				drbd_force_state(mdev,
						NS(conn, C_NETWORK_FAILURE));
		}
	}
	D_ASSERT(test_bit(DEVICE_DYING, &mdev->flags));
	D_ASSERT(test_bit(CONFIG_PENDING, &mdev->flags));

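	/* Tear-down: drain whatever is still (or concurrently being) queued,
	 * invoking every remaining callback with cancel set to 1 so that
	 * anyone waiting on these work items is released rather than leaked. */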
	spin_lock_irq(&mdev->tconn->data.work.q_lock);
	i = 0;
	while (!list_empty(&mdev->tconn->data.work.q)) {
		list_splice_init(&mdev->tconn->data.work.q, &work_list);
		spin_unlock_irq(&mdev->tconn->data.work.q_lock);

		while (!list_empty(&work_list)) {
			w = list_entry(work_list.next, struct drbd_work, list);
			list_del_init(&w->list);
			w->cb(mdev, w, 1);
			i++; /* dead debugging code */
		}

		spin_lock_irq(&mdev->tconn->data.work.q_lock);
	}
	sema_init(&mdev->tconn->data.work.s, 0);
	/* DANGEROUS race: if someone queued their work within the spinlock
	 * but called up() outside of it, we could get an up() on the
	 * semaphore without a corresponding list entry.
	 * So don't do that.
	 */
	spin_unlock_irq(&mdev->tconn->data.work.q_lock);

	D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE);
	/* _drbd_set_state only uses stop_nowait.
	 * wait here for the exiting receiver. */
	drbd_thread_stop(&mdev->tconn->receiver);
	drbd_mdev_cleanup(mdev);

	dev_info(DEV, "worker terminated\n");

	clear_bit(DEVICE_DYING, &mdev->flags);
	clear_bit(CONFIG_PENDING, &mdev->flags);
	wake_up(&mdev->state_wait);

	return 0;
}