/*
   drbd_worker.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

*/
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"

static int w_make_ov_request(struct drbd_work *w, int cancel);


/* endio handlers:
 *   drbd_md_io_complete (defined here)
 *   drbd_request_endio (defined here)
 *   drbd_peer_request_endio (defined here)
 *   bm_async_io_complete (defined in drbd_bitmap.c)
 *
 * For all these callbacks, note the following:
 * The callbacks will be called in irq context by the IDE drivers,
 * and in Softirqs/Tasklets/BH context by the SCSI drivers.
 * Try to get the locking right :)
 *
 */


/* About the global_state_lock
   Each state transition on a device holds a read lock. In case we have
   to evaluate the resync-after dependencies, we grab a write lock, because
   we need stable states on all devices for that.  */
rwlock_t global_state_lock;

/* used for synchronous meta data and bitmap IO
 * submitted by drbd_md_sync_page_io()
 */
void drbd_md_io_complete(struct bio *bio, int error)
{
	struct drbd_md_io *md_io;
	struct drbd_device *device;

	md_io = (struct drbd_md_io *)bio->bi_private;
	device = container_of(md_io, struct drbd_device, md_io);

	md_io->error = error;

	/* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
	 * to timeout on the lower level device, and eventually detach from it.
	 * If this io completion runs after that timeout expired, this
	 * drbd_md_put_buffer() may allow us to finally try and re-attach.
	 * During normal operation, this only puts that extra reference
	 * down to 1 again.
	 * Make sure we first drop the reference, and only then signal
	 * completion, or we may (in drbd_al_read_log()) cycle so fast into the
	 * next drbd_md_sync_page_io(), that we trigger the
	 * ASSERT(atomic_read(&device->md_io_in_use) == 1) there.
	 */
	drbd_md_put_buffer(device);
	md_io->done = 1;
	wake_up(&device->misc_wait);
	bio_put(bio);
	if (device->ldev) /* special case: drbd_md_read() during drbd_adm_attach() */
		put_ldev(device);
}

/* reads on behalf of the partner,
 * "submitted" by the receiver
 */
static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
	unsigned long flags = 0;
	struct drbd_device *device = peer_req->w.device;

	spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
	device->read_cnt += peer_req->i.size >> 9;
	list_del(&peer_req->w.list);
	if (list_empty(&device->read_ee))
		wake_up(&device->ee_wait);
	if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
		__drbd_chk_io_error(device, DRBD_READ_ERROR);
	spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);

	drbd_queue_work(&first_peer_device(device)->connection->sender_work, &peer_req->w);
	put_ldev(device);
}

/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver, final stage.  */
static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
	unsigned long flags = 0;
	struct drbd_device *device = peer_req->w.device;
	struct drbd_interval i;
	int do_wake;
	u64 block_id;
	int do_al_complete_io;

	/* after we moved peer_req to done_ee,
	 * we may no longer access it,
	 * it may be freed/reused already!
	 * (as soon as we release the req_lock) */
	i = peer_req->i;
	do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
	block_id = peer_req->block_id;

	spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
	device->writ_cnt += peer_req->i.size >> 9;
	list_move_tail(&peer_req->w.list, &device->done_ee);

	/*
	 * Do not remove from the write_requests tree here: we did not send the
	 * Ack yet and did not wake possibly waiting conflicting requests.
	 * The request is removed from the tree in "drbd_process_done_ee",
	 * within the appropriate w.cb (e_end_block/e_end_resync_block), or in
	 * _drbd_clear_done_ee.
	 */

	do_wake = list_empty(block_id == ID_SYNCER ? &device->sync_ee : &device->active_ee);

	if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
		__drbd_chk_io_error(device, DRBD_WRITE_ERROR);
	spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);

	if (block_id == ID_SYNCER)
		drbd_rs_complete_io(device, i.sector);

	if (do_wake)
		wake_up(&device->ee_wait);

	if (do_al_complete_io)
		drbd_al_complete_io(device, &i);

	wake_asender(first_peer_device(device)->connection);
	put_ldev(device);
}

/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver.
 */
void drbd_peer_request_endio(struct bio *bio, int error)
{
	struct drbd_peer_request *peer_req = bio->bi_private;
	struct drbd_device *device = peer_req->w.device;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);
	int is_write = bio_data_dir(bio) == WRITE;

	if (error && __ratelimit(&drbd_ratelimit_state))
		dev_warn(DEV, "%s: error=%d s=%llus\n",
				is_write ? "write" : "read", error,
				(unsigned long long)peer_req->i.sector);
	if (!error && !uptodate) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
					is_write ? "write" : "read",
					(unsigned long long)peer_req->i.sector);
		/* strange behavior of some lower level drivers...
		 * fail the request by clearing the uptodate flag,
		 * but do not return any error?! */
		error = -EIO;
	}

	if (error)
		set_bit(__EE_WAS_ERROR, &peer_req->flags);

	bio_put(bio); /* no need for the bio anymore */
	if (atomic_dec_and_test(&peer_req->pending_bios)) {
		if (is_write)
			drbd_endio_write_sec_final(peer_req);
		else
			drbd_endio_read_sec_final(peer_req);
	}
}

/* read, read-ahead (readA), or write requests on R_PRIMARY coming from drbd_make_request
 */
void drbd_request_endio(struct bio *bio, int error)
{
	unsigned long flags;
	struct drbd_request *req = bio->bi_private;
	struct drbd_device *device = req->w.device;
	struct bio_and_error m;
	enum drbd_req_event what;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);

	if (!error && !uptodate) {
		dev_warn(DEV, "p %s: setting error to -EIO\n",
			 bio_data_dir(bio) == WRITE ? "write" : "read");
		/* strange behavior of some lower level drivers...
		 * fail the request by clearing the uptodate flag,
		 * but do not return any error?! */
		error = -EIO;
	}


	/* If this request was aborted locally before,
	 * but now was completed "successfully",
	 * chances are that this caused arbitrary data corruption.
	 *
	 * "aborting" requests, or force-detaching the disk, is intended for
	 * completely blocked/hung local backing devices which no longer
	 * complete requests at all, not even do error completions.  In this
	 * situation, usually a hard-reset and failover is the only way out.
	 *
	 * By "aborting", basically faking a local error-completion,
	 * we allow for a more graceful switchover by cleanly migrating services.
	 * Still the affected node has to be rebooted "soon".
	 *
	 * By completing these requests, we allow the upper layers to re-use
	 * the associated data pages.
	 *
	 * If later the local backing device "recovers", and now DMAs some data
	 * from disk into the original request pages, in the best case it will
	 * just put random data into unused pages; but typically it will corrupt
	 * meanwhile completely unrelated data, causing all sorts of damage.
	 *
	 * Which means delayed successful completion,
	 * especially for READ requests,
	 * is a reason to panic().
	 *
	 * We assume that a delayed *error* completion is OK,
	 * though we still will complain noisily about it.
	 */
	if (unlikely(req->rq_state & RQ_LOCAL_ABORTED)) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_emerg(DEV, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");

		if (!error)
			panic("possible random memory corruption caused by delayed completion of aborted local request\n");
	}

	/* to avoid recursion in __req_mod */
	if (unlikely(error)) {
		what = (bio_data_dir(bio) == WRITE)
			? WRITE_COMPLETED_WITH_ERROR
			: (bio_rw(bio) == READ)
			  ? READ_COMPLETED_WITH_ERROR
			  : READ_AHEAD_COMPLETED_WITH_ERROR;
	} else
		what = COMPLETED_OK;

	bio_put(req->private_bio);
	req->private_bio = ERR_PTR(error);

	/* not req_mod(), we need irqsave here! */
	spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
	__req_mod(req, what, &m);
	spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
	put_ldev(device);

	if (m.bio)
		complete_master_bio(device, &m);
}

void drbd_csum_ee(struct drbd_device *device, struct crypto_hash *tfm,
		  struct drbd_peer_request *peer_req, void *digest)
{
	struct hash_desc desc;
	struct scatterlist sg;
	struct page *page = peer_req->pages;
	struct page *tmp;
	unsigned len;

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_table(&sg, 1);
	crypto_hash_init(&desc);

	while ((tmp = page_chain_next(page))) {
		/* all but the last page will be fully used */
		sg_set_page(&sg, page, PAGE_SIZE, 0);
		crypto_hash_update(&desc, &sg, sg.length);
		page = tmp;
	}
	/* and now the last, possibly only partially used page */
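	/* peer_req->i.size & (PAGE_SIZE - 1) is the request size modulo
	 * PAGE_SIZE; it is 0 when the last page is fully used, in which case
	 * the "len ?: PAGE_SIZE" idiom below hashes the full page. */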
	len = peer_req->i.size & (PAGE_SIZE - 1);
	sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
	crypto_hash_update(&desc, &sg, sg.length);
	crypto_hash_final(&desc, digest);
}

void drbd_csum_bio(struct drbd_device *device, struct crypto_hash *tfm, struct bio *bio, void *digest)
{
	struct hash_desc desc;
	struct scatterlist sg;
	struct bio_vec bvec;
	struct bvec_iter iter;

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_table(&sg, 1);
	crypto_hash_init(&desc);

	bio_for_each_segment(bvec, bio, iter) {
		sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
		crypto_hash_update(&desc, &sg, sg.length);
	}
	crypto_hash_final(&desc, digest);
}

/* MAYBE merge common code with w_e_end_ov_req */
static int w_e_send_csum(struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	struct drbd_device *device = w->device;
	int digest_size;
	void *digest;
	int err = 0;

	if (unlikely(cancel))
		goto out;

	if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
		goto out;

	digest_size = crypto_hash_digestsize(first_peer_device(device)->connection->csums_tfm);
	digest = kmalloc(digest_size, GFP_NOIO);
	if (digest) {
		sector_t sector = peer_req->i.sector;
		unsigned int size = peer_req->i.size;
		drbd_csum_ee(device, first_peer_device(device)->connection->csums_tfm, peer_req, digest);
		/* Free peer_req and pages before send.
		 * In case we block on congestion, we could otherwise run into
		 * some distributed deadlock, if the other side blocks on
		 * congestion as well, because our receiver blocks in
		 * drbd_alloc_pages due to pp_in_use > max_buffers. */
		drbd_free_peer_req(device, peer_req);
		peer_req = NULL;
		inc_rs_pending(device);
		err = drbd_send_drequest_csum(device, sector, size,
					      digest, digest_size,
					      P_CSUM_RS_REQUEST);
		kfree(digest);
	} else {
		dev_err(DEV, "kmalloc() of digest failed.\n");
		err = -ENOMEM;
	}

out:
	if (peer_req)
		drbd_free_peer_req(device, peer_req);

	if (unlikely(err))
		dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
	return err;
}

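/* Only __GFP_HIGHMEM and __GFP_NOWARN: no reclaim-related flags are set, so
 * allocations with GFP_TRY give up quickly under memory pressure instead of
 * blocking, and they fail silently.  Callers like read_for_csum() below treat
 * a failed allocation as "defer and retry later". */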
#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

static int read_for_csum(struct drbd_device *device, sector_t sector, int size)
{
	struct drbd_peer_request *peer_req;

	if (!get_ldev(device))
		return -EIO;

	if (drbd_rs_should_slow_down(device, sector))
		goto defer;

	/* GFP_TRY, because if there is no memory available right now, this may
	 * be rescheduled for later. It is "only" background resync, after all. */
	peer_req = drbd_alloc_peer_req(device, ID_SYNCER /* unused */, sector,
				       size, GFP_TRY);
	if (!peer_req)
		goto defer;

	peer_req->w.cb = w_e_send_csum;
	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
	list_add(&peer_req->w.list, &device->read_ee);
	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);

	atomic_add(size >> 9, &device->rs_sect_ev);
	if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
		return 0;

	/* If it failed because of ENOMEM, retry should help.  If it failed
	 * because bio_add_page failed (probably broken lower level driver),
	 * retry may or may not help.
	 * If it does not, you may need to force disconnect. */
	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
	list_del(&peer_req->w.list);
	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);

	drbd_free_peer_req(device, peer_req);
defer:
	put_ldev(device);
	return -EAGAIN;
}

int w_resync_timer(struct drbd_work *w, int cancel)
{
	struct drbd_device *device = w->device;
	switch (device->state.conn) {
	case C_VERIFY_S:
		w_make_ov_request(w, cancel);
		break;
	case C_SYNC_TARGET:
		w_make_resync_request(w, cancel);
		break;
	}

	return 0;
}

void resync_timer_fn(unsigned long data)
{
	struct drbd_device *device = (struct drbd_device *) data;

	if (list_empty(&device->resync_work.list))
		drbd_queue_work(&first_peer_device(device)->connection->sender_work, &device->resync_work);
}
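
/* Note the split above: resync_timer_fn() runs in timer (softirq) context,
 * so it only queues device->resync_work; the actual request generation then
 * happens in process context via the work's callback, w_resync_timer(),
 * which is wired up elsewhere in the driver. */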

static void fifo_set(struct fifo_buffer *fb, int value)
{
	int i;

	for (i = 0; i < fb->size; i++)
		fb->values[i] = value;
}

static int fifo_push(struct fifo_buffer *fb, int value)
{
	int ov;

	ov = fb->values[fb->head_index];
	fb->values[fb->head_index++] = value;

	if (fb->head_index >= fb->size)
		fb->head_index = 0;

	return ov;
}

static void fifo_add_val(struct fifo_buffer *fb, int value)
{
	int i;

	for (i = 0; i < fb->size; i++)
		fb->values[i] += value;
}

struct fifo_buffer *fifo_alloc(int fifo_size)
{
	struct fifo_buffer *fb;

	fb = kzalloc(sizeof(struct fifo_buffer) + sizeof(int) * fifo_size, GFP_NOIO);
	if (!fb)
		return NULL;

	fb->head_index = 0;
	fb->size = fifo_size;
	fb->total = 0;

	return fb;
}

static int drbd_rs_controller(struct drbd_device *device)
{
	struct disk_conf *dc;
	unsigned int sect_in;  /* Number of sectors that came in since the last turn */
	unsigned int want;     /* The number of sectors we want in the proxy */
	int req_sect;          /* Number of sectors to request in this turn */
	int correction;        /* Number of sectors more we need in the proxy */
	int cps;               /* correction per invocation of drbd_rs_controller() */
	int steps;             /* Number of time steps to plan ahead */
	int curr_corr;
	int max_sect;
	struct fifo_buffer *plan;

	sect_in = atomic_xchg(&device->rs_sect_in, 0); /* Number of sectors that came in */
	device->rs_in_flight -= sect_in;

	dc = rcu_dereference(device->ldev->disk_conf);
	plan = rcu_dereference(device->rs_plan_s);

	steps = plan->size; /* (dc->c_plan_ahead * 10 * SLEEP_TIME) / HZ; */

	if (device->rs_in_flight + sect_in == 0) { /* At start of resync */
		want = ((dc->resync_rate * 2 * SLEEP_TIME) / HZ) * steps;
	} else { /* normal path */
		want = dc->c_fill_target ? dc->c_fill_target :
			sect_in * dc->c_delay_target * HZ / (SLEEP_TIME * 10);
	}

	correction = want - device->rs_in_flight - plan->total;

	/* Plan ahead */
	cps = correction / steps;
	fifo_add_val(plan, cps);
	plan->total += cps * steps;

	/* What we do in this step */
	curr_corr = fifo_push(plan, 0);
	plan->total -= curr_corr;

	req_sect = sect_in + curr_corr;
	if (req_sect < 0)
		req_sect = 0;

	max_sect = (dc->c_max_rate * 2 * SLEEP_TIME) / HZ;
	if (req_sect > max_sect)
		req_sect = max_sect;

	/*
	dev_warn(DEV, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
		 sect_in, device->rs_in_flight, want, correction,
		 steps, cps, device->rs_planed, curr_corr, req_sect);
	*/

	return req_sect;
}
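
/* A worked example of the controller above, assuming the usual SLEEP_TIME of
 * HZ/10 (one invocation per 100ms), steps == 10, and a configured
 * c_fill_target of 1000 sectors: if 800 sectors came back since the last turn
 * (sect_in), 300 are still in flight and the plan fifo is all zeros, then
 * correction = 1000 - 300 - 0 = 700 and cps = 70.  All ten fifo slots gain 70,
 * the oldest slot (now 70) is popped as curr_corr, and this turn requests
 * req_sect = 800 + 70 = 870 sectors (bounded by max_sect); the remaining 630
 * sectors of correction are spread over the next nine turns. */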

static int drbd_rs_number_requests(struct drbd_device *device)
{
	int number;

	rcu_read_lock();
	if (rcu_dereference(device->rs_plan_s)->size) {
		number = drbd_rs_controller(device) >> (BM_BLOCK_SHIFT - 9);
		device->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
	} else {
		device->c_sync_rate = rcu_dereference(device->ldev->disk_conf)->resync_rate;
		number = SLEEP_TIME * device->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
	}
	rcu_read_unlock();

	/* ignore the amount of pending requests, the resync controller should
	 * throttle down to the incoming reply rate soon enough anyway. */
	return number;
}
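
/* Unit conversions above, assuming the usual 4 KiB bitmap granularity
 * (BM_BLOCK_SIZE == 4096, BM_BLOCK_SHIFT == 12): ">> (BM_BLOCK_SHIFT - 9)"
 * turns a count of 512-byte sectors into a count of bitmap blocks, i.e.
 * divides by 8, and c_sync_rate is kept in KiB/s, hence the
 * "* (BM_BLOCK_SIZE / 1024)" factor when converting between blocks per
 * SLEEP_TIME and KiB per second. */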

int w_make_resync_request(struct drbd_work *w, int cancel)
{
	struct drbd_device *device = w->device;
	unsigned long bit;
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(device->this_bdev);
	int max_bio_size;
	int number, rollback_i, size;
	int align, queued, sndbuf;
	int i = 0;

	if (unlikely(cancel))
		return 0;

	if (device->rs_total == 0) {
		/* empty resync? */
		drbd_resync_finished(device);
		return 0;
	}

	if (!get_ldev(device)) {
		/* Since we only need to access device->rsync, a
		   get_ldev_if_state(device, D_FAILED) would be sufficient;
		   but continuing a resync with a broken disk makes no
		   sense at all. */
		dev_err(DEV, "Disk broke down during resync!\n");
		return 0;
	}

	max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9;
	number = drbd_rs_number_requests(device);
	if (number == 0)
		goto requeue;

	for (i = 0; i < number; i++) {
		/* Stop generating RS requests when half of the send buffer is filled */
		mutex_lock(&first_peer_device(device)->connection->data.mutex);
		if (first_peer_device(device)->connection->data.socket) {
			queued = first_peer_device(device)->connection->data.socket->sk->sk_wmem_queued;
			sndbuf = first_peer_device(device)->connection->data.socket->sk->sk_sndbuf;
		} else {
			queued = 1;
			sndbuf = 0;
		}
		mutex_unlock(&first_peer_device(device)->connection->data.mutex);
		if (queued > sndbuf / 2)
			goto requeue;

next_sector:
		size = BM_BLOCK_SIZE;
		bit  = drbd_bm_find_next(device, device->bm_resync_fo);

		if (bit == DRBD_END_OF_BITMAP) {
			device->bm_resync_fo = drbd_bm_bits(device);
			put_ldev(device);
			return 0;
		}

		sector = BM_BIT_TO_SECT(bit);

		if (drbd_rs_should_slow_down(device, sector) ||
		    drbd_try_rs_begin_io(device, sector)) {
			device->bm_resync_fo = bit;
			goto requeue;
		}
		device->bm_resync_fo = bit + 1;

		if (unlikely(drbd_bm_test_bit(device, bit) == 0)) {
			drbd_rs_complete_io(device, sector);
			goto next_sector;
		}

#if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE
		/* try to find some adjacent bits.
		 * we stop if we have already the maximum req size.
		 *
		 * Additionally always align bigger requests, in order to
		 * be prepared for all stripe sizes of software RAIDs.
		 */
		align = 1;
		rollback_i = i;
		for (;;) {
			if (size + BM_BLOCK_SIZE > max_bio_size)
				break;

			/* Always stay aligned: stop merging once the start
			 * sector is no longer aligned to the request size we
			 * are about to grow to. */
			if (sector & ((1<<(align+3))-1))
				break;

			/* do not cross extent boundaries */
			if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0)
				break;
			/* now, is it actually dirty, after all?
			 * caution, drbd_bm_test_bit is tri-state for some
			 * obscure reason; ( b == 0 ) would get the out-of-band
			 * only accidentally right because of the "oddly sized"
			 * adjustment below */
			if (drbd_bm_test_bit(device, bit+1) != 1)
				break;
			bit++;
			size += BM_BLOCK_SIZE;
			if ((BM_BLOCK_SIZE << align) <= size)
				align++;
			i++;
		}
		/* if we merged some,
		 * reset the offset to start the next drbd_bm_find_next from */
		if (size > BM_BLOCK_SIZE)
			device->bm_resync_fo = bit + 1;
#endif

		/* adjust very last sectors, in case we are oddly sized */
		if (sector + (size>>9) > capacity)
			size = (capacity-sector)<<9;
		if (first_peer_device(device)->connection->agreed_pro_version >= 89 &&
		    first_peer_device(device)->connection->csums_tfm) {
			switch (read_for_csum(device, sector, size)) {
			case -EIO: /* Disk failure */
				put_ldev(device);
				return -EIO;
			case -EAGAIN: /* allocation failed, or ldev busy */
				drbd_rs_complete_io(device, sector);
				device->bm_resync_fo = BM_SECT_TO_BIT(sector);
				i = rollback_i;
				goto requeue;
			case 0:
				/* everything ok */
				break;
			default:
				BUG();
			}
		} else {
			int err;

			inc_rs_pending(device);
			err = drbd_send_drequest(device, P_RS_DATA_REQUEST,
						 sector, size, ID_SYNCER);
			if (err) {
				dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
				dec_rs_pending(device);
				put_ldev(device);
				return err;
			}
		}
	}

	if (device->bm_resync_fo >= drbd_bm_bits(device)) {
		/* last syncer _request_ was sent,
		 * but the P_RS_DATA_REPLY not yet received.  sync will end (and
		 * next sync group will resume), as soon as we receive the last
		 * resync data block, and the last bit is cleared.
		 * until then resync "work" is "inactive" ...
		 */
		put_ldev(device);
		return 0;
	}

requeue:
	device->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
	mod_timer(&device->resync_timer, jiffies + SLEEP_TIME);
	put_ldev(device);
	return 0;
}

static int w_make_ov_request(struct drbd_work *w, int cancel)
{
	struct drbd_device *device = w->device;
	int number, i, size;
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(device->this_bdev);
	bool stop_sector_reached = false;

	if (unlikely(cancel))
		return 1;

	number = drbd_rs_number_requests(device);

	sector = device->ov_position;
	for (i = 0; i < number; i++) {
		if (sector >= capacity)
			return 1;

		/* We check for "finished" only in the reply path:
		 * w_e_end_ov_reply().
		 * We need to send at least one request out. */
		stop_sector_reached = i > 0
			&& verify_can_do_stop_sector(device)
			&& sector >= device->ov_stop_sector;
		if (stop_sector_reached)
			break;

		size = BM_BLOCK_SIZE;

		if (drbd_rs_should_slow_down(device, sector) ||
		    drbd_try_rs_begin_io(device, sector)) {
			device->ov_position = sector;
			goto requeue;
		}

		if (sector + (size>>9) > capacity)
			size = (capacity-sector)<<9;

		inc_rs_pending(device);
		if (drbd_send_ov_request(device, sector, size)) {
			dec_rs_pending(device);
			return 0;
		}
		sector += BM_SECT_PER_BIT;
	}
	device->ov_position = sector;

requeue:
	device->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
	if (i == 0 || !stop_sector_reached)
		mod_timer(&device->resync_timer, jiffies + SLEEP_TIME);
	return 1;
}

int w_ov_finished(struct drbd_work *w, int cancel)
{
	struct drbd_device *device = w->device;
	kfree(w);
	ov_out_of_sync_print(device);
	drbd_resync_finished(device);

	return 0;
}

static int w_resync_finished(struct drbd_work *w, int cancel)
{
	struct drbd_device *device = w->device;
	kfree(w);

	drbd_resync_finished(device);

	return 0;
}

static void ping_peer(struct drbd_device *device)
{
	struct drbd_connection *connection = first_peer_device(device)->connection;

	clear_bit(GOT_PING_ACK, &connection->flags);
	request_ping(connection);
	wait_event(connection->ping_wait,
		   test_bit(GOT_PING_ACK, &connection->flags) || device->state.conn < C_CONNECTED);
}

int drbd_resync_finished(struct drbd_device *device)
{
	unsigned long db, dt, dbdt;
	unsigned long n_oos;
	union drbd_state os, ns;
	struct drbd_work *w;
	char *khelper_cmd = NULL;
	int verify_done = 0;

	/* Remove all elements from the resync LRU.  Since future actions
	 * might set bits in the (main) bitmap, the entries in the
	 * resync LRU would otherwise be wrong. */
	if (drbd_rs_del_all(device)) {
		/* In case this is not possible now, most probably because
		 * there are P_RS_DATA_REPLY packets lingering on the worker's
		 * queue (or even the read operations for those packets
		 * are not finished by now).  Retry in 100ms. */

		schedule_timeout_interruptible(HZ / 10);
		w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
		if (w) {
			w->cb = w_resync_finished;
			w->device = device;
			drbd_queue_work(&first_peer_device(device)->connection->sender_work, w);
			return 1;
		}
		dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
	}

	dt = (jiffies - device->rs_start - device->rs_paused) / HZ;
	if (dt <= 0)
		dt = 1;

	db = device->rs_total;
	/* adjust for verify start and stop sectors, respectively the reached position */
	if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
		db -= device->ov_left;

	dbdt = Bit2KB(db/dt);
	device->rs_paused /= HZ;

	if (!get_ldev(device))
		goto out;

	ping_peer(device);

	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
	os = drbd_read_state(device);

	verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);

	/* This protects us against multiple calls (that can happen in the presence
	   of application IO), and against connectivity loss just before we arrive here. */
	if (os.conn <= C_CONNECTED)
		goto out_unlock;

	ns = os;
	ns.conn = C_CONNECTED;

	dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
	     verify_done ? "Online verify" : "Resync",
	     dt + device->rs_paused, device->rs_paused, dbdt);

	n_oos = drbd_bm_total_weight(device);

	if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) {
		if (n_oos) {
			dev_alert(DEV, "Online verify found %lu %dk block out of sync!\n",
			      n_oos, Bit2KB(1));
			khelper_cmd = "out-of-sync";
		}
	} else {
		D_ASSERT((n_oos - device->rs_failed) == 0);

		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
			khelper_cmd = "after-resync-target";

		if (first_peer_device(device)->connection->csums_tfm && device->rs_total) {
			const unsigned long s = device->rs_same_csum;
			const unsigned long t = device->rs_total;
			const int ratio =
				(t == 0)     ? 0 :
				(t < 100000) ? ((s*100)/t) : (s/(t/100));
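			/* The two-branch form above keeps the intermediate
			 * values in integer range: for t < 100000, s <= t, so
			 * s*100 stays small; for larger t, dividing t by 100
			 * first avoids the multiplication entirely.  Either
			 * way the result is the percentage (s * 100) / t,
			 * modulo integer truncation. */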
			dev_info(DEV, "%u %% had equal checksums, eliminated: %luK; "
			     "transferred %luK total %luK\n",
			     ratio,
			     Bit2KB(device->rs_same_csum),
			     Bit2KB(device->rs_total - device->rs_same_csum),
			     Bit2KB(device->rs_total));
		}
	}

	if (device->rs_failed) {
		dev_info(DEV, " %lu failed blocks\n", device->rs_failed);

		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
			ns.disk = D_INCONSISTENT;
			ns.pdsk = D_UP_TO_DATE;
		} else {
			ns.disk = D_UP_TO_DATE;
			ns.pdsk = D_INCONSISTENT;
		}
	} else {
		ns.disk = D_UP_TO_DATE;
		ns.pdsk = D_UP_TO_DATE;

		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
			if (device->p_uuid) {
				int i;
				for (i = UI_BITMAP ; i <= UI_HISTORY_END ; i++)
					_drbd_uuid_set(device, i, device->p_uuid[i]);
				drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_CURRENT]);
				_drbd_uuid_set(device, UI_CURRENT, device->p_uuid[UI_CURRENT]);
			} else {
				dev_err(DEV, "device->p_uuid is NULL! BUG\n");
			}
		}

		if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) {
			/* for verify runs, we don't update uuids here,
			 * so there would be nothing to report. */
			drbd_uuid_set_bm(device, 0UL);
			drbd_print_uuids(device, "updated UUIDs");
			if (device->p_uuid) {
				/* Now the two UUID sets are equal, update what we
				 * know of the peer. */
				int i;
				for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
					device->p_uuid[i] = device->ldev->md.uuid[i];
			}
		}
	}

	_drbd_set_state(device, ns, CS_VERBOSE, NULL);
out_unlock:
	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
	put_ldev(device);
out:
	device->rs_total  = 0;
	device->rs_failed = 0;
	device->rs_paused = 0;

	/* reset start sector, if we reached end of device */
	if (verify_done && device->ov_left == 0)
		device->ov_start_sector = 0;

	drbd_md_sync(device);

	if (khelper_cmd)
		drbd_khelper(device, khelper_cmd);

	return 1;
}

/* helper */
static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req)
{
	if (drbd_peer_req_has_active_page(peer_req)) {
		/* This might happen if sendpage() has not finished */
		int i = (peer_req->i.size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		atomic_add(i, &device->pp_in_use_by_net);
		atomic_sub(i, &device->pp_in_use);
		spin_lock_irq(&first_peer_device(device)->connection->req_lock);
		list_add_tail(&peer_req->w.list, &device->net_ee);
		spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
		wake_up(&drbd_pp_wait);
	} else
		drbd_free_peer_req(device, peer_req);
}

/**
 * w_e_end_data_req() - Worker callback to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
 * @device:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyway
 */
int w_e_end_data_req(struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	struct drbd_device *device = w->device;
	int err;

	if (unlikely(cancel)) {
		drbd_free_peer_req(device, peer_req);
		dec_unacked(device);
		return 0;
	}

	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		err = drbd_send_block(device, P_DATA_REPLY, peer_req);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
			    (unsigned long long)peer_req->i.sector);

		err = drbd_send_ack(device, P_NEG_DREPLY, peer_req);
	}

	dec_unacked(device);

	move_to_net_ee_or_free(device, peer_req);

	if (unlikely(err))
		dev_err(DEV, "drbd_send_block() failed\n");
	return err;
}

/**
 * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
 * @device:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyway
 */
int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	struct drbd_device *device = w->device;
	int err;

	if (unlikely(cancel)) {
		drbd_free_peer_req(device, peer_req);
		dec_unacked(device);
		return 0;
	}

	if (get_ldev_if_state(device, D_FAILED)) {
		drbd_rs_complete_io(device, peer_req->i.sector);
		put_ldev(device);
	}

	if (device->state.conn == C_AHEAD) {
		err = drbd_send_ack(device, P_RS_CANCEL, peer_req);
	} else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		if (likely(device->state.pdsk >= D_INCONSISTENT)) {
			inc_rs_pending(device);
			err = drbd_send_block(device, P_RS_DATA_REPLY, peer_req);
		} else {
			if (__ratelimit(&drbd_ratelimit_state))
				dev_err(DEV, "Not sending RSDataReply, "
				    "partner DISKLESS!\n");
			err = 0;
		}
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
			    (unsigned long long)peer_req->i.sector);

		err = drbd_send_ack(device, P_NEG_RS_DREPLY, peer_req);

		/* update resync data with failure */
		drbd_rs_failed_io(device, peer_req->i.sector, peer_req->i.size);
	}

	dec_unacked(device);

	move_to_net_ee_or_free(device, peer_req);

	if (unlikely(err))
		dev_err(DEV, "drbd_send_block() failed\n");
	return err;
}

int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	struct drbd_device *device = w->device;
	struct digest_info *di;
	int digest_size;
	void *digest = NULL;
	int err, eq = 0;

	if (unlikely(cancel)) {
		drbd_free_peer_req(device, peer_req);
		dec_unacked(device);
		return 0;
	}

	if (get_ldev(device)) {
		drbd_rs_complete_io(device, peer_req->i.sector);
		put_ldev(device);
	}

	di = peer_req->digest;

	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		/* quick hack to try to avoid a race against reconfiguration.
		 * a real fix would be much more involved,
		 * introducing more locking mechanisms */
		if (first_peer_device(device)->connection->csums_tfm) {
			digest_size = crypto_hash_digestsize(first_peer_device(device)->connection->csums_tfm);
			D_ASSERT(digest_size == di->digest_size);
			digest = kmalloc(digest_size, GFP_NOIO);
		}
		if (digest) {
			drbd_csum_ee(device, first_peer_device(device)->connection->csums_tfm, peer_req, digest);
			eq = !memcmp(digest, di->digest, digest_size);
			kfree(digest);
		}

		if (eq) {
			drbd_set_in_sync(device, peer_req->i.sector, peer_req->i.size);
			/* rs_same_csums unit is BM_BLOCK_SIZE */
			device->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
			err = drbd_send_ack(device, P_RS_IS_IN_SYNC, peer_req);
		} else {
			inc_rs_pending(device);
			peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
			peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
			kfree(di);
			err = drbd_send_block(device, P_RS_DATA_REPLY, peer_req);
		}
	} else {
		err = drbd_send_ack(device, P_NEG_RS_DREPLY, peer_req);
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
	}

	dec_unacked(device);
	move_to_net_ee_or_free(device, peer_req);

	if (unlikely(err))
		dev_err(DEV, "drbd_send_block/ack() failed\n");
	return err;
}

int w_e_end_ov_req(struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	struct drbd_device *device = w->device;
	sector_t sector = peer_req->i.sector;
	unsigned int size = peer_req->i.size;
	int digest_size;
	void *digest;
	int err = 0;

	if (unlikely(cancel))
		goto out;

	digest_size = crypto_hash_digestsize(first_peer_device(device)->connection->verify_tfm);
	digest = kmalloc(digest_size, GFP_NOIO);
	if (!digest) {
		err = 1;	/* terminate the connection in case the allocation failed */
		goto out;
	}

	if (likely(!(peer_req->flags & EE_WAS_ERROR)))
		drbd_csum_ee(device, first_peer_device(device)->connection->verify_tfm, peer_req, digest);
	else
		memset(digest, 0, digest_size);

	/* Free peer_req and pages before send.
	 * In case we block on congestion, we could otherwise run into
	 * some distributed deadlock, if the other side blocks on
	 * congestion as well, because our receiver blocks in
	 * drbd_alloc_pages due to pp_in_use > max_buffers. */
	drbd_free_peer_req(device, peer_req);
	peer_req = NULL;
	inc_rs_pending(device);
	err = drbd_send_drequest_csum(device, sector, size, digest, digest_size, P_OV_REPLY);
	if (err)
		dec_rs_pending(device);
	kfree(digest);

out:
	if (peer_req)
		drbd_free_peer_req(device, peer_req);
	dec_unacked(device);
	return err;
}

void drbd_ov_out_of_sync_found(struct drbd_device *device, sector_t sector, int size)
{
	if (device->ov_last_oos_start + device->ov_last_oos_size == sector) {
		device->ov_last_oos_size += size>>9;
	} else {
		device->ov_last_oos_start = sector;
		device->ov_last_oos_size = size>>9;
	}
	drbd_set_out_of_sync(device, sector, size);
}

int w_e_end_ov_reply(struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	struct drbd_device *device = w->device;
	struct digest_info *di;
	void *digest;
	sector_t sector = peer_req->i.sector;
	unsigned int size = peer_req->i.size;
	int digest_size;
	int err, eq = 0;
	bool stop_sector_reached = false;

	if (unlikely(cancel)) {
		drbd_free_peer_req(device, peer_req);
		dec_unacked(device);
		return 0;
	}

	/* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
	 * the resync lru has been cleaned up already */
	if (get_ldev(device)) {
		drbd_rs_complete_io(device, peer_req->i.sector);
		put_ldev(device);
	}

	di = peer_req->digest;

	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		digest_size = crypto_hash_digestsize(first_peer_device(device)->connection->verify_tfm);
		digest = kmalloc(digest_size, GFP_NOIO);
		if (digest) {
			drbd_csum_ee(device, first_peer_device(device)->connection->verify_tfm, peer_req, digest);

			D_ASSERT(digest_size == di->digest_size);
			eq = !memcmp(digest, di->digest, digest_size);
			kfree(digest);
		}
	}

	/* Free peer_req and pages before send.
	 * In case we block on congestion, we could otherwise run into
	 * some distributed deadlock, if the other side blocks on
	 * congestion as well, because our receiver blocks in
	 * drbd_alloc_pages due to pp_in_use > max_buffers. */
	drbd_free_peer_req(device, peer_req);
	if (!eq)
		drbd_ov_out_of_sync_found(device, sector, size);
	else
		ov_out_of_sync_print(device);

	err = drbd_send_ack_ex(device, P_OV_RESULT, sector, size,
			       eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);

	dec_unacked(device);

	--device->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((device->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(device, device->ov_left);

	stop_sector_reached = verify_can_do_stop_sector(device) &&
		(sector + (size>>9)) >= device->ov_stop_sector;

	if (device->ov_left == 0 || stop_sector_reached) {
		ov_out_of_sync_print(device);
		drbd_resync_finished(device);
	}

	return err;
}

int w_prev_work_done(struct drbd_work *w, int cancel)
{
	struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);

	complete(&b->done);
	return 0;
}

/* FIXME
 * We need to track the number of pending barrier acks,
 * and to be able to wait for them.
 * See also comment in drbd_adm_attach before drbd_suspend_io.
 */
static int drbd_send_barrier(struct drbd_connection *connection)
{
	struct p_barrier *p;
	struct drbd_socket *sock;

	sock = &connection->data;
	p = conn_prepare_command(connection, sock);
	if (!p)
		return -EIO;
	p->barrier = connection->send.current_epoch_nr;
	p->pad = 0;
	connection->send.current_epoch_writes = 0;

	return conn_send_command(connection, sock, P_BARRIER, sizeof(*p), NULL, 0);
}

int w_send_write_hint(struct drbd_work *w, int cancel)
{
	struct drbd_device *device = w->device;
	struct drbd_socket *sock;

	if (cancel)
		return 0;
	sock = &first_peer_device(device)->connection->data;
	if (!drbd_prepare_command(device, sock))
		return -EIO;
	return drbd_send_command(device, sock, P_UNPLUG_REMOTE, 0, NULL, 0);
}

bde89a9e | 1307 | static void re_init_if_first_write(struct drbd_connection *connection, unsigned int epoch) |
4eb9b3cb | 1308 | { |
bde89a9e AG |
1309 | if (!connection->send.seen_any_write_yet) { |
1310 | connection->send.seen_any_write_yet = true; | |
1311 | connection->send.current_epoch_nr = epoch; | |
1312 | connection->send.current_epoch_writes = 0; | |
4eb9b3cb LE |
1313 | } |
1314 | } | |
1315 | ||
bde89a9e | 1316 | static void maybe_send_barrier(struct drbd_connection *connection, unsigned int epoch) |
4eb9b3cb LE |
1317 | { |
1318 | /* nothing to do before the first write on this connection */ | |
bde89a9e | 1319 | if (!connection->send.seen_any_write_yet) |
4eb9b3cb | 1320 | return; |
bde89a9e AG |
1321 | if (connection->send.current_epoch_nr != epoch) { |
1322 | if (connection->send.current_epoch_writes) | |
1323 | drbd_send_barrier(connection); | |
1324 | connection->send.current_epoch_nr = epoch; | |
4eb9b3cb LE |
1325 | } |
1326 | } | |
1327 | ||
8f7bed77 | 1328 | int w_send_out_of_sync(struct drbd_work *w, int cancel) |
73a01a18 PR |
1329 | { |
1330 | struct drbd_request *req = container_of(w, struct drbd_request, w); | |
b30ab791 | 1331 | struct drbd_device *device = w->device; |
a6b32bc3 | 1332 | struct drbd_connection *connection = first_peer_device(device)->connection; |
99920dc5 | 1333 | int err; |
73a01a18 PR |
1334 | |
1335 | if (unlikely(cancel)) { | |
8554df1c | 1336 | req_mod(req, SEND_CANCELED); |
99920dc5 | 1337 | return 0; |
73a01a18 PR |
1338 | } |
1339 | ||
bde89a9e | 1340 | /* this time, no connection->send.current_epoch_writes++; |
b6dd1a89 LE |
1341 | * If it was sent, it was the closing barrier for the last |
1342 | * replicated epoch, before we went into AHEAD mode. | |
1343 | * No more barriers will be sent until we leave AHEAD mode again. */ | |
bde89a9e | 1344 | maybe_send_barrier(connection, req->epoch); |
b6dd1a89 | 1345 | |
b30ab791 | 1346 | err = drbd_send_out_of_sync(device, req); |
8554df1c | 1347 | req_mod(req, OOS_HANDED_TO_NETWORK); |
73a01a18 | 1348 | |
99920dc5 | 1349 | return err; |
73a01a18 PR |
1350 | } |
1351 | ||
b411b363 PR |
1352 | /** |
1353 | * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request | |
b30ab791 | 1354 | * @device: DRBD device. |
b411b363 PR |
1355 | * @w: work object. |
1356 | * @cancel: The connection will be closed anyway | |
1357 | */ | |
99920dc5 | 1358 | int w_send_dblock(struct drbd_work *w, int cancel) |
b411b363 PR |
1359 | { |
1360 | struct drbd_request *req = container_of(w, struct drbd_request, w); | |
b30ab791 | 1361 | struct drbd_device *device = w->device; |
a6b32bc3 | 1362 | struct drbd_connection *connection = first_peer_device(device)->connection; |
99920dc5 | 1363 | int err; |
b411b363 PR |
1364 | |
1365 | if (unlikely(cancel)) { | |
8554df1c | 1366 | req_mod(req, SEND_CANCELED); |
99920dc5 | 1367 | return 0; |
b411b363 PR |
1368 | } |
1369 | ||
bde89a9e AG |
1370 | re_init_if_first_write(connection, req->epoch); |
1371 | maybe_send_barrier(connection, req->epoch); | |
1372 | connection->send.current_epoch_writes++; | |
b6dd1a89 | 1373 | |
b30ab791 | 1374 | err = drbd_send_dblock(device, req); |
99920dc5 | 1375 | req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK); |
b411b363 | 1376 | |
99920dc5 | 1377 | return err; |
b411b363 PR |
1378 | } |
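/* Illustrative call sequence for the epoch/barrier machinery (a sketch,
 * not driver code): assume two writes in epoch 7, then one in epoch 8.
 *
 *   w_send_dblock(req in epoch 7):
 *     re_init_if_first_write(connection, 7);  first write ever seen
 *     maybe_send_barrier(connection, 7);      same epoch: no-op
 *     current_epoch_writes -> 1 (then 2 for the second write)
 *   w_send_dblock(req in epoch 8):
 *     maybe_send_barrier(connection, 8);
 *       current_epoch_writes != 0, so drbd_send_barrier() closes
 *       epoch 7 on the wire and resets the write counter;
 *       current_epoch_nr becomes 8.
 *
 * w_send_read_req() and w_send_out_of_sync() run through the same
 * maybe_send_barrier(), so reads and out-of-sync notices can also
 * close a still-open write epoch. */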
1379 | ||
1380 | /** | |
1381 | * w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet | |
b30ab791 | 1382 | * @device: DRBD device. |
b411b363 PR |
1383 | * @w: work object. |
1384 | * @cancel: The connection will be closed anyway | |
1385 | */ | |
99920dc5 | 1386 | int w_send_read_req(struct drbd_work *w, int cancel) |
b411b363 PR |
1387 | { |
1388 | struct drbd_request *req = container_of(w, struct drbd_request, w); | |
b30ab791 | 1389 | struct drbd_device *device = w->device; |
a6b32bc3 | 1390 | struct drbd_connection *connection = first_peer_device(device)->connection; |
99920dc5 | 1391 | int err; |
b411b363 PR |
1392 | |
1393 | if (unlikely(cancel)) { | |
8554df1c | 1394 | req_mod(req, SEND_CANCELED); |
99920dc5 | 1395 | return 0; |
b411b363 PR |
1396 | } |
1397 | ||
b6dd1a89 LE |
1398 | /* Even read requests may close a write epoch, |
1399 | * if one is still open. */ | |
bde89a9e | 1400 | maybe_send_barrier(connection, req->epoch); |
b6dd1a89 | 1401 | |
b30ab791 | 1402 | err = drbd_send_drequest(device, P_DATA_REQUEST, req->i.sector, req->i.size, |
6c1005e7 | 1403 | (unsigned long)req); |
b411b363 | 1404 | |
99920dc5 | 1405 | req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK); |
b411b363 | 1406 | |
99920dc5 | 1407 | return err; |
b411b363 PR |
1408 | } |
1409 | ||
99920dc5 | 1410 | int w_restart_disk_io(struct drbd_work *w, int cancel) |
265be2d0 PR |
1411 | { |
1412 | struct drbd_request *req = container_of(w, struct drbd_request, w); | |
b30ab791 | 1413 | struct drbd_device *device = w->device; |
265be2d0 | 1414 | |
0778286a | 1415 | if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG) |
b30ab791 | 1416 | drbd_al_begin_io(device, &req->i, false); |
265be2d0 PR |
1417 | |
1418 | drbd_req_make_private_bio(req, req->master_bio); | |
b30ab791 | 1419 | req->private_bio->bi_bdev = device->ldev->backing_bdev; |
265be2d0 PR |
1420 | generic_make_request(req->private_bio); |
1421 | ||
99920dc5 | 1422 | return 0; |
265be2d0 PR |
1423 | } |
1424 | ||
b30ab791 | 1425 | static int _drbd_may_sync_now(struct drbd_device *device) |
b411b363 | 1426 | { |
b30ab791 | 1427 | struct drbd_device *odev = device; |
95f8efd0 | 1428 | int resync_after; |
b411b363 PR |
1429 | |
1430 | while (1) { | |
a3f8f7dc | 1431 | if (!odev->ldev || odev->state.disk == D_DISKLESS) |
438c8374 | 1432 | return 1; |
daeda1cc | 1433 | rcu_read_lock(); |
95f8efd0 | 1434 | resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after; |
daeda1cc | 1435 | rcu_read_unlock(); |
95f8efd0 | 1436 | if (resync_after == -1) |
b411b363 | 1437 | return 1; |
b30ab791 | 1438 | odev = minor_to_device(resync_after); |
a3f8f7dc | 1439 | if (!odev) |
841ce241 | 1440 | return 1; |
b411b363 PR |
1441 | if ((odev->state.conn >= C_SYNC_SOURCE && |
1442 | odev->state.conn <= C_PAUSED_SYNC_T) || | |
1443 | odev->state.aftr_isp || odev->state.peer_isp || | |
1444 | odev->state.user_isp) | |
1445 | return 0; | |
1446 | } | |
1447 | } | |
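/* Example (illustrative): with resync_after configured as
 *   minor 2 -> minor 1, minor 1 -> minor 0, minor 0 -> -1,
 * _drbd_may_sync_now(minor 2) walks 2 -> 1 -> 0 and returns 1 only if
 * neither minor 1 nor minor 0 is between C_SYNC_SOURCE and
 * C_PAUSED_SYNC_T or paused via aftr_isp/peer_isp/user_isp;
 * otherwise minor 2 has to wait, and 0 is returned. */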
1448 | ||
1449 | /** | |
1450 | * _drbd_pause_after() - Pause resync on all devices that may not resync now | |
b30ab791 | 1451 | * @device: DRBD device. |
b411b363 PR |
1452 | * |
1453 | * Called from process context only (admin command and after_state_ch). | |
1454 | */ | |
b30ab791 | 1455 | static int _drbd_pause_after(struct drbd_device *device) |
b411b363 | 1456 | { |
54761697 | 1457 | struct drbd_device *odev; |
b411b363 PR |
1458 | int i, rv = 0; |
1459 | ||
695d08fa | 1460 | rcu_read_lock(); |
05a10ec7 | 1461 | idr_for_each_entry(&drbd_devices, odev, i) { |
b411b363 PR |
1462 | if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS) |
1463 | continue; | |
1464 | if (!_drbd_may_sync_now(odev)) | |
1465 | rv |= (__drbd_set_state(_NS(odev, aftr_isp, 1), CS_HARD, NULL) | |
1466 | != SS_NOTHING_TO_DO); | |
1467 | } | |
695d08fa | 1468 | rcu_read_unlock(); |
b411b363 PR |
1469 | |
1470 | return rv; | |
1471 | } | |
1472 | ||
1473 | /** | |
1474 | * _drbd_resume_next() - Resume resync on all devices that may resync now | |
b30ab791 | 1475 | * @device: DRBD device. |
b411b363 PR |
1476 | * |
1477 | * Called from process context only (admin command and worker). | |
1478 | */ | |
b30ab791 | 1479 | static int _drbd_resume_next(struct drbd_device *device) |
b411b363 | 1480 | { |
54761697 | 1481 | struct drbd_device *odev; |
b411b363 PR |
1482 | int i, rv = 0; |
1483 | ||
695d08fa | 1484 | rcu_read_lock(); |
05a10ec7 | 1485 | idr_for_each_entry(&drbd_devices, odev, i) { |
b411b363 PR |
1486 | if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS) |
1487 | continue; | |
1488 | if (odev->state.aftr_isp) { | |
1489 | if (_drbd_may_sync_now(odev)) | |
1490 | rv |= (__drbd_set_state(_NS(odev, aftr_isp, 0), | |
1491 | CS_HARD, NULL) | |
1492 | != SS_NOTHING_TO_DO); | |
1493 | } | |
1494 | } | |
695d08fa | 1495 | rcu_read_unlock(); |
b411b363 PR |
1496 | return rv; |
1497 | } | |
1498 | ||
b30ab791 | 1499 | void resume_next_sg(struct drbd_device *device) |
b411b363 PR |
1500 | { |
1501 | write_lock_irq(&global_state_lock); | |
b30ab791 | 1502 | _drbd_resume_next(device); |
b411b363 PR |
1503 | write_unlock_irq(&global_state_lock); |
1504 | } | |
1505 | ||
b30ab791 | 1506 | void suspend_other_sg(struct drbd_device *device) |
b411b363 PR |
1507 | { |
1508 | write_lock_irq(&global_state_lock); | |
b30ab791 | 1509 | _drbd_pause_after(device); |
b411b363 PR |
1510 | write_unlock_irq(&global_state_lock); |
1511 | } | |
1512 | ||
dc97b708 | 1513 | /* caller must hold global_state_lock */ |
b30ab791 | 1514 | enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor) |
b411b363 | 1515 | { |
54761697 | 1516 | struct drbd_device *odev; |
95f8efd0 | 1517 | int resync_after; |
b411b363 PR |
1518 | |
1519 | if (o_minor == -1) | |
1520 | return NO_ERROR; | |
a3f8f7dc | 1521 | if (o_minor < -1 || o_minor > MINORMASK) |
95f8efd0 | 1522 | return ERR_RESYNC_AFTER; |
b411b363 PR |
1523 | |
1524 | /* check for loops */ | |
b30ab791 | 1525 | odev = minor_to_device(o_minor); |
b411b363 | 1526 | while (1) { |
b30ab791 | 1527 | if (odev == device) |
95f8efd0 | 1528 | return ERR_RESYNC_AFTER_CYCLE; |
b411b363 | 1529 | |
a3f8f7dc LE |
1530 | /* You are free to depend on diskless, non-existent, |
1531 | * or not-yet/no-longer existing minors. |
1532 | * We only reject dependency loops. | |
1533 | * We cannot follow the dependency chain beyond a detached or | |
1534 | * missing minor. | |
1535 | */ | |
1536 | if (!odev || !odev->ldev || odev->state.disk == D_DISKLESS) | |
1537 | return NO_ERROR; | |
1538 | ||
daeda1cc | 1539 | rcu_read_lock(); |
95f8efd0 | 1540 | resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after; |
daeda1cc | 1541 | rcu_read_unlock(); |
b411b363 | 1542 | /* dependency chain ends here, no cycles. */ |
95f8efd0 | 1543 | if (resync_after == -1) |
b411b363 PR |
1544 | return NO_ERROR; |
1545 | ||
1546 | /* follow the dependency chain */ | |
b30ab791 | 1547 | odev = minor_to_device(resync_after); |
b411b363 PR |
1548 | } |
1549 | } | |
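/* Example of the loop rejection above (illustrative): if minor 0 already
 * has resync-after = 1 and the admin now tries to set resync-after = 0
 * on minor 1, the walk starts at odev = minor 0, follows its dependency
 * to minor 1, hits "odev == device" and returns ERR_RESYNC_AFTER_CYCLE,
 * refusing the cyclic configuration up front. */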
1550 | ||
dc97b708 | 1551 | /* caller must hold global_state_lock */ |
b30ab791 | 1552 | void drbd_resync_after_changed(struct drbd_device *device) |
b411b363 PR |
1553 | { |
1554 | int changes; | |
b411b363 | 1555 | |
dc97b708 | 1556 | do { |
b30ab791 AG |
1557 | changes = _drbd_pause_after(device); |
1558 | changes |= _drbd_resume_next(device); | |
dc97b708 | 1559 | } while (changes); |
b411b363 PR |
1560 | } |
1561 | ||
b30ab791 | 1562 | void drbd_rs_controller_reset(struct drbd_device *device) |
9bd28d3c | 1563 | { |
813472ce PR |
1564 | struct fifo_buffer *plan; |
1565 | ||
b30ab791 AG |
1566 | atomic_set(&device->rs_sect_in, 0); |
1567 | atomic_set(&device->rs_sect_ev, 0); | |
1568 | device->rs_in_flight = 0; | |
813472ce PR |
1569 | |
1570 | /* Updating the RCU-protected object in place is necessary since | |
1571 | this function gets called from atomic context. | |
1572 | It is valid since all other updates also lead to a completely | |
1573 | empty fifo */ | |
1574 | rcu_read_lock(); | |
b30ab791 | 1575 | plan = rcu_dereference(device->rs_plan_s); |
813472ce PR |
1576 | plan->total = 0; |
1577 | fifo_set(plan, 0); | |
1578 | rcu_read_unlock(); | |
9bd28d3c LE |
1579 | } |
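/* Why the in-place update above is tolerable (a sketch, not a general
 * pattern -- names as in this file, allocation details elided):
 *
 *   new = kzalloc(..., GFP_KERNEL);                  // may sleep
 *   rcu_assign_pointer(device->rs_plan_s, new);
 *   synchronize_rcu();                               // may sleep
 *   kfree(old);
 *
 * would be the textbook RCU update, but both sleeping steps are
 * forbidden in atomic context.  Zeroing the fifo in place works here
 * only because every other updater also leaves a completely empty
 * fifo, so concurrent readers cannot observe anything a regular
 * update would not have produced. */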
1580 | ||
1f04af33 PR |
1581 | void start_resync_timer_fn(unsigned long data) |
1582 | { | |
b30ab791 | 1583 | struct drbd_device *device = (struct drbd_device *) data; |
1f04af33 | 1584 | |
a6b32bc3 | 1585 | drbd_queue_work(&first_peer_device(device)->connection->sender_work, &device->start_resync_work); |
1f04af33 PR |
1586 | } |
1587 | ||
99920dc5 | 1588 | int w_start_resync(struct drbd_work *w, int cancel) |
1f04af33 | 1589 | { |
b30ab791 | 1590 | struct drbd_device *device = w->device; |
00d56944 | 1591 | |
b30ab791 | 1592 | if (atomic_read(&device->unacked_cnt) || atomic_read(&device->rs_pending_cnt)) { |
1f04af33 | 1593 | dev_warn(DEV, "w_start_resync later...\n"); |
b30ab791 AG |
1594 | device->start_resync_timer.expires = jiffies + HZ/10; |
1595 | add_timer(&device->start_resync_timer); | |
99920dc5 | 1596 | return 0; |
1f04af33 PR |
1597 | } |
1598 | ||
b30ab791 AG |
1599 | drbd_start_resync(device, C_SYNC_SOURCE); |
1600 | clear_bit(AHEAD_TO_SYNC_SOURCE, &device->flags); | |
99920dc5 | 1601 | return 0; |
1f04af33 PR |
1602 | } |
1603 | ||
b411b363 PR |
1604 | /** |
1605 | * drbd_start_resync() - Start the resync process | |
b30ab791 | 1606 | * @device: DRBD device. |
b411b363 PR |
1607 | * @side: Either C_SYNC_SOURCE or C_SYNC_TARGET |
1608 | * | |
1609 | * This function might bring you directly into one of the | |
1610 | * C_PAUSED_SYNC_* states. | |
1611 | */ | |
b30ab791 | 1612 | void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) |
b411b363 PR |
1613 | { |
1614 | union drbd_state ns; | |
1615 | int r; | |
1616 | ||
b30ab791 | 1617 | if (device->state.conn >= C_SYNC_SOURCE && device->state.conn < C_AHEAD) { |
b411b363 PR |
1618 | dev_err(DEV, "Resync already running!\n"); |
1619 | return; | |
1620 | } | |
1621 | ||
b30ab791 | 1622 | if (!test_bit(B_RS_H_DONE, &device->flags)) { |
e64a3294 PR |
1623 | if (side == C_SYNC_TARGET) { |
1624 | /* Since application IO was locked out during C_WF_BITMAP_T and | |
1625 | C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET | |
1626 | we check whether we are about to make the data inconsistent. */ | |
b30ab791 | 1627 | r = drbd_khelper(device, "before-resync-target"); |
e64a3294 PR |
1628 | r = (r >> 8) & 0xff; |
1629 | if (r > 0) { | |
1630 | dev_info(DEV, "before-resync-target handler returned %d, " | |
09b9e797 | 1631 | "dropping connection.\n", r); |
a6b32bc3 | 1632 | conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD); |
09b9e797 PR |
1633 | return; |
1634 | } | |
e64a3294 | 1635 | } else /* C_SYNC_SOURCE */ { |
b30ab791 | 1636 | r = drbd_khelper(device, "before-resync-source"); |
e64a3294 PR |
1637 | r = (r >> 8) & 0xff; |
1638 | if (r > 0) { | |
1639 | if (r == 3) { | |
1640 | dev_info(DEV, "before-resync-source handler returned %d, " | |
1641 | "ignoring. Old userland tools?", r); | |
1642 | } else { | |
1643 | dev_info(DEV, "before-resync-source handler returned %d, " | |
1644 | "dropping connection.\n", r); | |
a6b32bc3 AG |
1645 | conn_request_state(first_peer_device(device)->connection, |
1646 | NS(conn, C_DISCONNECTING), CS_HARD); | |
e64a3294 PR |
1647 | return; |
1648 | } | |
1649 | } | |
09b9e797 | 1650 | } |
b411b363 PR |
1651 | } |
1652 | ||
a6b32bc3 | 1653 | if (current == first_peer_device(device)->connection->worker.task) { |
dad20554 | 1654 | /* The worker should not sleep waiting for state_mutex, |
e64a3294 | 1655 | that can take long */ |
b30ab791 AG |
1656 | if (!mutex_trylock(device->state_mutex)) { |
1657 | set_bit(B_RS_H_DONE, &device->flags); | |
1658 | device->start_resync_timer.expires = jiffies + HZ/5; | |
1659 | add_timer(&device->start_resync_timer); | |
e64a3294 PR |
1660 | return; |
1661 | } | |
1662 | } else { | |
b30ab791 | 1663 | mutex_lock(device->state_mutex); |
e64a3294 | 1664 | } |
b30ab791 | 1665 | clear_bit(B_RS_H_DONE, &device->flags); |
b411b363 | 1666 | |
0cfac5dd | 1667 | write_lock_irq(&global_state_lock); |
a700471b | 1668 | /* Did some connection breakage or IO error race with us? */ |
b30ab791 AG |
1669 | if (device->state.conn < C_CONNECTED |
1670 | || !get_ldev_if_state(device, D_NEGOTIATING)) { | |
0cfac5dd | 1671 | write_unlock_irq(&global_state_lock); |
b30ab791 | 1672 | mutex_unlock(device->state_mutex); |
b411b363 PR |
1673 | return; |
1674 | } | |
1675 | ||
b30ab791 | 1676 | ns = drbd_read_state(device); |
b411b363 | 1677 | |
b30ab791 | 1678 | ns.aftr_isp = !_drbd_may_sync_now(device); |
b411b363 PR |
1679 | |
1680 | ns.conn = side; | |
1681 | ||
1682 | if (side == C_SYNC_TARGET) | |
1683 | ns.disk = D_INCONSISTENT; | |
1684 | else /* side == C_SYNC_SOURCE */ | |
1685 | ns.pdsk = D_INCONSISTENT; | |
1686 | ||
b30ab791 AG |
1687 | r = __drbd_set_state(device, ns, CS_VERBOSE, NULL); |
1688 | ns = drbd_read_state(device); | |
b411b363 PR |
1689 | |
1690 | if (ns.conn < C_CONNECTED) | |
1691 | r = SS_UNKNOWN_ERROR; | |
1692 | ||
1693 | if (r == SS_SUCCESS) { | |
b30ab791 | 1694 | unsigned long tw = drbd_bm_total_weight(device); |
1d7734a0 LE |
1695 | unsigned long now = jiffies; |
1696 | int i; | |
1697 | ||
b30ab791 AG |
1698 | device->rs_failed = 0; |
1699 | device->rs_paused = 0; | |
1700 | device->rs_same_csum = 0; | |
1701 | device->rs_last_events = 0; | |
1702 | device->rs_last_sect_ev = 0; | |
1703 | device->rs_total = tw; | |
1704 | device->rs_start = now; | |
1d7734a0 | 1705 | for (i = 0; i < DRBD_SYNC_MARKS; i++) { |
b30ab791 AG |
1706 | device->rs_mark_left[i] = tw; |
1707 | device->rs_mark_time[i] = now; | |
1d7734a0 | 1708 | } |
b30ab791 | 1709 | _drbd_pause_after(device); |
b411b363 PR |
1710 | } |
1711 | write_unlock_irq(&global_state_lock); | |
5a22db89 | 1712 | |
b411b363 | 1713 | if (r == SS_SUCCESS) { |
328e0f12 PR |
1714 | /* reset rs_last_bcast when a resync or verify is started, |
1715 | * to deal with potential jiffies wrap. */ | |
b30ab791 | 1716 | device->rs_last_bcast = jiffies - HZ; |
328e0f12 | 1717 | |
b411b363 PR |
1718 | dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n", |
1719 | drbd_conn_str(ns.conn), | |
b30ab791 AG |
1720 | (unsigned long) device->rs_total << (BM_BLOCK_SHIFT-10), |
1721 | (unsigned long) device->rs_total); | |
6c922ed5 | 1722 | if (side == C_SYNC_TARGET) |
b30ab791 | 1723 | device->bm_resync_fo = 0; |
6c922ed5 LE |
1724 | |
1725 | /* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid | |
1726 | * with w_send_oos, or the sync target will get confused as to | |
1727 | * how many bits to resync. We cannot always do that, because for an | |
1728 | * empty resync and protocol < 95, we need to do it here, as we call | |
1729 | * drbd_resync_finished from here in that case. | |
1730 | * We drbd_gen_and_send_sync_uuid here for protocol < 96, | |
1731 | * and from after_state_ch otherwise. */ | |
a6b32bc3 AG |
1732 | if (side == C_SYNC_SOURCE && |
1733 | first_peer_device(device)->connection->agreed_pro_version < 96) | |
b30ab791 | 1734 | drbd_gen_and_send_sync_uuid(device); |
b411b363 | 1735 | |
a6b32bc3 AG |
1736 | if (first_peer_device(device)->connection->agreed_pro_version < 95 && |
1737 | device->rs_total == 0) { | |
af85e8e8 LE |
1738 | /* This still has a race (about when exactly the peers |
1739 | * detect connection loss) that can lead to a full sync | |
1740 | * on next handshake. In 8.3.9 we fixed this with explicit | |
1741 | * resync-finished notifications, but the fix | |
1742 | * introduces a protocol change. Sleeping for some | |
1743 | * time longer than the ping interval + timeout on the | |
1744 | * SyncSource, to give the SyncTarget the chance to | |
1745 | * detect connection loss, then waiting for a ping | |
1746 | * response (implicit in drbd_resync_finished) reduces | |
1747 | * the race considerably, but does not solve it. */ | |
44ed167d PR |
1748 | if (side == C_SYNC_SOURCE) { |
1749 | struct net_conf *nc; | |
1750 | int timeo; | |
1751 | ||
1752 | rcu_read_lock(); | |
a6b32bc3 | 1753 | nc = rcu_dereference(first_peer_device(device)->connection->net_conf); |
44ed167d PR |
1754 | timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9; |
1755 | rcu_read_unlock(); | |
1756 | schedule_timeout_interruptible(timeo); | |
1757 | } | |
b30ab791 | 1758 | drbd_resync_finished(device); |
b411b363 PR |
1759 | } |
1760 | ||
b30ab791 AG |
1761 | drbd_rs_controller_reset(device); |
1762 | /* ns.conn may already be != device->state.conn, | |
b411b363 PR |
1763 | * we may have been paused in between, or become paused until |
1764 | * the timer triggers. | |
1765 | * No matter, that is handled in resync_timer_fn() */ | |
1766 | if (ns.conn == C_SYNC_TARGET) | |
b30ab791 | 1767 | mod_timer(&device->resync_timer, jiffies); |
b411b363 | 1768 | |
b30ab791 | 1769 | drbd_md_sync(device); |
b411b363 | 1770 | } |
b30ab791 AG |
1771 | put_ldev(device); |
1772 | mutex_unlock(device->state_mutex); | |
b411b363 PR |
1773 | } |
1774 | ||
b6dd1a89 LE |
1775 | /* If the resource already closed the current epoch, but we did not |
1776 | * (because we have not yet seen new requests), we should send the | |
1777 | * corresponding barrier now. Must be checked within the same spinlock | |
1778 | * that is used to check for new requests. */ | |
bde89a9e | 1779 | static bool need_to_send_barrier(struct drbd_connection *connection) |
b6dd1a89 LE |
1780 | { |
1781 | if (!connection->send.seen_any_write_yet) | |
1782 | return false; | |
1783 | ||
1784 | /* Skip barriers that do not contain any writes. | |
1785 | * This may happen during AHEAD mode. */ | |
1786 | if (!connection->send.current_epoch_writes) | |
1787 | return false; | |
1788 | ||
1789 | /* ->req_lock is held when requests are queued on | |
1790 | * connection->sender_work, and put into ->transfer_log. | |
1791 | * It is also held when ->current_tle_nr is increased. | |
1792 | * So either there are already new requests queued, | |
1793 | * and corresponding barriers will be sent there. | |
1794 | * Or nothing new is queued yet, so the difference will be 1. | |
1795 | */ | |
1796 | if (atomic_read(&connection->current_tle_nr) != | |
1797 | connection->send.current_epoch_nr + 1) | |
1798 | return false; | |
1799 | ||
1800 | return true; | |
1801 | } | |
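/* Worked example (illustrative): say the last write went out in epoch 6,
 * so send.current_epoch_nr == 6 and current_epoch_writes > 0.  If the
 * resource has since closed that epoch, current_tle_nr was bumped to 7;
 * with nothing new queued, 7 == 6 + 1 holds and we must send the closing
 * P_BARRIER ourselves.  If current_tle_nr is 8 or more, newer requests
 * already sit in the transfer log and their worker callbacks will emit
 * the barrier through maybe_send_barrier(). */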
1802 | ||
a186e478 | 1803 | static bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *work_list) |
8c0785a5 LE |
1804 | { |
1805 | spin_lock_irq(&queue->q_lock); | |
1806 | list_splice_init(&queue->q, work_list); | |
1807 | spin_unlock_irq(&queue->q_lock); | |
1808 | return !list_empty(work_list); | |
1809 | } | |
1810 | ||
a186e478 | 1811 | static bool dequeue_work_item(struct drbd_work_queue *queue, struct list_head *work_list) |
8c0785a5 LE |
1812 | { |
1813 | spin_lock_irq(&queue->q_lock); | |
1814 | if (!list_empty(&queue->q)) | |
1815 | list_move(queue->q.next, work_list); | |
1816 | spin_unlock_irq(&queue->q_lock); | |
1817 | return !list_empty(work_list); | |
1818 | } | |
1819 | ||
bde89a9e | 1820 | static void wait_for_work(struct drbd_connection *connection, struct list_head *work_list) |
b6dd1a89 LE |
1821 | { |
1822 | DEFINE_WAIT(wait); | |
1823 | struct net_conf *nc; | |
1824 | int uncork, cork; | |
1825 | ||
1826 | dequeue_work_item(&connection->sender_work, work_list); | |
1827 | if (!list_empty(work_list)) | |
1828 | return; | |
1829 | ||
1830 | /* Still nothing to do? | |
1831 | * Maybe we still need to close the current epoch, | |
1832 | * even if no new requests are queued yet. | |
1833 | * | |
1834 | * Also, poke TCP, just in case. | |
1835 | * Then wait for new work (or signal). */ | |
1836 | rcu_read_lock(); | |
1837 | nc = rcu_dereference(connection->net_conf); | |
1838 | uncork = nc ? nc->tcp_cork : 0; | |
1839 | rcu_read_unlock(); | |
1840 | if (uncork) { | |
1841 | mutex_lock(&connection->data.mutex); | |
1842 | if (connection->data.socket) | |
1843 | drbd_tcp_uncork(connection->data.socket); | |
1844 | mutex_unlock(&connection->data.mutex); | |
1845 | } | |
1846 | ||
1847 | for (;;) { | |
1848 | int send_barrier; | |
1849 | prepare_to_wait(&connection->sender_work.q_wait, &wait, TASK_INTERRUPTIBLE); | |
1850 | spin_lock_irq(&connection->req_lock); | |
1851 | spin_lock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */ | |
bc317a9e LE |
1852 | /* dequeue single item only, |
1853 | * we still use drbd_queue_work_front() in some places */ | |
1854 | if (!list_empty(&connection->sender_work.q)) | |
1855 | list_move(connection->sender_work.q.next, work_list); | |
b6dd1a89 LE |
1856 | spin_unlock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */ |
1857 | if (!list_empty(work_list) || signal_pending(current)) { | |
1858 | spin_unlock_irq(&connection->req_lock); | |
1859 | break; | |
1860 | } | |
1861 | send_barrier = need_to_send_barrier(connection); | |
1862 | spin_unlock_irq(&connection->req_lock); | |
1863 | if (send_barrier) { | |
1864 | drbd_send_barrier(connection); | |
1865 | connection->send.current_epoch_nr++; | |
1866 | } | |
1867 | schedule(); | |
1868 | /* may be woken up for things other than new work, too, | |
1869 | * e.g. if the current epoch got closed, | |
1870 | * in which case we send the barrier above. */ | |
1871 | } | |
1872 | finish_wait(&connection->sender_work.q_wait, &wait); | |
1873 | ||
1874 | /* someone may have changed the config while we have been waiting above. */ | |
1875 | rcu_read_lock(); | |
1876 | nc = rcu_dereference(connection->net_conf); | |
1877 | cork = nc ? nc->tcp_cork : 0; | |
1878 | rcu_read_unlock(); | |
1879 | mutex_lock(&connection->data.mutex); | |
1880 | if (connection->data.socket) { | |
1881 | if (cork) | |
1882 | drbd_tcp_cork(connection->data.socket); | |
1883 | else if (!uncork) | |
1884 | drbd_tcp_uncork(connection->data.socket); | |
1885 | } | |
1886 | mutex_unlock(&connection->data.mutex); | |
1887 | } | |
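/* The cork dance in wait_for_work(), spelled out (illustrative): with
 * net_conf->tcp_cork enabled, the data socket stays corked while the
 * worker is busy so that small packets coalesce into full segments.
 * Before sleeping we uncork to flush whatever is pending; after waking,
 * if corking is (still) enabled we cork again for the next batch.  The
 * "else if (!uncork)" leg covers corking having been disabled the whole
 * time and leaves the socket in a known uncorked state; if corking was
 * enabled before the wait, the socket was already uncorked above. */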
1888 | ||
b411b363 PR |
1889 | int drbd_worker(struct drbd_thread *thi) |
1890 | { | |
bde89a9e | 1891 | struct drbd_connection *connection = thi->connection; |
b411b363 | 1892 | struct drbd_work *w = NULL; |
b30ab791 | 1893 | struct drbd_device *device; |
b411b363 | 1894 | LIST_HEAD(work_list); |
8c0785a5 | 1895 | int vnr; |
b411b363 | 1896 | |
e77a0a5c | 1897 | while (get_t_state(thi) == RUNNING) { |
80822284 | 1898 | drbd_thread_current_set_cpu(thi); |
b411b363 | 1899 | |
8c0785a5 LE |
1900 | /* as long as we use drbd_queue_work_front(), |
1901 | * we may only dequeue single work items here, not batches. */ | |
1902 | if (list_empty(&work_list)) | |
bde89a9e | 1903 | wait_for_work(connection, &work_list); |
b411b363 | 1904 | |
8c0785a5 | 1905 | if (signal_pending(current)) { |
b411b363 | 1906 | flush_signals(current); |
19393e10 | 1907 | if (get_t_state(thi) == RUNNING) { |
bde89a9e | 1908 | conn_warn(connection, "Worker got an unexpected signal\n"); |
b411b363 | 1909 | continue; |
19393e10 | 1910 | } |
b411b363 PR |
1911 | break; |
1912 | } | |
1913 | ||
e77a0a5c | 1914 | if (get_t_state(thi) != RUNNING) |
b411b363 | 1915 | break; |
b411b363 | 1916 | |
8c0785a5 LE |
1917 | while (!list_empty(&work_list)) { |
1918 | w = list_first_entry(&work_list, struct drbd_work, list); | |
1919 | list_del_init(&w->list); | |
bde89a9e | 1920 | if (w->cb(w, connection->cstate < C_WF_REPORT_PARAMS) == 0) |
8c0785a5 | 1921 | continue; |
bde89a9e AG |
1922 | if (connection->cstate >= C_WF_REPORT_PARAMS) |
1923 | conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD); | |
b411b363 PR |
1924 | } |
1925 | } | |
b411b363 | 1926 | |
8c0785a5 | 1927 | do { |
b411b363 | 1928 | while (!list_empty(&work_list)) { |
8c0785a5 | 1929 | w = list_first_entry(&work_list, struct drbd_work, list); |
b411b363 | 1930 | list_del_init(&w->list); |
00d56944 | 1931 | w->cb(w, 1); |
b411b363 | 1932 | } |
bde89a9e | 1933 | dequeue_work_batch(&connection->sender_work, &work_list); |
8c0785a5 | 1934 | } while (!list_empty(&work_list)); |
b411b363 | 1935 | |
c141ebda | 1936 | rcu_read_lock(); |
bde89a9e | 1937 | idr_for_each_entry(&connection->volumes, device, vnr) { |
b30ab791 AG |
1938 | D_ASSERT(device->state.disk == D_DISKLESS && device->state.conn == C_STANDALONE); |
1939 | kref_get(&device->kref); | |
c141ebda | 1940 | rcu_read_unlock(); |
b30ab791 | 1941 | drbd_device_cleanup(device); |
05a10ec7 | 1942 | kref_put(&device->kref, drbd_destroy_device); |
c141ebda | 1943 | rcu_read_lock(); |
0e29d163 | 1944 | } |
c141ebda | 1945 | rcu_read_unlock(); |
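/* Pattern note (illustrative): drbd_device_cleanup() may block, so it
 * must not run under rcu_read_lock().  The loop above therefore pins
 * each device with kref_get(), drops the RCU read lock around the
 * cleanup, re-acquires it before the idr iteration advances, and
 * finally releases the device with kref_put(). */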
b411b363 PR |
1946 | |
1947 | return 0; | |
1948 | } |