/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

struct packet_info {
	enum drbd_packet cmd;
	int size;
	int vnr;
};

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_handshake(struct drbd_tconn *tconn);
static int drbd_do_auth(struct drbd_tconn *tconn);
static int drbd_disconnected(int vnr, void *p, void *data);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);

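/* "Try" allocation mode for the receive page pool: allow highmem pages,
 * suppress allocation failure warnings.  Note the absence of __GFP_WAIT:
 * the attempt fails fast instead of blocking on reclaim; callers retry
 * via drbd_pp_alloc() below. */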
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * Some helper functions to deal with singly linked page lists,
 * page->private being our "next" pointer.
 */
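
/* For illustration: a chain A -> B -> C is encoded as
 *	set_page_private(A, (unsigned long)B);
 *	set_page_private(B, (unsigned long)C);
 *	set_page_private(C, 0);		(end-of-chain marker)
 * so page_chain_next(A) yields B, and so on. */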

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_peer_request *peer_req;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first one that has not finished,
	   we can stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		peer_req = list_entry(le, struct drbd_peer_request, w.list);
		if (drbd_ee_has_active_page(peer_req))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, peer_req);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->tconn->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->tconn->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}

/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->tconn->req_lock)
 * critical section.
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_ee()
 drbd_alloc_ee()
 drbd_init_ee()
 drbd_release_ee()
 drbd_ee_fix_bhs()
 drbd_process_done_ee()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

struct drbd_peer_request *
drbd_alloc_ee(struct drbd_conf *mdev, u64 id, sector_t sector,
	      unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_peer_request *peer_req;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!peer_req) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	drbd_clear_interval(&peer_req->i);
	peer_req->i.size = data_size;
	peer_req->i.sector = sector;
	peer_req->i.local = false;
	peer_req->i.waiting = false;

	peer_req->epoch = NULL;
	peer_req->w.mdev = mdev;
	peer_req->pages = page;
	atomic_set(&peer_req->pending_bios, 0);
	peer_req->flags = 0;
	/*
	 * The block_id is opaque to the receiver.  It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	peer_req->block_id = id;

	return peer_req;

 fail:
	mempool_free(peer_req, drbd_ee_mempool);
	return NULL;
}

void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
		       int is_net)
{
	if (peer_req->flags & EE_HAS_DIGEST)
		kfree(peer_req->digest);
	drbd_pp_free(mdev, peer_req->pages, is_net);
	D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
	D_ASSERT(drbd_interval_empty(&peer_req->i));
	mempool_free(peer_req, drbd_ee_mempool);
}

int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_peer_request *peer_req, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, peer_req, is_net);
		count++;
	}
	return count;
}


/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,BARRIER_ACKED)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, peer_req);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = peer_req->w.cb(mdev, &peer_req->w, !ok) && ok;
		drbd_free_ee(mdev, peer_req);
	}
	wake_up(&mdev->ee_wait);

	return ok;
}

void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->tconn->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->tconn->req_lock);
	}
}

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->tconn->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->tconn->req_lock);
}

/* See also kernel_accept(), which is only present since 2.6.18.
 * We also want to log exactly which part of the accept failed. */
static int drbd_accept(const char **what, struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops = sock->ops;

out:
	return err;
}

static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(tconn->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on sock) we got a signal
		 */

		if (rv < 0) {
			if (rv == -ECONNRESET)
				conn_info(tconn, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				conn_err(tconn, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			conn_info(tconn, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	}

	set_fs(oldfs);

	if (rv != size)
		drbd_force_state(tconn->volume0, NS(conn, C_BROKEN_PIPE));

	return rv;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(tconn))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)tconn->net_conf->my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = tconn->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, tconn->net_conf->sndbuf_size,
			tconn->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so Linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, tconn->net_conf->my_addr,
	       min_t(int, tconn->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)tconn->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      tconn->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)tconn->net_conf->peer_addr,
				 tconn->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN: case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			conn_err(tconn, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			drbd_force_state(tconn->volume0, NS(conn, C_DISCONNECTING));
	}
	put_net_conf(tconn);
	return sock;
}

static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(tconn))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)tconn->net_conf->my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = tconn->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, tconn->net_conf->sndbuf_size,
			tconn->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
				  (struct sockaddr *) tconn->net_conf->my_addr,
				  tconn->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(&what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			conn_err(tconn, "%s failed, err = %d\n", what, err);
			drbd_force_state(tconn->volume0, NS(conn, C_DISCONNECTING));
		}
	}
	put_net_conf(tconn);

	return s_estab;
}

static int drbd_send_fp(struct drbd_tconn *tconn, struct socket *sock, enum drbd_packet cmd)
{
	struct p_header *h = &tconn->data.sbuf.header;

	return _conn_send_cmd(tconn, 0, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packet drbd_recv_fp(struct drbd_tconn *tconn, struct socket *sock)
{
	struct p_header80 *h = &tconn->data.rbuf.header.h80;
	int rr;

	rr = drbd_recv_short(sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == cpu_to_be32(DRBD_MAGIC))
		return be16_to_cpu(h->command);

	return 0xffff;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock: pointer to the pointer to the socket.
 *
 * Probes the socket with a non-blocking MSG_PEEK read: pending data or
 * -EAGAIN means the connection is still alive; on any other result the
 * socket is released and *sock is set to NULL.
 */
static int drbd_socket_okay(struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}

static int drbd_connected(int vnr, void *p, void *data)
{
	struct drbd_conf *mdev = (struct drbd_conf *)p;
	int ok = 1;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	ok &= drbd_send_sync_param(mdev, &mdev->sync_conf);
	ok &= drbd_send_sizes(mdev, 0, 0);
	ok &= drbd_send_uuids(mdev);
	ok &= drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);

	return !ok;
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
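/* Note on the connection scheme, as implemented below: each side both
 * connects and listens, so two TCP connections come up per peer.  The
 * first established one becomes the data socket ("sock"), the second
 * the meta data socket ("msock") for pings and ACKs.  Crossed
 * connection attempts are detected via the P_HAND_SHAKE_S/_M initial
 * packets and resolved, if need be, by randomly dropping one side. */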
static int drbd_connect(struct drbd_tconn *tconn)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	if (drbd_request_state(tconn->volume0, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &tconn->flags);
	tconn->agreed_pro_version = 99;
	/* agreed_pro_version must be smaller than 100 so we send the old
	   header (h80) in the first packet and in the handshake packet. */

	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(tconn);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			schedule_timeout_interruptible(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(tconn, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(tconn, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				conn_err(tconn, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			schedule_timeout_interruptible(tconn->net_conf->ping_timeo*HZ/10);
			ok = drbd_socket_okay(&sock);
			ok = drbd_socket_okay(&msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(tconn);
		if (s) {
			try = drbd_recv_fp(tconn, s);
			drbd_socket_okay(&sock);
			drbd_socket_okay(&msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					conn_warn(tconn, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					conn_warn(tconn, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &tconn->flags);
				break;
			default:
				conn_warn(tconn, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (tconn->volume0->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&tconn->receiver) == EXITING)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(&sock);
			ok = drbd_socket_okay(&msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = tconn->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	tconn->data.socket = sock;
	tconn->meta.socket = msock;
	tconn->last_received = jiffies;

	h = drbd_do_handshake(tconn);
	if (h <= 0)
		return h;

	if (tconn->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(tconn)) {
		case -1:
			conn_err(tconn, "Authentication of peer failed\n");
			return -1;
		case 0:
			conn_err(tconn, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	if (drbd_request_state(tconn->volume0, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
		return 0;

	sock->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	drbd_thread_start(&tconn->asender);

	if (drbd_send_protocol(tconn) == -1)
		return -1;

	return !idr_for_each(&tconn->volumes, drbd_connected, tconn);

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}

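/* Two header layouts exist on the wire: the original h80 header (32 bit
 * magic, 16 bit command, 16 bit length) and the h95 header (16 bit
 * magic, 16 bit command, and a wider length field of which only the low
 * 24 bits are valid, hence the 0x00ffffff mask below). */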
static bool decode_header(struct drbd_tconn *tconn, struct p_header *h, struct packet_info *pi)
{
	if (h->h80.magic == cpu_to_be32(DRBD_MAGIC)) {
		pi->cmd = be16_to_cpu(h->h80.command);
		pi->size = be16_to_cpu(h->h80.length);
		pi->vnr = 0;
	} else if (h->h95.magic == cpu_to_be16(DRBD_MAGIC_BIG)) {
		pi->cmd = be16_to_cpu(h->h95.command);
		pi->size = be32_to_cpu(h->h95.length) & 0x00ffffff;
		pi->vnr = 0;
	} else {
		conn_err(tconn, "magic?? on data m: 0x%08x c: %d l: %d\n",
			 be32_to_cpu(h->h80.magic),
			 be16_to_cpu(h->h80.command),
			 be16_to_cpu(h->h80.length));
		return false;
	}
	return true;
}

static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct p_header *h = &tconn->data.rbuf.header;
	int r;

	r = drbd_recv(tconn, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		if (!signal_pending(current))
			conn_warn(tconn, "short read expecting header on sock: r=%d\n", r);
		return false;
	}

	r = decode_header(tconn, h, pi);
	tconn->last_received = jiffies;

	return r;
}

static void drbd_flush(struct drbd_conf *mdev)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, possibly finishing it
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do*/
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
				wake_up(&mdev->ee_wait);
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	return rv;
}

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}

/**
 * drbd_submit_ee() - Submit a peer request to the local I/O stack
 * @mdev:	DRBD device.
 * @peer_req:	peer request
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
		   const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = peer_req->pages;
	sector_t sector = peer_req->i.sector;
	unsigned ds = peer_req->i.size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int err = -ENOMEM;

	/* In most cases, we will only need one bio.  But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio. */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > peer_req->i.sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = peer_req;
	bio->bi_end_io = drbd_endio_sec;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyways,
			 * we deal with it, and complain (below). */
			if (bio->bi_vcnt == 0) {
				dev_err(DEV,
					"bio_add_page failed for len=%u, "
					"bi_vcnt=0 (bi_sector=%llu)\n",
					len, (unsigned long long)bio->bi_sector);
				err = -ENOSPC;
				goto fail;
			}
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&peer_req->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);
	} while (bios);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return err;
}

static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
					     struct drbd_peer_request *peer_req)
{
	struct drbd_interval *i = &peer_req->i;

	drbd_remove_interval(&mdev->write_requests, i);
	drbd_clear_interval(i);

	/* Wake up any processes waiting for this peer request to complete. */
	if (i->waiting)
		wake_up(&mdev->misc_wait);
}

static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packet cmd,
			   unsigned int data_size)
{
	int rv;
	struct p_barrier *p = &mdev->tconn->data.rbuf.barrier;
	struct drbd_epoch *epoch;

	inc_unacked(mdev);

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
	case WO_none:
		if (rv == FE_RECYCLED)
			return true;

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		if (epoch)
			break;
		else
			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
		/* Fall through */

	case WO_bdev_flush:
	case WO_drain_io:
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		drbd_flush(mdev);

		if (atomic_read(&mdev->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			if (epoch)
				break;
		}

		epoch = mdev->current_epoch;
		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);

		D_ASSERT(atomic_read(&epoch->active) == 0);
		D_ASSERT(epoch->flags == 0);

		return true;
	default:
		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
		return false;
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		mdev->epochs++;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
	spin_unlock(&mdev->epoch_lock);

	return true;
}

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_peer_request *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
	      int data_size) __must_hold(local)
{
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_peer_request *peer_req;
	struct page *page;
	int dgs, ds, rr;
	void *dig_in = mdev->tconn->int_dig_in;
	void *dig_vv = mdev->tconn->int_dig_vv;
	unsigned long *data;

	dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->tconn->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev->tconn, dig_in, dgs);
		if (rr != dgs) {
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data digest: read %d expected %d\n",
					rr, dgs);
			return NULL;
		}
	}

	data_size -= dgs;

	if (!expect(data_size != 0))
		return NULL;
	if (!expect(IS_ALIGNED(data_size, 512)))
		return NULL;
	if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
		return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "request from peer beyond end of local disk: "
			"capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);
		return NULL;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	peer_req = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
	if (!peer_req)
		return NULL;

	ds = data_size;
	page = peer_req->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		data = kmap(page);
		rr = drbd_recv(mdev->tconn, data, len);
		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
		}
		kunmap(page);
		if (rr != len) {
			drbd_free_ee(mdev, peer_req);
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data: read %d expected %d\n",
					rr, len);
			return NULL;
		}
		ds -= rr;
	}

	if (dgs) {
		drbd_csum_ee(mdev, mdev->tconn->integrity_r_tfm, peer_req, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_bcast_ee(mdev, "digest failed",
					dgs, dig_in, dig_vv, peer_req);
			drbd_free_ee(mdev, peer_req);
			return NULL;
		}
	}
	mdev->recv_cnt += data_size>>9;
	return peer_req;
}

/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
	struct page *page;
	int rr, rv = 1;
	void *data;

	if (!data_size)
		return true;

	page = drbd_pp_alloc(mdev, 1, 1);

	data = kmap(page);
	while (data_size) {
		rr = drbd_recv(mdev->tconn, data, min_t(int, data_size, PAGE_SIZE));
		if (rr != min_t(int, data_size, PAGE_SIZE)) {
			rv = 0;
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data: read %d expected %d\n",
					rr, min_t(int, data_size, PAGE_SIZE));
			break;
		}
		data_size -= rr;
	}
	kunmap(page);
	drbd_pp_free(mdev, page, 0);
	return rv;
}

static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
{
	struct bio_vec *bvec;
	struct bio *bio;
	int dgs, rr, i, expect;
	void *dig_in = mdev->tconn->int_dig_in;
	void *dig_vv = mdev->tconn->int_dig_vv;

	dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->tconn->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev->tconn, dig_in, dgs);
		if (rr != dgs) {
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data reply digest: read %d expected %d\n",
					rr, dgs);
			return 0;
		}
	}

	data_size -= dgs;

	/* optimistically update recv_cnt.  if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		expect = min_t(int, data_size, bvec->bv_len);
		rr = drbd_recv(mdev->tconn,
			       kmap(bvec->bv_page)+bvec->bv_offset,
			       expect);
		kunmap(bvec->bv_page);
		if (rr != expect) {
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data reply: "
					"read %d expected %d\n",
					rr, expect);
			return 0;
		}
		data_size -= rr;
	}

	if (dgs) {
		drbd_csum_bio(mdev, mdev->tconn->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
			return 0;
		}
	}

	D_ASSERT(data_size == 0);
	return 1;
}

/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_peer_request *peer_req = (struct drbd_peer_request *)w;
	sector_t sector = peer_req->i.sector;
	int ok;

	D_ASSERT(drbd_interval_empty(&peer_req->i));

	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, peer_req->i.size);
		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, peer_req->i.size);

		ok = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
	}
	dec_unacked(mdev);

	return ok;
}

static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
	struct drbd_peer_request *peer_req;

	peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
	if (!peer_req)
		goto fail;

	dec_rs_pending(mdev);

	inc_unacked(mdev);
	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */

	peer_req->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_add(&peer_req->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
		return true;

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&peer_req->w.list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	drbd_free_ee(mdev, peer_req);
fail:
	put_ldev(mdev);
	return false;
}

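/* When sending a request we pass the kernel address of the corresponding
 * struct drbd_request as the opaque block_id, and the peer echoes it back
 * unchanged (cf. the block_id comment in drbd_alloc_ee()).  Before the
 * cast pointer below is trusted, drbd_contains_interval() verifies that
 * it really is a known request at that sector. */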
static struct drbd_request *
find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
	     sector_t sector, bool missing_ok, const char *func)
{
	struct drbd_request *req;

	/* Request object according to our peer */
	req = (struct drbd_request *)(unsigned long)id;
	if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
		return req;
	if (!missing_ok) {
		dev_err(DEV, "%s: failed to find request %lu, sector %llus\n", func,
			(unsigned long)id, (unsigned long long)sector);
	}
	return NULL;
}

static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packet cmd,
			     unsigned int data_size)
{
	struct drbd_request *req;
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->tconn->data.rbuf.data;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->tconn->req_lock);
	req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
	spin_unlock_irq(&mdev->tconn->req_lock);
	if (unlikely(!req))
		return false;

	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	ok = recv_dless_read(mdev, req, sector, data_size);

	if (ok)
		req_mod(req, DATA_RECEIVED);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

	return ok;
}

static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packet cmd,
			       unsigned int data_size)
{
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->tconn->data.rbuf.data;

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_endio_sec. */
		ok = recv_resync_read(mdev, sector, data_size);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		ok = drbd_drain_block(mdev, data_size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
	}

	atomic_add(data_size >> 9, &mdev->rs_sect_in);

	return ok;
}

/* e_end_block() is called via drbd_process_done_ee().
 * this means this function only runs in the asender thread
 */
static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = (struct drbd_peer_request *)w;
	sector_t sector = peer_req->i.sector;
	int ok = 1, pcmd;

	if (mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C) {
		if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				peer_req->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			ok &= drbd_send_ack(mdev, pcmd, peer_req);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, peer_req->i.size);
		} else {
			ok = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */
		}
		dec_unacked(mdev);
	}
	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (mdev->tconn->net_conf->two_primaries) {
		spin_lock_irq(&mdev->tconn->req_lock);
		D_ASSERT(!drbd_interval_empty(&peer_req->i));
		drbd_remove_epoch_entry_interval(mdev, peer_req);
		spin_unlock_irq(&mdev->tconn->req_lock);
	} else
		D_ASSERT(drbd_interval_empty(&peer_req->i));

	drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

	return ok;
}

static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_peer_request *peer_req = (struct drbd_peer_request *)w;
	int ok = 1;

	D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
	ok = drbd_send_ack(mdev, P_DISCARD_ACK, peer_req);

	spin_lock_irq(&mdev->tconn->req_lock);
	D_ASSERT(!drbd_interval_empty(&peer_req->i));
	drbd_remove_epoch_entry_interval(mdev, peer_req);
	spin_unlock_irq(&mdev->tconn->req_lock);

	dec_unacked(mdev);

	return ok;
}

1644 | static bool seq_greater(u32 a, u32 b) | |
1645 | { | |
1646 | /* | |
1647 | * We assume 32-bit wrap-around here. | |
1648 | * For 24-bit wrap-around, we would have to shift: | |
1649 | * a <<= 8; b <<= 8; | |
1650 | */ | |
1651 | return (s32)a - (s32)b > 0; | |
1652 | } | |
1653 | ||
1654 | static u32 seq_max(u32 a, u32 b) | |
1655 | { | |
1656 | return seq_greater(a, b) ? a : b; | |
1657 | } | |
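 | /* | |
 | * Illustration (editor's sketch, not part of the driver): why the signed | |
 | * subtraction in seq_greater() survives a 32-bit wrap. With a == 2 and | |
 | * b == 0xfffffffe (the counter just wrapped), (s32)a - (s32)b evaluates | |
 | * to 2 - (-2) == 4 > 0, so 'a' is correctly considered newer even though | |
 | * a < b numerically: | |
 | * | |
 | * seq_greater(2, 0xfffffffe) == true; // 'a' is 4 steps "after" 'b' | |
 | * seq_greater(0xfffffffe, 2) == false; | |
 | * | |
 | * This holds as long as the two counters stay within 2^31 of each other. | |
 | */ | |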
1658 | ||
1659 | static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq) | |
1660 | { | |
1661 | unsigned int old_peer_seq; | |
1662 | ||
1663 | spin_lock(&mdev->peer_seq_lock); | |
1664 | old_peer_seq = mdev->peer_seq; | |
1665 | mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq); | |
1666 | spin_unlock(&mdev->peer_seq_lock); | |
 | /* editor's fix: wake only when the stored sequence actually advanced; | |
 | * the old check (old_peer_seq != peer_seq) also woke on stale packets */ | |
1667 | if (seq_greater(peer_seq, old_peer_seq)) | |
1668 | wake_up(&mdev->seq_wait); | |
1669 | } | |
1670 | ||
1671 | /* Called from receive_Data. | |
1672 | * Synchronize packets on sock with packets on msock. | |
1673 | * | |
1674 | * This is here so even when a P_DATA packet traveling via sock overtook an Ack | |
1675 | * packet traveling on msock, they are still processed in the order they have | |
1676 | * been sent. | |
1677 | * | |
1678 | * Note: we don't care about Ack packets overtaking P_DATA packets. | |
1679 | * | |
1680 | * In case packet_seq is larger than mdev->peer_seq, there are still | |
1681 | * outstanding packets on the msock. We wait for them to arrive. | |
1682 | * In case we are the logically next packet, we update mdev->peer_seq | |
1683 | * ourselves. Correctly handles 32bit wrap around. | |
1684 | * | |
1685 | * Assume we have a 10 GBit connection, that is about 1<<30 byte per second, | |
1686 | * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds | |
1687 | * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have | |
1688 | * 1<<11 == 2048 seconds aka ages for the 32bit wrap around... | |
1689 | * | |
1690 | * returns 0 if we may process the packet, | |
1691 | * -ERESTARTSYS if we were interrupted (by disconnect signal). */ | |
1692 | static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq) | |
1693 | { | |
1694 | DEFINE_WAIT(wait); | |
1695 | unsigned int p_seq; | |
1696 | long timeout; | |
1697 | int ret = 0; | |
1698 | spin_lock(&mdev->peer_seq_lock); | |
1699 | for (;;) { | |
1700 | prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE); | |
1701 | if (!seq_greater(packet_seq, mdev->peer_seq + 1)) | |
1702 | break; | |
1703 | if (signal_pending(current)) { | |
1704 | ret = -ERESTARTSYS; | |
1705 | break; | |
1706 | } | |
1707 | p_seq = mdev->peer_seq; | |
1708 | spin_unlock(&mdev->peer_seq_lock); | |
1709 | timeout = schedule_timeout(30*HZ); | |
1710 | spin_lock(&mdev->peer_seq_lock); | |
1711 | if (timeout == 0 && p_seq == mdev->peer_seq) { | |
1712 | ret = -ETIMEDOUT; | |
1713 | dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n"); | |
1714 | break; | |
1715 | } | |
1716 | } | |
1717 | finish_wait(&mdev->seq_wait, &wait); | |
1718 | if (mdev->peer_seq+1 == packet_seq) | |
1719 | mdev->peer_seq++; | |
1720 | spin_unlock(&mdev->peer_seq_lock); | |
1721 | return ret; | |
1722 | } | |
1723 | ||
1724 | /* see also bio_flags_to_wire(), which does the inverse mapping. | |
1725 | * We map between REQ_* bio flags and DP_* data packet flags semantically, | |
1726 | * not bit for bit, because we may replicate to other kernel versions. */ | |
1727 | static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf) | |
1728 | { | |
1729 | return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | | |
1730 | (dpf & DP_FUA ? REQ_FUA : 0) | | |
1731 | (dpf & DP_FLUSH ? REQ_FLUSH : 0) | | |
1732 | (dpf & DP_DISCARD ? REQ_DISCARD : 0); | |
1733 | } | |
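 | /* | |
 | * For reference, a sketch of the sending side (editor's illustration; | |
 | * the real work is done by bio_flags_to_wire(), assumed here to simply | |
 | * mirror the table above): | |
 | * | |
 | * static u32 example_bio_flags_to_wire(unsigned long bi_rw) | |
 | * { | |
 | * return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) | | |
 | * (bi_rw & REQ_FUA ? DP_FUA : 0) | | |
 | * (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) | | |
 | * (bi_rw & REQ_DISCARD ? DP_DISCARD : 0); | |
 | * } | |
 | * | |
 | * Mapping semantically instead of copying raw bits keeps the wire format | |
 | * stable even when REQ_* bit values change between kernel versions. | |
 | */ | |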
1734 | ||
1735 | /* mirrored write */ | |
1736 | static int receive_Data(struct drbd_conf *mdev, enum drbd_packet cmd, | |
1737 | unsigned int data_size) | |
1738 | { | |
1739 | sector_t sector; | |
1740 | struct drbd_peer_request *peer_req; | |
1741 | struct p_data *p = &mdev->tconn->data.rbuf.data; | |
1742 | int rw = WRITE; | |
1743 | u32 dp_flags; | |
1744 | ||
1745 | if (!get_ldev(mdev)) { | |
1746 | spin_lock(&mdev->peer_seq_lock); | |
1747 | if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num)) | |
1748 | mdev->peer_seq++; | |
1749 | spin_unlock(&mdev->peer_seq_lock); | |
1750 | ||
1751 | drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size); | |
1752 | atomic_inc(&mdev->current_epoch->epoch_size); | |
1753 | return drbd_drain_block(mdev, data_size); | |
1754 | } | |
1755 | ||
1756 | /* get_ldev(mdev) successful. | |
1757 | * Corresponding put_ldev done either below (on various errors), | |
1758 | * or in drbd_endio_sec, if we successfully submit the data at | |
1759 | * the end of this function. */ | |
1760 | ||
1761 | sector = be64_to_cpu(p->sector); | |
1762 | peer_req = read_in_block(mdev, p->block_id, sector, data_size); | |
1763 | if (!peer_req) { | |
1764 | put_ldev(mdev); | |
1765 | return false; | |
1766 | } | |
1767 | ||
1768 | peer_req->w.cb = e_end_block; | |
1769 | ||
1770 | dp_flags = be32_to_cpu(p->dp_flags); | |
1771 | rw |= wire_flags_to_bio(mdev, dp_flags); | |
1772 | ||
1773 | if (dp_flags & DP_MAY_SET_IN_SYNC) | |
1774 | peer_req->flags |= EE_MAY_SET_IN_SYNC; | |
1775 | ||
1776 | spin_lock(&mdev->epoch_lock); | |
1777 | peer_req->epoch = mdev->current_epoch; | |
1778 | atomic_inc(&peer_req->epoch->epoch_size); | |
1779 | atomic_inc(&peer_req->epoch->active); | |
1780 | spin_unlock(&mdev->epoch_lock); | |
1781 | ||
1782 | /* I'm the receiver, I do hold a net_cnt reference. */ | |
1783 | if (!mdev->tconn->net_conf->two_primaries) { | |
1784 | spin_lock_irq(&mdev->tconn->req_lock); | |
1785 | } else { | |
1786 | /* don't get the req_lock yet, | |
1787 | * we may sleep in drbd_wait_peer_seq */ | |
1788 | const int size = peer_req->i.size; | |
1789 | const int discard = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags); | |
1790 | DEFINE_WAIT(wait); | |
1791 | int first; | |
1792 | ||
1793 | D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C); | |
1794 | ||
1795 | /* conflict detection and handling: | |
1796 | * 1. wait on the sequence number, | |
1797 | * in case this data packet overtook ACK packets. | |
1798 | * 2. check for conflicting write requests. | |
1799 | * | |
1800 | * Note: for two_primaries, we are protocol C, | |
1801 | * so there cannot be any request that is DONE | |
1802 | * but still on the transfer log. | |
1803 | * | |
1804 | * if no conflicting request is found: | |
1805 | * submit. | |
1806 | * | |
1807 | * if any conflicting request is found | |
1808 | * that has not yet been acked, | |
1809 | * AND I have the "discard concurrent writes" flag: | |
1810 | * queue (via done_ee) the P_DISCARD_ACK; OUT. | |
1811 | * | |
1812 | * if any conflicting request is found: | |
1813 | * block the receiver, waiting on misc_wait | |
1814 | * until no more conflicting requests are there, | |
1815 | * or we get interrupted (disconnect). | |
1816 | * | |
1817 | * we do not just write after local io completion of those | |
1818 | * requests, but only after req is done completely, i.e. | |
1819 | * we wait for the P_DISCARD_ACK to arrive! | |
1820 | * | |
1821 | * then proceed normally, i.e. submit. | |
1822 | */ | |
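 | /* Concrete scenario (editor's sketch): both primaries write the same | |
 | * block at the same time. Exactly one node has DISCARD_CONCURRENT set | |
 | * (decided during the handshake). That node drops the write it received | |
 | * from the peer and answers P_DISCARD_ACK; the other node waits until | |
 | * its own request is fully done (its P_DISCARD_ACK arrived) and then | |
 | * submits the peer's write on top, so both disks converge. */ | |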
1823 | if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num))) | |
1824 | goto out_interrupted; | |
1825 | ||
1826 | spin_lock_irq(&mdev->tconn->req_lock); | |
1827 | ||
1828 | first = 1; | |
1829 | for (;;) { | |
1830 | struct drbd_interval *i; | |
1831 | int have_unacked = 0; | |
1832 | int have_conflict = 0; | |
1833 | prepare_to_wait(&mdev->misc_wait, &wait, | |
1834 | TASK_INTERRUPTIBLE); | |
1835 | ||
1836 | i = drbd_find_overlap(&mdev->write_requests, sector, size); | |
1837 | if (i) { | |
1838 | /* only ALERT on first iteration, | |
1839 | * we may be woken up early... */ | |
1840 | if (first) | |
1841 | dev_alert(DEV, "%s[%u] Concurrent %s write detected!" | |
1842 | " new: %llus +%u; pending: %llus +%u\n", | |
1843 | current->comm, current->pid, | |
1844 | i->local ? "local" : "remote", | |
1845 | (unsigned long long)sector, size, | |
1846 | (unsigned long long)i->sector, i->size); | |
1847 | ||
1848 | if (i->local) { | |
1849 | struct drbd_request *req2; | |
1850 | ||
1851 | req2 = container_of(i, struct drbd_request, i); | |
1852 | if (req2->rq_state & RQ_NET_PENDING) | |
1853 | ++have_unacked; | |
1854 | } | |
1855 | ++have_conflict; | |
1856 | } | |
1857 | if (!have_conflict) | |
1858 | break; | |
1859 | ||
1860 | /* Discard Ack only for the _first_ iteration */ | |
1861 | if (first && discard && have_unacked) { | |
1862 | dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n", | |
1863 | (unsigned long long)sector); | |
1864 | inc_unacked(mdev); | |
1865 | peer_req->w.cb = e_send_discard_ack; | |
1866 | list_add_tail(&peer_req->w.list, &mdev->done_ee); | |
1867 | ||
1868 | spin_unlock_irq(&mdev->tconn->req_lock); | |
1869 | ||
1870 | /* we could probably send that P_DISCARD_ACK ourselves, | |
1871 | * but I don't like the receiver using the msock */ | |
1872 | ||
1873 | put_ldev(mdev); | |
1874 | wake_asender(mdev->tconn); | |
1875 | finish_wait(&mdev->misc_wait, &wait); | |
1876 | return true; | |
1877 | } | |
1878 | ||
1879 | if (signal_pending(current)) { | |
1880 | spin_unlock_irq(&mdev->tconn->req_lock); | |
1881 | finish_wait(&mdev->misc_wait, &wait); | |
1882 | goto out_interrupted; | |
1883 | } | |
1884 | ||
1885 | /* Indicate to wake up mdev->misc_wait upon completion. */ | |
1886 | i->waiting = true; | |
1887 | ||
1888 | spin_unlock_irq(&mdev->tconn->req_lock); | |
1889 | if (first) { | |
1890 | first = 0; | |
1891 | dev_alert(DEV, "Concurrent write! [W AFTERWARDS] " | |
1892 | "sec=%llus\n", (unsigned long long)sector); | |
1893 | } else if (discard) { | |
1894 | /* we had none on the first iteration. | |
1895 | * there must be none now. */ | |
1896 | D_ASSERT(have_unacked == 0); | |
1897 | } | |
1898 | schedule(); | |
1899 | spin_lock_irq(&mdev->tconn->req_lock); | |
1900 | } | |
1901 | finish_wait(&mdev->misc_wait, &wait); | |
1902 | ||
1903 | drbd_insert_interval(&mdev->write_requests, &peer_req->i); | |
1904 | } | |
1905 | ||
1906 | list_add(&peer_req->w.list, &mdev->active_ee); | |
1907 | spin_unlock_irq(&mdev->tconn->req_lock); | |
1908 | ||
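 | /* Ack policy per wire protocol, for reference (editor's note): | |
 | * A: the peer considers the write complete once it was handed to | |
 | * its TCP send buffer -- no ack from us at all, | |
 | * B: we acknowledge reception with P_RECV_ACK, | |
 | * C: we acknowledge only once the data hit our disk, with P_WRITE_ACK | |
 | * sent later from e_end_block(). */ | |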
1909 | switch (mdev->tconn->net_conf->wire_protocol) { | |
1910 | case DRBD_PROT_C: | |
1911 | inc_unacked(mdev); | |
1912 | /* corresponding dec_unacked() in e_end_block() | |
1913 | * respective _drbd_clear_done_ee */ | |
1914 | break; | |
1915 | case DRBD_PROT_B: | |
1916 | /* I really don't like it that the receiver thread | |
1917 | * sends on the msock, but anyways */ | |
1918 | drbd_send_ack(mdev, P_RECV_ACK, peer_req); | |
1919 | break; | |
1920 | case DRBD_PROT_A: | |
1921 | /* nothing to do */ | |
1922 | break; | |
1923 | } | |
1924 | ||
1925 | if (mdev->state.pdsk < D_INCONSISTENT) { | |
1926 | /* In case we have the only disk of the cluster: remember these blocks as out of sync. */ | |
1927 | drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size); | |
1928 | peer_req->flags |= EE_CALL_AL_COMPLETE_IO; | |
1929 | peer_req->flags &= ~EE_MAY_SET_IN_SYNC; | |
1930 | drbd_al_begin_io(mdev, peer_req->i.sector); | |
1931 | } | |
1932 | ||
1933 | if (drbd_submit_ee(mdev, peer_req, rw, DRBD_FAULT_DT_WR) == 0) | |
1934 | return true; | |
1935 | ||
1936 | /* don't care for the reason here */ | |
1937 | dev_err(DEV, "submit failed, triggering re-connect\n"); | |
1938 | spin_lock_irq(&mdev->tconn->req_lock); | |
1939 | list_del(&peer_req->w.list); | |
1940 | drbd_remove_epoch_entry_interval(mdev, peer_req); | |
1941 | spin_unlock_irq(&mdev->tconn->req_lock); | |
1942 | if (peer_req->flags & EE_CALL_AL_COMPLETE_IO) | |
1943 | drbd_al_complete_io(mdev, peer_req->i.sector); | |
1944 | ||
1945 | out_interrupted: | |
1946 | drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + EV_CLEANUP); | |
1947 | put_ldev(mdev); | |
1948 | drbd_free_ee(mdev, peer_req); | |
1949 | return false; | |
1950 | } | |
1951 | ||
1952 | /* We may throttle resync, if the lower device seems to be busy, | |
1953 | * and current sync rate is above c_min_rate. | |
1954 | * | |
1955 | * To decide whether or not the lower device is busy, we use a scheme similar | |
1956 | * to MD RAID's is_mddev_idle(): if the partition stats reveal "significant" | |
1957 | * activity (more than 64 sectors) that we cannot account for with our own | |
1958 | * resync activity, the device obviously is "busy". | |
1959 | * | |
1960 | * The current sync rate used here uses only the most recent two step marks, | |
1961 | * to have a short time average so we can react faster. | |
1962 | */ | |
1963 | int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector) | |
1964 | { | |
1965 | struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk; | |
1966 | unsigned long db, dt, dbdt; | |
1967 | struct lc_element *tmp; | |
1968 | int curr_events; | |
1969 | int throttle = 0; | |
1970 | ||
1971 | /* feature disabled? */ | |
1972 | if (mdev->sync_conf.c_min_rate == 0) | |
1973 | return 0; | |
1974 | ||
1975 | spin_lock_irq(&mdev->al_lock); | |
1976 | tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector)); | |
1977 | if (tmp) { | |
1978 | struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce); | |
1979 | if (test_bit(BME_PRIORITY, &bm_ext->flags)) { | |
1980 | spin_unlock_irq(&mdev->al_lock); | |
1981 | return 0; | |
1982 | } | |
1983 | /* Do not slow down if app IO is already waiting for this extent */ | |
1984 | } | |
1985 | spin_unlock_irq(&mdev->al_lock); | |
1986 | ||
1987 | curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + | |
1988 | (int)part_stat_read(&disk->part0, sectors[1]) - | |
1989 | atomic_read(&mdev->rs_sect_ev); | |
1990 | ||
1991 | if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) { | |
1992 | unsigned long rs_left; | |
1993 | int i; | |
1994 | ||
1995 | mdev->rs_last_events = curr_events; | |
1996 | ||
1997 | /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP, | |
1998 | * approx. */ | |
1999 | i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS; | |
2000 | ||
2001 | if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) | |
2002 | rs_left = mdev->ov_left; | |
2003 | else | |
2004 | rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; | |
2005 | ||
2006 | dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ; | |
2007 | if (!dt) | |
2008 | dt++; | |
2009 | db = mdev->rs_mark_left[i] - rs_left; | |
2010 | dbdt = Bit2KB(db/dt); | |
2011 | ||
2012 | if (dbdt > mdev->sync_conf.c_min_rate) | |
2013 | throttle = 1; | |
2014 | } | |
2015 | return throttle; | |
2016 | } | |
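 | /* | |
 | * Worked example (editor's sketch) of the arithmetic above, assuming one | |
 | * bitmap bit covers 4 KiB, i.e. Bit2KB(x) == x * 4: | |
 | * | |
 | * rs_mark_left[i] - rs_left == 30000 bits cleared since mark i | |
 | * dt == 10 seconds -> db/dt == 3000 bits/s | |
 | * dbdt == Bit2KB(3000) == 12000 KiB/s | |
 | * | |
 | * With a hypothetical c_min_rate of 4000 KiB/s, 12000 > 4000: the resync | |
 | * already runs above its guaranteed minimum, so it may be throttled while | |
 | * the backing device looks busy. | |
 | */ | |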
2017 | ||
2018 | ||
2019 | static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packet cmd, | |
2020 | unsigned int digest_size) | |
2021 | { | |
2022 | sector_t sector; | |
2023 | const sector_t capacity = drbd_get_capacity(mdev->this_bdev); | |
2024 | struct drbd_peer_request *peer_req; | |
2025 | struct digest_info *di = NULL; | |
2026 | int size, verb; | |
2027 | unsigned int fault_type; | |
2028 | struct p_block_req *p = &mdev->tconn->data.rbuf.block_req; | |
2029 | ||
2030 | sector = be64_to_cpu(p->sector); | |
2031 | size = be32_to_cpu(p->blksize); | |
2032 | ||
2033 | if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { | |
2034 | dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, | |
2035 | (unsigned long long)sector, size); | |
2036 | return false; | |
2037 | } | |
2038 | if (sector + (size>>9) > capacity) { | |
2039 | dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, | |
2040 | (unsigned long long)sector, size); | |
2041 | return false; | |
2042 | } | |
2043 | ||
2044 | if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) { | |
2045 | verb = 1; | |
2046 | switch (cmd) { | |
2047 | case P_DATA_REQUEST: | |
2048 | drbd_send_ack_rp(mdev, P_NEG_DREPLY, p); | |
2049 | break; | |
2050 | case P_RS_DATA_REQUEST: | |
2051 | case P_CSUM_RS_REQUEST: | |
2052 | case P_OV_REQUEST: | |
2053 | drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY, p); | |
2054 | break; | |
2055 | case P_OV_REPLY: | |
2056 | verb = 0; | |
2057 | dec_rs_pending(mdev); | |
2058 | drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC); | |
2059 | break; | |
2060 | default: | |
2061 | dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n", | |
2062 | cmdname(cmd)); | |
2063 | } | |
2064 | if (verb && __ratelimit(&drbd_ratelimit_state)) | |
2065 | dev_err(DEV, "Can not satisfy peer's read request, " | |
2066 | "no local data.\n"); | |
2067 | ||
2068 | /* drain possible payload */ | |
2069 | return drbd_drain_block(mdev, digest_size); | |
2070 | } | |
2071 | ||
2072 | /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD | |
2073 | * "criss-cross" setup, that might cause write-out on some other DRBD, | |
2074 | * which in turn might block on the other node at this very place. */ | |
2075 | peer_req = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO); | |
2076 | if (!peer_req) { | |
2077 | put_ldev(mdev); | |
2078 | return false; | |
2079 | } | |
2080 | ||
2081 | switch (cmd) { | |
2082 | case P_DATA_REQUEST: | |
2083 | peer_req->w.cb = w_e_end_data_req; | |
2084 | fault_type = DRBD_FAULT_DT_RD; | |
2085 | /* application IO, don't drbd_rs_begin_io */ | |
2086 | goto submit; | |
2087 | ||
2088 | case P_RS_DATA_REQUEST: | |
2089 | peer_req->w.cb = w_e_end_rsdata_req; | |
2090 | fault_type = DRBD_FAULT_RS_RD; | |
2091 | /* used in the sector offset progress display */ | |
2092 | mdev->bm_resync_fo = BM_SECT_TO_BIT(sector); | |
2093 | break; | |
2094 | ||
2095 | case P_OV_REPLY: | |
2096 | case P_CSUM_RS_REQUEST: | |
2097 | fault_type = DRBD_FAULT_RS_RD; | |
2098 | di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO); | |
2099 | if (!di) | |
2100 | goto out_free_e; | |
2101 | ||
2102 | di->digest_size = digest_size; | |
2103 | di->digest = (((char *)di)+sizeof(struct digest_info)); | |
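 | /* note: struct and digest payload share one allocation (see the | |
 | * kmalloc above); the digest starts right behind struct digest_info */ | |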
2104 | ||
2105 | peer_req->digest = di; | |
2106 | peer_req->flags |= EE_HAS_DIGEST; | |
2107 | ||
2108 | if (drbd_recv(mdev->tconn, di->digest, digest_size) != digest_size) | |
2109 | goto out_free_e; | |
2110 | ||
2111 | if (cmd == P_CSUM_RS_REQUEST) { | |
2112 | D_ASSERT(mdev->tconn->agreed_pro_version >= 89); | |
2113 | peer_req->w.cb = w_e_end_csum_rs_req; | |
2114 | /* used in the sector offset progress display */ | |
2115 | mdev->bm_resync_fo = BM_SECT_TO_BIT(sector); | |
2116 | } else if (cmd == P_OV_REPLY) { | |
2117 | /* track progress, we may need to throttle */ | |
2118 | atomic_add(size >> 9, &mdev->rs_sect_in); | |
2119 | peer_req->w.cb = w_e_end_ov_reply; | |
2120 | dec_rs_pending(mdev); | |
2121 | /* drbd_rs_begin_io done when we sent this request, | |
2122 | * but accounting still needs to be done. */ | |
2123 | goto submit_for_resync; | |
2124 | } | |
2125 | break; | |
2126 | ||
2127 | case P_OV_REQUEST: | |
2128 | if (mdev->ov_start_sector == ~(sector_t)0 && | |
2129 | mdev->tconn->agreed_pro_version >= 90) { | |
2130 | unsigned long now = jiffies; | |
2131 | int i; | |
2132 | mdev->ov_start_sector = sector; | |
2133 | mdev->ov_position = sector; | |
2134 | mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector); | |
2135 | mdev->rs_total = mdev->ov_left; | |
2136 | for (i = 0; i < DRBD_SYNC_MARKS; i++) { | |
2137 | mdev->rs_mark_left[i] = mdev->ov_left; | |
2138 | mdev->rs_mark_time[i] = now; | |
2139 | } | |
2140 | dev_info(DEV, "Online Verify start sector: %llu\n", | |
2141 | (unsigned long long)sector); | |
2142 | } | |
2143 | peer_req->w.cb = w_e_end_ov_req; | |
2144 | fault_type = DRBD_FAULT_RS_RD; | |
2145 | break; | |
2146 | ||
2147 | default: | |
2148 | dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n", | |
2149 | cmdname(cmd)); | |
2150 | fault_type = DRBD_FAULT_MAX; | |
2151 | goto out_free_e; | |
2152 | } | |
2153 | ||
2154 | /* Throttle, drbd_rs_begin_io and submit should become asynchronous | |
2155 | * wrt the receiver, but it is not as straightforward as it may seem. | |
2156 | * Various places in the resync start and stop logic assume resync | |
2157 | * requests are processed in order, requeuing this on the worker thread | |
2158 | * introduces a bunch of new code for synchronization between threads. | |
2159 | * | |
2160 | * Unlimited throttling before drbd_rs_begin_io may stall the resync | |
2161 | * "forever", throttling after drbd_rs_begin_io will lock that extent | |
2162 | * for application writes for the same time. For now, just throttle | |
2163 | * here, where the rest of the code expects the receiver to sleep for | |
2164 | * a while, anyways. | |
2165 | */ | |
2166 | ||
2167 | /* Throttle before drbd_rs_begin_io, as that locks out application IO; | |
2168 | * this defers syncer requests for some time, before letting at least | |
2169 | * one request through. The resync controller on the receiving side | |
2170 | * will adapt to the incoming rate accordingly. | |
2171 | * | |
2172 | * We cannot throttle here if remote is Primary/SyncTarget: | |
2173 | * we would also throttle its application reads. | |
2174 | * In that case, throttling is done on the SyncTarget only. | |
2175 | */ | |
2176 | if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector)) | |
2177 | schedule_timeout_uninterruptible(HZ/10); | |
2178 | if (drbd_rs_begin_io(mdev, sector)) | |
2179 | goto out_free_e; | |
2180 | ||
2181 | submit_for_resync: | |
2182 | atomic_add(size >> 9, &mdev->rs_sect_ev); | |
2183 | ||
2184 | submit: | |
2185 | inc_unacked(mdev); | |
2186 | spin_lock_irq(&mdev->tconn->req_lock); | |
2187 | list_add_tail(&peer_req->w.list, &mdev->read_ee); | |
2188 | spin_unlock_irq(&mdev->tconn->req_lock); | |
2189 | ||
2190 | if (drbd_submit_ee(mdev, peer_req, READ, fault_type) == 0) | |
2191 | return true; | |
2192 | ||
2193 | /* don't care for the reason here */ | |
2194 | dev_err(DEV, "submit failed, triggering re-connect\n"); | |
2195 | spin_lock_irq(&mdev->tconn->req_lock); | |
2196 | list_del(&peer_req->w.list); | |
2197 | spin_unlock_irq(&mdev->tconn->req_lock); | |
2198 | /* no drbd_rs_complete_io(), we are dropping the connection anyways */ | |
2199 | ||
2200 | out_free_e: | |
2201 | put_ldev(mdev); | |
2202 | drbd_free_ee(mdev, peer_req); | |
2203 | return false; | |
2204 | } | |
2205 | ||
2206 | static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local) | |
2207 | { | |
2208 | int self, peer, rv = -100; | |
2209 | unsigned long ch_self, ch_peer; | |
2210 | ||
2211 | self = mdev->ldev->md.uuid[UI_BITMAP] & 1; | |
2212 | peer = mdev->p_uuid[UI_BITMAP] & 1; | |
2213 | ||
2214 | ch_peer = mdev->p_uuid[UI_SIZE]; | |
2215 | ch_self = mdev->comm_bm_set; | |
2216 | ||
2217 | switch (mdev->tconn->net_conf->after_sb_0p) { | |
2218 | case ASB_CONSENSUS: | |
2219 | case ASB_DISCARD_SECONDARY: | |
2220 | case ASB_CALL_HELPER: | |
2221 | dev_err(DEV, "Configuration error.\n"); | |
2222 | break; | |
2223 | case ASB_DISCONNECT: | |
2224 | break; | |
2225 | case ASB_DISCARD_YOUNGER_PRI: | |
2226 | if (self == 0 && peer == 1) { | |
2227 | rv = -1; | |
2228 | break; | |
2229 | } | |
2230 | if (self == 1 && peer == 0) { | |
2231 | rv = 1; | |
2232 | break; | |
2233 | } | |
2234 | /* Else fall through to one of the other strategies... */ | |
2235 | case ASB_DISCARD_OLDER_PRI: | |
2236 | if (self == 0 && peer == 1) { | |
2237 | rv = 1; | |
2238 | break; | |
2239 | } | |
2240 | if (self == 1 && peer == 0) { | |
2241 | rv = -1; | |
2242 | break; | |
2243 | } | |
2244 | /* Else fall through to one of the other strategies... */ | |
2245 | dev_warn(DEV, "Discard younger/older primary did not find a decision\n" | |
2246 | "Using discard-least-changes instead\n"); | |
2247 | case ASB_DISCARD_ZERO_CHG: | |
2248 | if (ch_peer == 0 && ch_self == 0) { | |
2249 | rv = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags) | |
2250 | ? -1 : 1; | |
2251 | break; | |
2252 | } else { | |
2253 | if (ch_peer == 0) { rv = 1; break; } | |
2254 | if (ch_self == 0) { rv = -1; break; } | |
2255 | } | |
2256 | if (mdev->tconn->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG) | |
2257 | break; | |
2258 | case ASB_DISCARD_LEAST_CHG: | |
2259 | if (ch_self < ch_peer) | |
2260 | rv = -1; | |
2261 | else if (ch_self > ch_peer) | |
2262 | rv = 1; | |
2263 | else /* ( ch_self == ch_peer ) */ | |
2264 | /* Well, then use something else. */ | |
2265 | rv = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags) | |
2266 | ? -1 : 1; | |
2267 | break; | |
2268 | case ASB_DISCARD_LOCAL: | |
2269 | rv = -1; | |
2270 | break; | |
2271 | case ASB_DISCARD_REMOTE: | |
2272 | rv = 1; | |
2273 | } | |
2274 | ||
2275 | return rv; | |
2276 | } | |
2277 | ||
2278 | static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local) | |
2279 | { | |
2280 | int hg, rv = -100; | |
2281 | ||
2282 | switch (mdev->tconn->net_conf->after_sb_1p) { | |
2283 | case ASB_DISCARD_YOUNGER_PRI: | |
2284 | case ASB_DISCARD_OLDER_PRI: | |
2285 | case ASB_DISCARD_LEAST_CHG: | |
2286 | case ASB_DISCARD_LOCAL: | |
2287 | case ASB_DISCARD_REMOTE: | |
2288 | dev_err(DEV, "Configuration error.\n"); | |
2289 | break; | |
2290 | case ASB_DISCONNECT: | |
2291 | break; | |
2292 | case ASB_CONSENSUS: | |
2293 | hg = drbd_asb_recover_0p(mdev); | |
2294 | if (hg == -1 && mdev->state.role == R_SECONDARY) | |
2295 | rv = hg; | |
2296 | if (hg == 1 && mdev->state.role == R_PRIMARY) | |
2297 | rv = hg; | |
2298 | break; | |
2299 | case ASB_VIOLENTLY: | |
2300 | rv = drbd_asb_recover_0p(mdev); | |
2301 | break; | |
2302 | case ASB_DISCARD_SECONDARY: | |
2303 | return mdev->state.role == R_PRIMARY ? 1 : -1; | |
2304 | case ASB_CALL_HELPER: | |
2305 | hg = drbd_asb_recover_0p(mdev); | |
2306 | if (hg == -1 && mdev->state.role == R_PRIMARY) { | |
2307 | enum drbd_state_rv rv2; | |
2308 | ||
2309 | drbd_set_role(mdev, R_SECONDARY, 0); | |
2310 | /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, | |
2311 | * we might be here in C_WF_REPORT_PARAMS which is transient. | |
2312 | * we do not need to wait for the after state change work either. */ | |
2313 | rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); | |
2314 | if (rv2 != SS_SUCCESS) { | |
2315 | drbd_khelper(mdev, "pri-lost-after-sb"); | |
2316 | } else { | |
2317 | dev_warn(DEV, "Successfully gave up primary role.\n"); | |
2318 | rv = hg; | |
2319 | } | |
2320 | } else | |
2321 | rv = hg; | |
2322 | } | |
2323 | ||
2324 | return rv; | |
2325 | } | |
2326 | ||
2327 | static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local) | |
2328 | { | |
2329 | int hg, rv = -100; | |
2330 | ||
2331 | switch (mdev->tconn->net_conf->after_sb_2p) { | |
2332 | case ASB_DISCARD_YOUNGER_PRI: | |
2333 | case ASB_DISCARD_OLDER_PRI: | |
2334 | case ASB_DISCARD_LEAST_CHG: | |
2335 | case ASB_DISCARD_LOCAL: | |
2336 | case ASB_DISCARD_REMOTE: | |
2337 | case ASB_CONSENSUS: | |
2338 | case ASB_DISCARD_SECONDARY: | |
2339 | dev_err(DEV, "Configuration error.\n"); | |
2340 | break; | |
2341 | case ASB_VIOLENTLY: | |
2342 | rv = drbd_asb_recover_0p(mdev); | |
2343 | break; | |
2344 | case ASB_DISCONNECT: | |
2345 | break; | |
2346 | case ASB_CALL_HELPER: | |
2347 | hg = drbd_asb_recover_0p(mdev); | |
2348 | if (hg == -1) { | |
2349 | enum drbd_state_rv rv2; | |
2350 | ||
2351 | /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, | |
2352 | * we might be here in C_WF_REPORT_PARAMS which is transient. | |
2353 | * we do not need to wait for the after state change work either. */ | |
2354 | rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); | |
2355 | if (rv2 != SS_SUCCESS) { | |
2356 | drbd_khelper(mdev, "pri-lost-after-sb"); | |
2357 | } else { | |
2358 | dev_warn(DEV, "Successfully gave up primary role.\n"); | |
2359 | rv = hg; | |
2360 | } | |
2361 | } else | |
2362 | rv = hg; | |
2363 | } | |
2364 | ||
2365 | return rv; | |
2366 | } | |
2367 | ||
2368 | static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid, | |
2369 | u64 bits, u64 flags) | |
2370 | { | |
2371 | if (!uuid) { | |
2372 | dev_info(DEV, "%s uuid info vanished while I was looking!\n", text); | |
2373 | return; | |
2374 | } | |
2375 | dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n", | |
2376 | text, | |
2377 | (unsigned long long)uuid[UI_CURRENT], | |
2378 | (unsigned long long)uuid[UI_BITMAP], | |
2379 | (unsigned long long)uuid[UI_HISTORY_START], | |
2380 | (unsigned long long)uuid[UI_HISTORY_END], | |
2381 | (unsigned long long)bits, | |
2382 | (unsigned long long)flags); | |
2383 | } | |
2384 | ||
2385 | /* | |
2386 | 100 after split brain try auto recover | |
2387 | 2 C_SYNC_SOURCE set BitMap | |
2388 | 1 C_SYNC_SOURCE use BitMap | |
2389 | 0 no Sync | |
2390 | -1 C_SYNC_TARGET use BitMap | |
2391 | -2 C_SYNC_TARGET set BitMap | |
2392 | -100 after split brain, disconnect | |
2393 | -1000 unrelated data | |
2394 | -1091 requires proto 91 | |
2395 | -1096 requires proto 96 | |
2396 | */ | |
2397 | static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local) | |
2398 | { | |
2399 | u64 self, peer; | |
2400 | int i, j; | |
2401 | ||
2402 | self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1); | |
2403 | peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1); | |
2404 | ||
2405 | *rule_nr = 10; | |
2406 | if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED) | |
2407 | return 0; | |
2408 | ||
2409 | *rule_nr = 20; | |
2410 | if ((self == UUID_JUST_CREATED || self == (u64)0) && | |
2411 | peer != UUID_JUST_CREATED) | |
2412 | return -2; | |
2413 | ||
2414 | *rule_nr = 30; | |
2415 | if (self != UUID_JUST_CREATED && | |
2416 | (peer == UUID_JUST_CREATED || peer == (u64)0)) | |
2417 | return 2; | |
2418 | ||
2419 | if (self == peer) { | |
2420 | int rct, dc; /* roles at crash time */ | |
2421 | ||
2422 | if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) { | |
2423 | ||
2424 | if (mdev->tconn->agreed_pro_version < 91) | |
2425 | return -1091; | |
2426 | ||
2427 | if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) && | |
2428 | (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) { | |
2429 | dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n"); | |
2430 | drbd_uuid_set_bm(mdev, 0UL); | |
2431 | ||
2432 | drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, | |
2433 | mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0); | |
2434 | *rule_nr = 34; | |
2435 | } else { | |
2436 | dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n"); | |
2437 | *rule_nr = 36; | |
2438 | } | |
2439 | ||
2440 | return 1; | |
2441 | } | |
2442 | ||
2443 | if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) { | |
2444 | ||
2445 | if (mdev->tconn->agreed_pro_version < 91) | |
2446 | return -1091; | |
2447 | ||
2448 | if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) && | |
2449 | (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) { | |
2450 | dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n"); | |
2451 | ||
2452 | mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START]; | |
2453 | mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP]; | |
2454 | mdev->p_uuid[UI_BITMAP] = 0UL; | |
2455 | ||
2456 | drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]); | |
2457 | *rule_nr = 35; | |
2458 | } else { | |
2459 | dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n"); | |
2460 | *rule_nr = 37; | |
2461 | } | |
2462 | ||
2463 | return -1; | |
2464 | } | |
2465 | ||
2466 | /* Common power [off|failure] */ | |
2467 | rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) + | |
2468 | (mdev->p_uuid[UI_FLAGS] & 2); | |
2469 | /* lowest bit is set when we were primary, | |
2470 | * next bit (weight 2) is set when peer was primary */ | |
2471 | *rule_nr = 40; | |
2472 | ||
2473 | switch (rct) { | |
2474 | case 0: /* !self_pri && !peer_pri */ return 0; | |
2475 | case 1: /* self_pri && !peer_pri */ return 1; | |
2476 | case 2: /* !self_pri && peer_pri */ return -1; | |
2477 | case 3: /* self_pri && peer_pri */ | |
2478 | dc = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags); | |
2479 | return dc ? -1 : 1; | |
2480 | } | |
2481 | } | |
2482 | ||
2483 | *rule_nr = 50; | |
2484 | peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1); | |
2485 | if (self == peer) | |
2486 | return -1; | |
2487 | ||
2488 | *rule_nr = 51; | |
2489 | peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1); | |
2490 | if (self == peer) { | |
2491 | if (mdev->tconn->agreed_pro_version < 96 ? | |
2492 | (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == | |
2493 | (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) : | |
2494 | peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) { | |
2495 | /* The last P_SYNC_UUID did not get through. Undo the modifications the | |
2496 | peer made to its UUIDs when it last started a resync as sync source. */ | |
2497 | ||
2498 | if (mdev->tconn->agreed_pro_version < 91) | |
2499 | return -1091; | |
2500 | ||
2501 | mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START]; | |
2502 | mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1]; | |
2503 | ||
2504 | dev_info(DEV, "Did not got last syncUUID packet, corrected:\n"); | |
2505 | drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]); | |
2506 | ||
2507 | return -1; | |
2508 | } | |
2509 | } | |
2510 | ||
2511 | *rule_nr = 60; | |
2512 | self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1); | |
2513 | for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) { | |
2514 | peer = mdev->p_uuid[i] & ~((u64)1); | |
2515 | if (self == peer) | |
2516 | return -2; | |
2517 | } | |
2518 | ||
2519 | *rule_nr = 70; | |
2520 | self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1); | |
2521 | peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1); | |
2522 | if (self == peer) | |
2523 | return 1; | |
2524 | ||
2525 | *rule_nr = 71; | |
2526 | self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); | |
2527 | if (self == peer) { | |
2528 | if (mdev->tconn->agreed_pro_version < 96 ? | |
2529 | (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == | |
2530 | (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) : | |
2531 | self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) { | |
2532 | /* The last P_SYNC_UUID did not get through. Undo the modifications we | |
2533 | made to our UUIDs when we last started a resync as sync source. */ | |
2534 | ||
2535 | if (mdev->tconn->agreed_pro_version < 91) | |
2536 | return -1091; | |
2537 | ||
2538 | _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]); | |
2539 | _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]); | |
2540 | ||
2541 | dev_info(DEV, "Last syncUUID did not get through, corrected:\n"); | |
2542 | drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, | |
2543 | mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0); | |
2544 | ||
2545 | return 1; | |
2546 | } | |
2547 | } | |
2548 | ||
2549 | ||
2550 | *rule_nr = 80; | |
2551 | peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1); | |
2552 | for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) { | |
2553 | self = mdev->ldev->md.uuid[i] & ~((u64)1); | |
2554 | if (self == peer) | |
2555 | return 2; | |
2556 | } | |
2557 | ||
2558 | *rule_nr = 90; | |
2559 | self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1); | |
2560 | peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1); | |
2561 | if (self == peer && self != ((u64)0)) | |
2562 | return 100; | |
2563 | ||
2564 | *rule_nr = 100; | |
2565 | for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) { | |
2566 | self = mdev->ldev->md.uuid[i] & ~((u64)1); | |
2567 | for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) { | |
2568 | peer = mdev->p_uuid[j] & ~((u64)1); | |
2569 | if (self == peer) | |
2570 | return -100; | |
2571 | } | |
2572 | } | |
2573 | ||
2574 | return -1000; | |
2575 | } | |
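 | /* | |
 | * Example (editor's sketch) for rule 40 above -- identical current UUIDs, | |
 | * so both nodes went down together. rct encodes the roles at crash time: | |
 | * | |
 | * rct = (we crashed as Primary ? 1 : 0) + (peer was Primary ? 2 : 0) | |
 | * | |
 | * rct == 0: neither was Primary -> 0, no resync | |
 | * rct == 1: only we were Primary -> 1, we become sync source | |
 | * rct == 2: only the peer was Primary -> -1, we become sync target | |
 | * rct == 3: both were Primary -> tie broken by the DISCARD_CONCURRENT | |
 | * flag negotiated at handshake time | |
 | */ | |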
2576 | ||
2577 | /* drbd_sync_handshake() returns the new conn state on success, or | |
2578 | CONN_MASK (-1) on failure. | |
2579 | */ | |
2580 | static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role, | |
2581 | enum drbd_disk_state peer_disk) __must_hold(local) | |
2582 | { | |
2583 | int hg, rule_nr; | |
2584 | enum drbd_conns rv = C_MASK; | |
2585 | enum drbd_disk_state mydisk; | |
2586 | ||
2587 | mydisk = mdev->state.disk; | |
2588 | if (mydisk == D_NEGOTIATING) | |
2589 | mydisk = mdev->new_state_tmp.disk; | |
2590 | ||
2591 | dev_info(DEV, "drbd_sync_handshake:\n"); | |
2592 | drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0); | |
2593 | drbd_uuid_dump(mdev, "peer", mdev->p_uuid, | |
2594 | mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]); | |
2595 | ||
2596 | hg = drbd_uuid_compare(mdev, &rule_nr); | |
2597 | ||
2598 | dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr); | |
2599 | ||
2600 | if (hg == -1000) { | |
2601 | dev_alert(DEV, "Unrelated data, aborting!\n"); | |
2602 | return C_MASK; | |
2603 | } | |
2604 | if (hg < -1000) { | |
2605 | dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000); | |
2606 | return C_MASK; | |
2607 | } | |
2608 | ||
2609 | if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) || | |
2610 | (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) { | |
2611 | int f = (hg == -100) || abs(hg) == 2; | |
2612 | hg = mydisk > D_INCONSISTENT ? 1 : -1; | |
2613 | if (f) | |
2614 | hg = hg*2; | |
2615 | dev_info(DEV, "Becoming sync %s due to disk states.\n", | |
2616 | hg > 0 ? "source" : "target"); | |
2617 | } | |
2618 | ||
2619 | if (abs(hg) == 100) | |
2620 | drbd_khelper(mdev, "initial-split-brain"); | |
2621 | ||
2622 | if (hg == 100 || (hg == -100 && mdev->tconn->net_conf->always_asbp)) { | |
2623 | int pcount = (mdev->state.role == R_PRIMARY) | |
2624 | + (peer_role == R_PRIMARY); | |
2625 | int forced = (hg == -100); | |
2626 | ||
2627 | switch (pcount) { | |
2628 | case 0: | |
2629 | hg = drbd_asb_recover_0p(mdev); | |
2630 | break; | |
2631 | case 1: | |
2632 | hg = drbd_asb_recover_1p(mdev); | |
2633 | break; | |
2634 | case 2: | |
2635 | hg = drbd_asb_recover_2p(mdev); | |
2636 | break; | |
2637 | } | |
2638 | if (abs(hg) < 100) { | |
2639 | dev_warn(DEV, "Split-Brain detected, %d primaries, " | |
2640 | "automatically solved. Sync from %s node\n", | |
2641 | pcount, (hg < 0) ? "peer" : "this"); | |
2642 | if (forced) { | |
2643 | dev_warn(DEV, "Doing a full sync, since" | |
2644 | " UUIDs where ambiguous.\n"); | |
2645 | hg = hg*2; | |
2646 | } | |
2647 | } | |
2648 | } | |
2649 | ||
2650 | if (hg == -100) { | |
2651 | if (mdev->tconn->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1)) | |
2652 | hg = -1; | |
2653 | if (!mdev->tconn->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1)) | |
2654 | hg = 1; | |
2655 | ||
2656 | if (abs(hg) < 100) | |
2657 | dev_warn(DEV, "Split-Brain detected, manually solved. " | |
2658 | "Sync from %s node\n", | |
2659 | (hg < 0) ? "peer" : "this"); | |
2660 | } | |
2661 | ||
2662 | if (hg == -100) { | |
2663 | /* FIXME this log message is not correct if we end up here | |
2664 | * after an attempted attach on a diskless node. | |
2665 | * We just refuse to attach -- well, we drop the "connection" | |
2666 | * to that disk, in a way... */ | |
2667 | dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n"); | |
2668 | drbd_khelper(mdev, "split-brain"); | |
2669 | return C_MASK; | |
2670 | } | |
2671 | ||
2672 | if (hg > 0 && mydisk <= D_INCONSISTENT) { | |
2673 | dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n"); | |
2674 | return C_MASK; | |
2675 | } | |
2676 | ||
2677 | if (hg < 0 && /* by intention we do not use mydisk here. */ | |
2678 | mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) { | |
2679 | switch (mdev->tconn->net_conf->rr_conflict) { | |
2680 | case ASB_CALL_HELPER: | |
2681 | drbd_khelper(mdev, "pri-lost"); | |
2682 | /* fall through */ | |
2683 | case ASB_DISCONNECT: | |
2684 | dev_err(DEV, "I shall become SyncTarget, but I am primary!\n"); | |
2685 | return C_MASK; | |
2686 | case ASB_VIOLENTLY: | |
2687 | dev_warn(DEV, "Becoming SyncTarget, violating the stable-data" | |
2688 | "assumption\n"); | |
2689 | } | |
2690 | } | |
2691 | ||
2692 | if (mdev->tconn->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) { | |
2693 | if (hg == 0) | |
2694 | dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n"); | |
2695 | else | |
2696 | dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.", | |
2697 | drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET), | |
2698 | abs(hg) >= 2 ? "full" : "bit-map based"); | |
2699 | return C_MASK; | |
2700 | } | |
2701 | ||
2702 | if (abs(hg) >= 2) { | |
2703 | dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n"); | |
2704 | if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake", | |
2705 | BM_LOCKED_SET_ALLOWED)) | |
2706 | return C_MASK; | |
2707 | } | |
2708 | ||
2709 | if (hg > 0) { /* become sync source. */ | |
2710 | rv = C_WF_BITMAP_S; | |
2711 | } else if (hg < 0) { /* become sync target */ | |
2712 | rv = C_WF_BITMAP_T; | |
2713 | } else { | |
2714 | rv = C_CONNECTED; | |
2715 | if (drbd_bm_total_weight(mdev)) { | |
2716 | dev_info(DEV, "No resync, but %lu bits in bitmap!\n", | |
2717 | drbd_bm_total_weight(mdev)); | |
2718 | } | |
2719 | } | |
2720 | ||
2721 | return rv; | |
2722 | } | |
2723 | ||
2724 | /* returns 1 if invalid */ | |
2725 | static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self) | |
2726 | { | |
2727 | /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */ | |
2728 | if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) || | |
2729 | (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL)) | |
2730 | return 0; | |
2731 | ||
2732 | /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */ | |
2733 | if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL || | |
2734 | self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL) | |
2735 | return 1; | |
2736 | ||
2737 | /* everything else is valid if they are equal on both sides. */ | |
2738 | if (peer == self) | |
2739 | return 0; | |
2740 | ||
2741 | /* everything else is invalid. */ | |
2742 | return 1; | |
2743 | } | |
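 | /* | |
 | * Examples (editor's sketch) for the check above: | |
 | * | |
 | * peer = discard-remote, self = discard-local -> 0 (valid: both agree | |
 | * on whose data gets discarded) | |
 | * peer = discard-local, self = discard-local -> 1 (invalid: each side | |
 | * would throw away its own data) | |
 | * peer = disconnect, self = disconnect -> 0 (valid: equal settings) | |
 | * peer = disconnect, self = consensus -> 1 (invalid: unequal settings) | |
 | */ | |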
2744 | ||
2745 | static int receive_protocol(struct drbd_conf *mdev, enum drbd_packet cmd, | |
2746 | unsigned int data_size) | |
2747 | { | |
2748 | struct p_protocol *p = &mdev->tconn->data.rbuf.protocol; | |
2749 | int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p; | |
2750 | int p_want_lose, p_two_primaries, cf; | |
2751 | char p_integrity_alg[SHARED_SECRET_MAX] = ""; | |
2752 | ||
2753 | p_proto = be32_to_cpu(p->protocol); | |
2754 | p_after_sb_0p = be32_to_cpu(p->after_sb_0p); | |
2755 | p_after_sb_1p = be32_to_cpu(p->after_sb_1p); | |
2756 | p_after_sb_2p = be32_to_cpu(p->after_sb_2p); | |
2757 | p_two_primaries = be32_to_cpu(p->two_primaries); | |
2758 | cf = be32_to_cpu(p->conn_flags); | |
2759 | p_want_lose = cf & CF_WANT_LOSE; | |
2760 | ||
2761 | clear_bit(CONN_DRY_RUN, &mdev->flags); | |
2762 | ||
2763 | if (cf & CF_DRY_RUN) | |
2764 | set_bit(CONN_DRY_RUN, &mdev->flags); | |
2765 | ||
2766 | if (p_proto != mdev->tconn->net_conf->wire_protocol) { | |
2767 | dev_err(DEV, "incompatible communication protocols\n"); | |
2768 | goto disconnect; | |
2769 | } | |
2770 | ||
2771 | if (cmp_after_sb(p_after_sb_0p, mdev->tconn->net_conf->after_sb_0p)) { | |
2772 | dev_err(DEV, "incompatible after-sb-0pri settings\n"); | |
2773 | goto disconnect; | |
2774 | } | |
2775 | ||
2776 | if (cmp_after_sb(p_after_sb_1p, mdev->tconn->net_conf->after_sb_1p)) { | |
2777 | dev_err(DEV, "incompatible after-sb-1pri settings\n"); | |
2778 | goto disconnect; | |
2779 | } | |
2780 | ||
2781 | if (cmp_after_sb(p_after_sb_2p, mdev->tconn->net_conf->after_sb_2p)) { | |
2782 | dev_err(DEV, "incompatible after-sb-2pri settings\n"); | |
2783 | goto disconnect; | |
2784 | } | |
2785 | ||
2786 | if (p_want_lose && mdev->tconn->net_conf->want_lose) { | |
2787 | dev_err(DEV, "both sides have the 'want_lose' flag set\n"); | |
2788 | goto disconnect; | |
2789 | } | |
2790 | ||
2791 | if (p_two_primaries != mdev->tconn->net_conf->two_primaries) { | |
2792 | dev_err(DEV, "incompatible setting of the two-primaries options\n"); | |
2793 | goto disconnect; | |
2794 | } | |
2795 | ||
2796 | if (mdev->tconn->agreed_pro_version >= 87) { | |
2797 | unsigned char *my_alg = mdev->tconn->net_conf->integrity_alg; | |
2798 | ||
2799 | if (drbd_recv(mdev->tconn, p_integrity_alg, data_size) != data_size) | |
2800 | return false; | |
2801 | ||
2802 | p_integrity_alg[SHARED_SECRET_MAX-1] = 0; | |
2803 | if (strcmp(p_integrity_alg, my_alg)) { | |
2804 | dev_err(DEV, "incompatible setting of the data-integrity-alg\n"); | |
2805 | goto disconnect; | |
2806 | } | |
2807 | dev_info(DEV, "data-integrity-alg: %s\n", | |
2808 | my_alg[0] ? my_alg : (unsigned char *)"<not-used>"); | |
2809 | } | |
2810 | ||
2811 | return true; | |
2812 | ||
2813 | disconnect: | |
2814 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | |
2815 | return false; | |
2816 | } | |
2817 | ||
2818 | /* helper function | |
2819 | * input: alg name, feature name | |
2820 | * return: NULL (alg name was "") | |
2821 | * ERR_PTR(error) if something goes wrong | |
2822 | * or the crypto hash ptr, if it worked out ok. */ | |
2823 | struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev, | |
2824 | const char *alg, const char *name) | |
2825 | { | |
2826 | struct crypto_hash *tfm; | |
2827 | ||
2828 | if (!alg[0]) | |
2829 | return NULL; | |
2830 | ||
2831 | tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC); | |
2832 | if (IS_ERR(tfm)) { | |
2833 | dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n", | |
2834 | alg, name, PTR_ERR(tfm)); | |
2835 | return tfm; | |
2836 | } | |
2837 | if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) { | |
2838 | crypto_free_hash(tfm); | |
2839 | dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name); | |
2840 | return ERR_PTR(-EINVAL); | |
2841 | } | |
2842 | return tfm; | |
2843 | } | |
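 | /* | |
 | * Typical use of the helper above (editor's sketch, mirroring what | |
 | * receive_SyncParam() does below): | |
 | * | |
 | * tfm = drbd_crypto_alloc_digest_safe(mdev, p->verify_alg, "verify-alg"); | |
 | * if (IS_ERR(tfm)) | |
 | * goto disconnect; // allocation failed, or alg is not a digest | |
 | * // tfm == NULL just means the feature is unused (alg name was "") | |
 | * | |
 | * The three-way return (NULL / ERR_PTR() / valid pointer) distinguishes | |
 | * "feature disabled" from "misconfigured" in a single call. | |
 | */ | |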
2844 | ||
2845 | static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packet cmd, | |
2846 | unsigned int packet_size) | |
2847 | { | |
2848 | int ok = true; | |
2849 | struct p_rs_param_95 *p = &mdev->tconn->data.rbuf.rs_param_95; | |
2850 | unsigned int header_size, data_size, exp_max_sz; | |
2851 | struct crypto_hash *verify_tfm = NULL; | |
2852 | struct crypto_hash *csums_tfm = NULL; | |
2853 | const int apv = mdev->tconn->agreed_pro_version; | |
2854 | int *rs_plan_s = NULL; | |
2855 | int fifo_size = 0; | |
2856 | ||
2857 | exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param) | |
2858 | : apv == 88 ? sizeof(struct p_rs_param) | |
2859 | + SHARED_SECRET_MAX | |
2860 | : apv <= 94 ? sizeof(struct p_rs_param_89) | |
2861 | : /* apv >= 95 */ sizeof(struct p_rs_param_95); | |
2862 | ||
2863 | if (packet_size > exp_max_sz) { | |
2864 | dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n", | |
2865 | packet_size, exp_max_sz); | |
2866 | return false; | |
2867 | } | |
2868 | ||
2869 | if (apv <= 88) { | |
2870 | header_size = sizeof(struct p_rs_param) - sizeof(struct p_header); | |
2871 | data_size = packet_size - header_size; | |
2872 | } else if (apv <= 94) { | |
2873 | header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header); | |
2874 | data_size = packet_size - header_size; | |
2875 | D_ASSERT(data_size == 0); | |
2876 | } else { | |
2877 | header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header); | |
2878 | data_size = packet_size - header_size; | |
2879 | D_ASSERT(data_size == 0); | |
2880 | } | |
2881 | ||
2882 | /* initialize verify_alg and csums_alg */ | |
2883 | memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX); | |
2884 | ||
2885 | if (drbd_recv(mdev->tconn, &p->head.payload, header_size) != header_size) | |
2886 | return false; | |
2887 | ||
2888 | mdev->sync_conf.rate = be32_to_cpu(p->rate); | |
2889 | ||
2890 | if (apv >= 88) { | |
2891 | if (apv == 88) { | |
2892 | if (data_size > SHARED_SECRET_MAX) { | |
2893 | dev_err(DEV, "verify-alg too long, " | |
2894 | "peer wants %u, accepting only %u byte\n", | |
2895 | data_size, SHARED_SECRET_MAX); | |
2896 | return false; | |
2897 | } | |
2898 | ||
2899 | if (drbd_recv(mdev->tconn, p->verify_alg, data_size) != data_size) | |
2900 | return false; | |
2901 | ||
2902 | /* we expect NUL terminated string */ | |
2903 | /* but just in case someone tries to be evil */ | |
2904 | D_ASSERT(p->verify_alg[data_size-1] == 0); | |
2905 | p->verify_alg[data_size-1] = 0; | |
2906 | ||
2907 | } else /* apv >= 89 */ { | |
2908 | /* we still expect NUL terminated strings */ | |
2909 | /* but just in case someone tries to be evil */ | |
2910 | D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0); | |
2911 | D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0); | |
2912 | p->verify_alg[SHARED_SECRET_MAX-1] = 0; | |
2913 | p->csums_alg[SHARED_SECRET_MAX-1] = 0; | |
2914 | } | |
2915 | ||
2916 | if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) { | |
2917 | if (mdev->state.conn == C_WF_REPORT_PARAMS) { | |
2918 | dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n", | |
2919 | mdev->sync_conf.verify_alg, p->verify_alg); | |
2920 | goto disconnect; | |
2921 | } | |
2922 | verify_tfm = drbd_crypto_alloc_digest_safe(mdev, | |
2923 | p->verify_alg, "verify-alg"); | |
2924 | if (IS_ERR(verify_tfm)) { | |
2925 | verify_tfm = NULL; | |
2926 | goto disconnect; | |
2927 | } | |
2928 | } | |
2929 | ||
2930 | if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) { | |
2931 | if (mdev->state.conn == C_WF_REPORT_PARAMS) { | |
2932 | dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n", | |
2933 | mdev->sync_conf.csums_alg, p->csums_alg); | |
2934 | goto disconnect; | |
2935 | } | |
2936 | csums_tfm = drbd_crypto_alloc_digest_safe(mdev, | |
2937 | p->csums_alg, "csums-alg"); | |
2938 | if (IS_ERR(csums_tfm)) { | |
2939 | csums_tfm = NULL; | |
2940 | goto disconnect; | |
2941 | } | |
2942 | } | |
2943 | ||
2944 | if (apv > 94) { | |
2945 | mdev->sync_conf.rate = be32_to_cpu(p->rate); | |
2946 | mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead); | |
2947 | mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target); | |
2948 | mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target); | |
2949 | mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate); | |
2950 | ||
2951 | fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ; | |
2952 | if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) { | |
2953 | rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL); | |
2954 | if (!rs_plan_s) { | |
2955 | dev_err(DEV, "kmalloc of fifo_buffer failed"); | |
2956 | goto disconnect; | |
2957 | } | |
2958 | } | |
2959 | } | |
2960 | ||
2961 | spin_lock(&mdev->peer_seq_lock); | |
2962 | /* lock against drbd_nl_syncer_conf() */ | |
2963 | if (verify_tfm) { | |
2964 | strcpy(mdev->sync_conf.verify_alg, p->verify_alg); | |
2965 | mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1; | |
2966 | crypto_free_hash(mdev->verify_tfm); | |
2967 | mdev->verify_tfm = verify_tfm; | |
2968 | dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg); | |
2969 | } | |
2970 | if (csums_tfm) { | |
2971 | strcpy(mdev->sync_conf.csums_alg, p->csums_alg); | |
2972 | mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1; | |
2973 | crypto_free_hash(mdev->csums_tfm); | |
2974 | mdev->csums_tfm = csums_tfm; | |
2975 | dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg); | |
2976 | } | |
2977 | if (fifo_size != mdev->rs_plan_s.size) { | |
2978 | kfree(mdev->rs_plan_s.values); | |
2979 | mdev->rs_plan_s.values = rs_plan_s; | |
2980 | mdev->rs_plan_s.size = fifo_size; | |
2981 | mdev->rs_planed = 0; | |
2982 | } | |
2983 | spin_unlock(&mdev->peer_seq_lock); | |
2984 | } | |
2985 | ||
2986 | return ok; | |
2987 | disconnect: | |
2988 | /* just for completeness: actually not needed, | |
2989 | * as this is not reached if csums_tfm was ok. */ | |
2990 | crypto_free_hash(csums_tfm); | |
2991 | /* but free the verify_tfm again, if csums_tfm did not work out */ | |
2992 | crypto_free_hash(verify_tfm); | |
2993 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | |
2994 | return false; | |
2995 | } | |
2996 | ||
2997 | /* warn if the arguments differ by more than 12.5% */ | |
2998 | static void warn_if_differ_considerably(struct drbd_conf *mdev, | |
2999 | const char *s, sector_t a, sector_t b) | |
3000 | { | |
3001 | sector_t d; | |
3002 | if (a == 0 || b == 0) | |
3003 | return; | |
3004 | d = (a > b) ? (a - b) : (b - a); | |
3005 | if (d > (a>>3) || d > (b>>3)) | |
3006 | dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s, | |
3007 | (unsigned long long)a, (unsigned long long)b); | |
3008 | } | |
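 | /* | |
 | * Worked example (editor's sketch): a == 1000, b == 860 sectors gives | |
 | * d == 140, while a>>3 == 125 and b>>3 == 107, so d exceeds 12.5% of | |
 | * either argument and the warning fires. With b == 900, d == 100 stays | |
 | * below both thresholds and nothing is printed. | |
 | */ | |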
3009 | ||
3010 | static int receive_sizes(struct drbd_conf *mdev, enum drbd_packet cmd, | |
3011 | unsigned int data_size) | |
3012 | { | |
3013 | struct p_sizes *p = &mdev->tconn->data.rbuf.sizes; | |
3014 | enum determine_dev_size dd = unchanged; | |
3015 | sector_t p_size, p_usize, my_usize; | |
3016 | int ldsc = 0; /* local disk size changed */ | |
3017 | enum dds_flags ddsf; | |
3018 | ||
3019 | p_size = be64_to_cpu(p->d_size); | |
3020 | p_usize = be64_to_cpu(p->u_size); | |
3021 | ||
3022 | if (p_size == 0 && mdev->state.disk == D_DISKLESS) { | |
3023 | dev_err(DEV, "some backing storage is needed\n"); | |
3024 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | |
3025 | return false; | |
3026 | } | |
3027 | ||
3028 | /* just store the peer's disk size for now. | |
3029 | * we still need to figure out whether we accept that. */ | |
3030 | mdev->p_size = p_size; | |
3031 | ||
3032 | if (get_ldev(mdev)) { | |
3033 | warn_if_differ_considerably(mdev, "lower level device sizes", | |
3034 | p_size, drbd_get_max_capacity(mdev->ldev)); | |
3035 | warn_if_differ_considerably(mdev, "user requested size", | |
3036 | p_usize, mdev->ldev->dc.disk_size); | |
3037 | ||
3038 | /* if this is the first connect, or an otherwise expected | |
3039 | * param exchange, choose the minimum */ | |
3040 | if (mdev->state.conn == C_WF_REPORT_PARAMS) | |
3041 | p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size, | |
3042 | p_usize); | |
3043 | ||
3044 | my_usize = mdev->ldev->dc.disk_size; | |
3045 | ||
3046 | if (mdev->ldev->dc.disk_size != p_usize) { | |
3047 | mdev->ldev->dc.disk_size = p_usize; | |
3048 | dev_info(DEV, "Peer sets u_size to %lu sectors\n", | |
3049 | (unsigned long)mdev->ldev->dc.disk_size); | |
3050 | } | |
3051 | ||
3052 | /* Never shrink a device with usable data during connect. | |
3053 | But allow online shrinking if we are connected. */ | |
3054 | if (drbd_new_dev_size(mdev, mdev->ldev, 0) < | |
3055 | drbd_get_capacity(mdev->this_bdev) && | |
3056 | mdev->state.disk >= D_OUTDATED && | |
3057 | mdev->state.conn < C_CONNECTED) { | |
3058 | dev_err(DEV, "The peer's disk size is too small!\n"); | |
3059 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | |
3060 | mdev->ldev->dc.disk_size = my_usize; | |
3061 | put_ldev(mdev); | |
3062 | return false; | |
3063 | } | |
3064 | put_ldev(mdev); | |
3065 | } | |
3066 | ||
3067 | ddsf = be16_to_cpu(p->dds_flags); | |
3068 | if (get_ldev(mdev)) { | |
3069 | dd = drbd_determine_dev_size(mdev, ddsf); | |
3070 | put_ldev(mdev); | |
3071 | if (dd == dev_size_error) | |
3072 | return false; | |
3073 | drbd_md_sync(mdev); | |
3074 | } else { | |
3075 | /* I am diskless, need to accept the peer's size. */ | |
3076 | drbd_set_my_capacity(mdev, p_size); | |
3077 | } | |
3078 | ||
3079 | mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size); | |
3080 | drbd_reconsider_max_bio_size(mdev); | |
3081 | ||
3082 | if (get_ldev(mdev)) { | |
3083 | if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) { | |
3084 | mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev); | |
3085 | ldsc = 1; | |
3086 | } | |
3087 | ||
3088 | put_ldev(mdev); | |
3089 | } | |
3090 | ||
3091 | if (mdev->state.conn > C_WF_REPORT_PARAMS) { | |
3092 | if (be64_to_cpu(p->c_size) != | |
3093 | drbd_get_capacity(mdev->this_bdev) || ldsc) { | |
3094 | /* we have different sizes, probably peer | |
3095 | * needs to know my new size... */ | |
3096 | drbd_send_sizes(mdev, 0, ddsf); | |
3097 | } | |
3098 | if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) || | |
3099 | (dd == grew && mdev->state.conn == C_CONNECTED)) { | |
3100 | if (mdev->state.pdsk >= D_INCONSISTENT && | |
3101 | mdev->state.disk >= D_INCONSISTENT) { | |
3102 | if (ddsf & DDSF_NO_RESYNC) | |
3103 | dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n"); | |
3104 | else | |
3105 | resync_after_online_grow(mdev); | |
3106 | } else | |
3107 | set_bit(RESYNC_AFTER_NEG, &mdev->flags); | |
3108 | } | |
3109 | } | |
3110 | ||
3111 | return true; | |
3112 | } | |
3113 | ||
3114 | static int receive_uuids(struct drbd_conf *mdev, enum drbd_packet cmd, | |
3115 | unsigned int data_size) | |
3116 | { | |
3117 | struct p_uuids *p = &mdev->tconn->data.rbuf.uuids; | |
3118 | u64 *p_uuid; | |
3119 | int i, updated_uuids = 0; | |
3120 | ||
3121 | p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO); | |
| if (!p_uuid) { | |
| dev_err(DEV, "kmalloc of p_uuid failed\n"); | |
| return false; | |
| } | |
3122 | ||
3123 | for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++) | |
3124 | p_uuid[i] = be64_to_cpu(p->uuid[i]); | |
3125 | ||
3126 | kfree(mdev->p_uuid); | |
3127 | mdev->p_uuid = p_uuid; | |
3128 | ||
3129 | if (mdev->state.conn < C_CONNECTED && | |
3130 | mdev->state.disk < D_INCONSISTENT && | |
3131 | mdev->state.role == R_PRIMARY && | |
3132 | (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) { | |
3133 | dev_err(DEV, "Can only connect to data with current UUID=%016llX\n", | |
3134 | (unsigned long long)mdev->ed_uuid); | |
3135 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | |
3136 | return false; | |
3137 | } | |
3138 | ||
3139 | if (get_ldev(mdev)) { | |
3140 | int skip_initial_sync = | |
3141 | mdev->state.conn == C_CONNECTED && | |
3142 | mdev->tconn->agreed_pro_version >= 90 && | |
3143 | mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && | |
3144 | (p_uuid[UI_FLAGS] & 8); | |
3145 | if (skip_initial_sync) { | |
3146 | dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n"); | |
3147 | drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, | |
3148 | "clear_n_write from receive_uuids", | |
3149 | BM_LOCKED_TEST_ALLOWED); | |
3150 | _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]); | |
3151 | _drbd_uuid_set(mdev, UI_BITMAP, 0); | |
3152 | _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), | |
3153 | CS_VERBOSE, NULL); | |
3154 | drbd_md_sync(mdev); | |
3155 | updated_uuids = 1; | |
3156 | } | |
3157 | put_ldev(mdev); | |
3158 | } else if (mdev->state.disk < D_INCONSISTENT && | |
3159 | mdev->state.role == R_PRIMARY) { | |
3160 | /* I am a diskless primary, the peer just created a new current UUID | |
3161 | for me. */ | |
3162 | updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); | |
3163 | } | |
3164 | ||
3165 | /* Before we test the disk state we should wait until any possibly | |
3166 | ongoing cluster-wide state change has finished. That is important if | |
3167 | we are primary and are detaching from our disk; we need to see the | |
3168 | new disk state... */ | |
3169 | wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags)); | |
3170 | if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT) | |
3171 | updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); | |
3172 | ||
3173 | if (updated_uuids) | |
3174 | drbd_print_uuids(mdev, "receiver updated UUIDs to"); | |
3175 | ||
3176 | return true; | |
3177 | } | |
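| ||
| /* For illustration (hypothetical values): the UUID comparison above | |
| * masks off the lowest bit on both sides, so an ed_uuid of | |
| * 0x4711000000000001 and a peer UI_CURRENT of 0x4711000000000000 | |
| * still compare as equal. */ | |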
3178 | ||
3179 | /** | |
3180 | * convert_state() - Converts the peer's view of the cluster state to our point of view | |
3181 | * @ps: The state as seen by the peer. | |
3182 | */ | |
3183 | static union drbd_state convert_state(union drbd_state ps) | |
3184 | { | |
3185 | union drbd_state ms; | |
3186 | ||
3187 | static enum drbd_conns c_tab[] = { | |
3188 | [C_CONNECTED] = C_CONNECTED, | |
3189 | ||
3190 | [C_STARTING_SYNC_S] = C_STARTING_SYNC_T, | |
3191 | [C_STARTING_SYNC_T] = C_STARTING_SYNC_S, | |
3192 | [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */ | |
3193 | [C_VERIFY_S] = C_VERIFY_T, | |
3194 | [C_MASK] = C_MASK, | |
3195 | }; | |
3196 | ||
3197 | ms.i = ps.i; | |
3198 | ||
3199 | ms.conn = c_tab[ps.conn]; | |
3200 | ms.peer = ps.role; | |
3201 | ms.role = ps.peer; | |
3202 | ms.pdsk = ps.disk; | |
3203 | ms.disk = ps.pdsk; | |
3204 | ms.peer_isp = (ps.aftr_isp | ps.user_isp); | |
3205 | ||
3206 | return ms; | |
3207 | } | |
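| ||
| /* For illustration (hypothetical values): if the peer reports | |
| * { role=R_PRIMARY, peer=R_SECONDARY, disk=D_UP_TO_DATE, | |
| *   pdsk=D_INCONSISTENT, conn=C_STARTING_SYNC_S }, convert_state() | |
| * yields our point of view: { role=R_SECONDARY, peer=R_PRIMARY, | |
| *   disk=D_INCONSISTENT, pdsk=D_UP_TO_DATE, conn=C_STARTING_SYNC_T }. */ | |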
3208 | ||
3209 | static int receive_req_state(struct drbd_conf *mdev, enum drbd_packet cmd, | |
3210 | unsigned int data_size) | |
3211 | { | |
3212 | struct p_req_state *p = &mdev->tconn->data.rbuf.req_state; | |
3213 | union drbd_state mask, val; | |
3214 | enum drbd_state_rv rv; | |
3215 | ||
3216 | mask.i = be32_to_cpu(p->mask); | |
3217 | val.i = be32_to_cpu(p->val); | |
3218 | ||
3219 | if (test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags) && | |
3220 | test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) { | |
3221 | drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG); | |
3222 | return true; | |
3223 | } | |
3224 | ||
3225 | mask = convert_state(mask); | |
3226 | val = convert_state(val); | |
3227 | ||
3228 | rv = drbd_change_state(mdev, CS_VERBOSE, mask, val); | |
3229 | ||
3230 | drbd_send_sr_reply(mdev, rv); | |
3231 | drbd_md_sync(mdev); | |
3232 | ||
3233 | return true; | |
3234 | } | |
3235 | ||
3236 | static int receive_state(struct drbd_conf *mdev, enum drbd_packet cmd, | |
3237 | unsigned int data_size) | |
3238 | { | |
3239 | struct p_state *p = &mdev->tconn->data.rbuf.state; | |
3240 | union drbd_state os, ns, peer_state; | |
3241 | enum drbd_disk_state real_peer_disk; | |
3242 | enum chg_state_flags cs_flags; | |
3243 | int rv; | |
3244 | ||
3245 | peer_state.i = be32_to_cpu(p->state); | |
3246 | ||
3247 | real_peer_disk = peer_state.disk; | |
3248 | if (peer_state.disk == D_NEGOTIATING) { | |
3249 | real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT; | |
3250 | dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk)); | |
3251 | } | |
3252 | ||
3253 | spin_lock_irq(&mdev->tconn->req_lock); | |
3254 | retry: | |
3255 | os = ns = mdev->state; | |
3256 | spin_unlock_irq(&mdev->tconn->req_lock); | |
3257 | ||
3258 | /* peer says his disk is uptodate, while we think it is inconsistent, | |
3259 | * and this happens while we think we have a sync going on. */ | |
3260 | if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE && | |
3261 | os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) { | |
3262 | /* If we are (becoming) SyncSource, but peer is still in sync | |
3263 | * preparation, ignore its uptodate-ness to avoid flapping, it | |
3264 | * will change to inconsistent once the peer reaches active | |
3265 | * syncing states. | |
3266 | * It may have changed syncer-paused flags, however, so we | |
3267 | * cannot ignore this completely. */ | |
3268 | if (peer_state.conn > C_CONNECTED && | |
3269 | peer_state.conn < C_SYNC_SOURCE) | |
3270 | real_peer_disk = D_INCONSISTENT; | |
3271 | ||
3272 | /* if peer_state changes to connected at the same time, | |
3273 | * it explicitly notifies us that it finished resync. | |
3274 | * Maybe we should finish it up, too? */ | |
3275 | else if (os.conn >= C_SYNC_SOURCE && | |
3276 | peer_state.conn == C_CONNECTED) { | |
3277 | if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) | |
3278 | drbd_resync_finished(mdev); | |
3279 | return true; | |
3280 | } | |
3281 | } | |
3282 | ||
3283 | /* peer says his disk is inconsistent, while we think it is uptodate, | |
3284 | * and this happens while the peer still thinks we have a sync going on, | |
3285 | * but we think we are already done with the sync. | |
3286 | * We ignore this to avoid flapping pdsk. | |
3287 | * This should not happen if the peer is a recent version of drbd. */ | |
3288 | if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT && | |
3289 | os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE) | |
3290 | real_peer_disk = D_UP_TO_DATE; | |
3291 | ||
3292 | if (ns.conn == C_WF_REPORT_PARAMS) | |
3293 | ns.conn = C_CONNECTED; | |
3294 | ||
3295 | if (peer_state.conn == C_AHEAD) | |
3296 | ns.conn = C_BEHIND; | |
3297 | ||
3298 | if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING && | |
3299 | get_ldev_if_state(mdev, D_NEGOTIATING)) { | |
3300 | int cr; /* consider resync */ | |
3301 | ||
3302 | /* if we established a new connection */ | |
3303 | cr = (os.conn < C_CONNECTED); | |
3304 | /* if we had an established connection | |
3305 | * and one of the nodes newly attaches a disk */ | |
3306 | cr |= (os.conn == C_CONNECTED && | |
3307 | (peer_state.disk == D_NEGOTIATING || | |
3308 | os.disk == D_NEGOTIATING)); | |
3309 | /* if we have both been inconsistent, and the peer has been | |
3310 | * forced to be UpToDate with --overwrite-data */ | |
3311 | cr |= test_bit(CONSIDER_RESYNC, &mdev->flags); | |
3312 | /* if we had been plain connected, and the admin requested to | |
3313 | * start a sync by "invalidate" or "invalidate-remote" */ | |
3314 | cr |= (os.conn == C_CONNECTED && | |
3315 | (peer_state.conn >= C_STARTING_SYNC_S && | |
3316 | peer_state.conn <= C_WF_BITMAP_T)); | |
3317 | ||
3318 | if (cr) | |
3319 | ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk); | |
3320 | ||
3321 | put_ldev(mdev); | |
3322 | if (ns.conn == C_MASK) { | |
3323 | ns.conn = C_CONNECTED; | |
3324 | if (mdev->state.disk == D_NEGOTIATING) { | |
3325 | drbd_force_state(mdev, NS(disk, D_FAILED)); | |
3326 | } else if (peer_state.disk == D_NEGOTIATING) { | |
3327 | dev_err(DEV, "Disk attach process on the peer node was aborted.\n"); | |
3328 | peer_state.disk = D_DISKLESS; | |
3329 | real_peer_disk = D_DISKLESS; | |
3330 | } else { | |
3331 | if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags)) | |
3332 | return false; | |
3333 | D_ASSERT(os.conn == C_WF_REPORT_PARAMS); | |
3334 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | |
3335 | return false; | |
3336 | } | |
3337 | } | |
3338 | } | |
3339 | ||
3340 | spin_lock_irq(&mdev->tconn->req_lock); | |
3341 | if (mdev->state.i != os.i) | |
3342 | goto retry; | |
3343 | clear_bit(CONSIDER_RESYNC, &mdev->flags); | |
3344 | ns.peer = peer_state.role; | |
3345 | ns.pdsk = real_peer_disk; | |
3346 | ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp); | |
3347 | if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING) | |
3348 | ns.disk = mdev->new_state_tmp.disk; | |
3349 | cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD); | |
3350 | if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED && | |
3351 | test_bit(NEW_CUR_UUID, &mdev->flags)) { | |
3352 | /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this | |
3353 | for temporary network outages! */ | |
3354 | spin_unlock_irq(&mdev->tconn->req_lock); | |
3355 | dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n"); | |
3356 | tl_clear(mdev); | |
3357 | drbd_uuid_new_current(mdev); | |
3358 | clear_bit(NEW_CUR_UUID, &mdev->flags); | |
3359 | drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0)); | |
3360 | return false; | |
3361 | } | |
3362 | rv = _drbd_set_state(mdev, ns, cs_flags, NULL); | |
3363 | ns = mdev->state; | |
3364 | spin_unlock_irq(&mdev->tconn->req_lock); | |
3365 | ||
3366 | if (rv < SS_SUCCESS) { | |
3367 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | |
3368 | return false; | |
3369 | } | |
3370 | ||
3371 | if (os.conn > C_WF_REPORT_PARAMS) { | |
3372 | if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED && | |
3373 | peer_state.disk != D_NEGOTIATING ) { | |
3374 | /* we want resync, peer has not yet decided to sync... */ | |
3375 | /* Nowadays only used when forcing a node into primary role and | |
3376 | setting its disk to UpToDate with that */ | |
3377 | drbd_send_uuids(mdev); | |
3378 | drbd_send_state(mdev); | |
3379 | } | |
3380 | } | |
3381 | ||
3382 | mdev->tconn->net_conf->want_lose = 0; | |
3383 | ||
3384 | drbd_md_sync(mdev); /* update connected indicator, la_size, ... */ | |
3385 | ||
3386 | return true; | |
3387 | } | |
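| ||
| /* Note on the retry loop above: the state is sampled under req_lock, | |
| * the new state is computed without the lock held (drbd_sync_handshake() | |
| * may sleep), and the lock is retaken to verify that the sample is still | |
| * current before committing; if it changed meanwhile, we start over. */ | |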
3388 | ||
3389 | static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packet cmd, | |
3390 | unsigned int data_size) | |
3391 | { | |
3392 | struct p_rs_uuid *p = &mdev->tconn->data.rbuf.rs_uuid; | |
3393 | ||
3394 | wait_event(mdev->misc_wait, | |
3395 | mdev->state.conn == C_WF_SYNC_UUID || | |
3396 | mdev->state.conn == C_BEHIND || | |
3397 | mdev->state.conn < C_CONNECTED || | |
3398 | mdev->state.disk < D_NEGOTIATING); | |
3399 | ||
3400 | /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */ | |
3401 | ||
3402 | /* Here the _drbd_uuid_ functions are right, current should | |
3403 | _not_ be rotated into the history */ | |
3404 | if (get_ldev_if_state(mdev, D_NEGOTIATING)) { | |
3405 | _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid)); | |
3406 | _drbd_uuid_set(mdev, UI_BITMAP, 0UL); | |
3407 | ||
3408 | drbd_print_uuids(mdev, "updated sync uuid"); | |
3409 | drbd_start_resync(mdev, C_SYNC_TARGET); | |
3410 | ||
3411 | put_ldev(mdev); | |
3412 | } else | |
3413 | dev_err(DEV, "Ignoring SyncUUID packet!\n"); | |
3414 | ||
3415 | return true; | |
3416 | } | |
3417 | ||
3418 | /** | |
3419 | * receive_bitmap_plain | |
3420 | * | |
3421 | * Return 0 when done, 1 when another iteration is needed, and a negative error | |
3422 | * code upon failure. | |
3423 | */ | |
3424 | static int | |
3425 | receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size, | |
3426 | unsigned long *buffer, struct bm_xfer_ctx *c) | |
3427 | { | |
3428 | unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset); | |
3429 | unsigned want = num_words * sizeof(long); | |
3430 | int err; | |
3431 | ||
3432 | if (want != data_size) { | |
3433 | dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size); | |
3434 | return -EIO; | |
3435 | } | |
3436 | if (want == 0) | |
3437 | return 0; | |
3438 | err = drbd_recv(mdev->tconn, buffer, want); | |
3439 | if (err != want) { | |
3440 | if (err >= 0) | |
3441 | err = -EIO; | |
3442 | return err; | |
3443 | } | |
3444 | ||
3445 | drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer); | |
3446 | ||
3447 | c->word_offset += num_words; | |
3448 | c->bit_offset = c->word_offset * BITS_PER_LONG; | |
3449 | if (c->bit_offset > c->bm_bits) | |
3450 | c->bit_offset = c->bm_bits; | |
3451 | ||
3452 | return 1; | |
3453 | } | |
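| ||
| /* For illustration (hypothetical values): assuming BM_PACKET_WORDS = 64, | |
| * BITS_PER_LONG = 64 and bm_words = 100, the first packet carries 64 | |
| * words, advancing word_offset from 0 to 64 and bit_offset to 4096; a | |
| * final, shorter packet carries the remaining 36 words, after which | |
| * bit_offset is clipped to bm_bits. */ | |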
3454 | ||
3455 | /** | |
3456 | * recv_bm_rle_bits | |
3457 | * | |
3458 | * Return 0 when done, 1 when another iteration is needed, and a negative error | |
3459 | * code upon failure. | |
3460 | */ | |
3461 | static int | |
3462 | recv_bm_rle_bits(struct drbd_conf *mdev, | |
3463 | struct p_compressed_bm *p, | |
3464 | struct bm_xfer_ctx *c, | |
3465 | unsigned int len) | |
3466 | { | |
3467 | struct bitstream bs; | |
3468 | u64 look_ahead; | |
3469 | u64 rl; | |
3470 | u64 tmp; | |
3471 | unsigned long s = c->bit_offset; | |
3472 | unsigned long e; | |
3473 | int toggle = DCBP_get_start(p); | |
3474 | int have; | |
3475 | int bits; | |
3476 | ||
3477 | bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p)); | |
3478 | ||
3479 | bits = bitstream_get_bits(&bs, &look_ahead, 64); | |
3480 | if (bits < 0) | |
3481 | return -EIO; | |
3482 | ||
3483 | for (have = bits; have > 0; s += rl, toggle = !toggle) { | |
3484 | bits = vli_decode_bits(&rl, look_ahead); | |
3485 | if (bits <= 0) | |
3486 | return -EIO; | |
3487 | ||
3488 | if (toggle) { | |
3489 | e = s + rl - 1; | |
3490 | if (e >= c->bm_bits) { | |
3491 | dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e); | |
3492 | return -EIO; | |
3493 | } | |
3494 | _drbd_bm_set_bits(mdev, s, e); | |
3495 | } | |
3496 | ||
3497 | if (have < bits) { | |
3498 | dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n", | |
3499 | have, bits, look_ahead, | |
3500 | (unsigned int)(bs.cur.b - p->code), | |
3501 | (unsigned int)bs.buf_len); | |
3502 | return -EIO; | |
3503 | } | |
3504 | look_ahead >>= bits; | |
3505 | have -= bits; | |
3506 | ||
3507 | bits = bitstream_get_bits(&bs, &tmp, 64 - have); | |
3508 | if (bits < 0) | |
3509 | return -EIO; | |
3510 | look_ahead |= tmp << have; | |
3511 | have += bits; | |
3512 | } | |
3513 | ||
3514 | c->bit_offset = s; | |
3515 | bm_xfer_ctx_bit_to_word_offset(c); | |
3516 | ||
3517 | return (s != c->bm_bits); | |
3518 | } | |
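| ||
| /* For illustration (hypothetical values): with a start toggle of 0 and | |
| * decoded run lengths 5, 3, 4, the stream describes bits 0-4 clear, | |
| * bits 5-7 set (applied via _drbd_bm_set_bits(mdev, 5, 7)) and bits | |
| * 8-11 clear: s advances by each rl while toggle flips per run. */ | |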
3519 | ||
3520 | /** | |
3521 | * decode_bitmap_c | |
3522 | * | |
3523 | * Return 0 when done, 1 when another iteration is needed, and a negative error | |
3524 | * code upon failure. | |
3525 | */ | |
3526 | static int | |
3527 | decode_bitmap_c(struct drbd_conf *mdev, | |
3528 | struct p_compressed_bm *p, | |
3529 | struct bm_xfer_ctx *c, | |
3530 | unsigned int len) | |
3531 | { | |
3532 | if (DCBP_get_code(p) == RLE_VLI_Bits) | |
3533 | return recv_bm_rle_bits(mdev, p, c, len); | |
3534 | ||
3535 | /* other variants had been implemented for evaluation, | |
3536 | * but have been dropped as this one turned out to be "best" | |
3537 | * during all our tests. */ | |
3538 | ||
3539 | dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding); | |
3540 | drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); | |
3541 | return -EIO; | |
3542 | } | |
3543 | ||
3544 | void INFO_bm_xfer_stats(struct drbd_conf *mdev, | |
3545 | const char *direction, struct bm_xfer_ctx *c) | |
3546 | { | |
3547 | /* what would it take to transfer it "plaintext" */ | |
3548 | unsigned plain = sizeof(struct p_header) * | |
3549 | ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1) | |
3550 | + c->bm_words * sizeof(long); | |
3551 | unsigned total = c->bytes[0] + c->bytes[1]; | |
3552 | unsigned r; | |
3553 | ||
3554 | /* total cannot be zero, but just in case: */ | |
3555 | if (total == 0) | |
3556 | return; | |
3557 | ||
3558 | /* don't report if not compressed */ | |
3559 | if (total >= plain) | |
3560 | return; | |
3561 | ||
3562 | /* total < plain. check for overflow, still */ | |
3563 | r = (total > UINT_MAX/1000) ? (total / (plain/1000)) | |
3564 | : (1000 * total / plain); | |
3565 | ||
3566 | if (r > 1000) | |
3567 | r = 1000; | |
3568 | ||
3569 | r = 1000 - r; | |
3570 | dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), " | |
3571 | "total %u; compression: %u.%u%%\n", | |
3572 | direction, | |
3573 | c->bytes[1], c->packets[1], | |
3574 | c->bytes[0], c->packets[0], | |
3575 | total, r/10, r % 10); | |
3576 | } | |
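| ||
| /* For illustration (hypothetical values): with plain = 4096 bytes and | |
| * total = 512 bytes, r = 1000 * 512 / 4096 = 125, so the line above | |
| * reports a compression of 1000 - 125 = 875, printed as "87.5%". */ | |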
3577 | ||
3578 | /* Since we are processing the bitfield from lower addresses to higher, | |
3579 | it does not matter whether we process it in 32 bit chunks or 64 bit | |
3580 | chunks as long as it is little endian. (Understand it as a byte stream, | |
3581 | beginning with the lowest byte...) If we used big endian, | |
3582 | we would need to process it from the highest address to the lowest | |
3583 | in order to be agnostic to the 32 vs. 64 bit issue. | |
3584 | ||
3585 | returns 0 on failure, 1 if we successfully received it. */ | |
3586 | static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packet cmd, | |
3587 | unsigned int data_size) | |
3588 | { | |
3589 | struct bm_xfer_ctx c; | |
3590 | void *buffer; | |
3591 | int err; | |
3592 | int ok = false; | |
3593 | struct p_header *h = &mdev->tconn->data.rbuf.header; | |
3594 | struct packet_info pi; | |
3595 | ||
3596 | drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED); | |
3597 | /* you are supposed to send additional out-of-sync information | |
3598 | * if you actually set bits during this phase */ | |
3599 | ||
3600 | /* maybe we should use some per thread scratch page, | |
3601 | * and allocate that during initial device creation? */ | |
3602 | buffer = (unsigned long *) __get_free_page(GFP_NOIO); | |
3603 | if (!buffer) { | |
3604 | dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__); | |
3605 | goto out; | |
3606 | } | |
3607 | ||
3608 | c = (struct bm_xfer_ctx) { | |
3609 | .bm_bits = drbd_bm_bits(mdev), | |
3610 | .bm_words = drbd_bm_words(mdev), | |
3611 | }; | |
3612 | ||
3613 | for(;;) { | |
3614 | if (cmd == P_BITMAP) { | |
3615 | err = receive_bitmap_plain(mdev, data_size, buffer, &c); | |
3616 | } else if (cmd == P_COMPRESSED_BITMAP) { | |
3617 | /* MAYBE: sanity check that we speak proto >= 90, | |
3618 | * and the feature is enabled! */ | |
3619 | struct p_compressed_bm *p; | |
3620 | ||
3621 | if (data_size > BM_PACKET_PAYLOAD_BYTES) { | |
3622 | dev_err(DEV, "ReportCBitmap packet too large\n"); | |
3623 | goto out; | |
3624 | } | |
3625 | /* use the page buffer */ | |
3626 | p = buffer; | |
3627 | memcpy(p, h, sizeof(*h)); | |
3628 | if (drbd_recv(mdev->tconn, p->head.payload, data_size) != data_size) | |
3629 | goto out; | |
3630 | if (data_size <= (sizeof(*p) - sizeof(p->head))) { | |
3631 | dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size); | |
3632 | goto out; | |
3633 | } | |
3634 | err = decode_bitmap_c(mdev, p, &c, data_size); | |
3635 | } else { | |
3636 | dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)\n", cmd); | |
3637 | goto out; | |
3638 | } | |
3639 | ||
3640 | c.packets[cmd == P_BITMAP]++; | |
3641 | c.bytes[cmd == P_BITMAP] += sizeof(struct p_header) + data_size; | |
3642 | ||
3643 | if (err <= 0) { | |
3644 | if (err < 0) | |
3645 | goto out; | |
3646 | break; | |
3647 | } | |
3648 | if (!drbd_recv_header(mdev->tconn, &pi)) | |
3649 | goto out; | |
3650 | cmd = pi.cmd; | |
3651 | data_size = pi.size; | |
3652 | } | |
3653 | ||
3654 | INFO_bm_xfer_stats(mdev, "receive", &c); | |
3655 | ||
3656 | if (mdev->state.conn == C_WF_BITMAP_T) { | |
3657 | enum drbd_state_rv rv; | |
3658 | ||
3659 | ok = !drbd_send_bitmap(mdev); | |
3660 | if (!ok) | |
3661 | goto out; | |
3662 | /* Omit CS_ORDERED with this state transition to avoid deadlocks. */ | |
3663 | rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); | |
3664 | D_ASSERT(rv == SS_SUCCESS); | |
3665 | } else if (mdev->state.conn != C_WF_BITMAP_S) { | |
3666 | /* admin may have requested C_DISCONNECTING, | |
3667 | * other threads may have noticed network errors */ | |
3668 | dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n", | |
3669 | drbd_conn_str(mdev->state.conn)); | |
3670 | } | |
3671 | ||
3672 | ok = true; | |
3673 | out: | |
3674 | drbd_bm_unlock(mdev); | |
3675 | if (ok && mdev->state.conn == C_WF_BITMAP_S) | |
3676 | drbd_start_resync(mdev, C_SYNC_SOURCE); | |
3677 | free_page((unsigned long) buffer); | |
3678 | return ok; | |
3679 | } | |
3680 | ||
3681 | static int receive_skip(struct drbd_conf *mdev, enum drbd_packet cmd, | |
3682 | unsigned int data_size) | |
3683 | { | |
3684 | /* TODO zero copy sink :) */ | |
3685 | static char sink[128]; | |
3686 | int size, want, r; | |
3687 | ||
3688 | dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n", | |
3689 | cmd, data_size); | |
3690 | ||
3691 | size = data_size; | |
3692 | while (size > 0) { | |
3693 | want = min_t(int, size, sizeof(sink)); | |
3694 | r = drbd_recv(mdev->tconn, sink, want); | |
3695 | if (!expect(r > 0)) | |
3696 | break; | |
3697 | size -= r; | |
3698 | } | |
3699 | return size == 0; | |
3700 | } | |
3701 | ||
3702 | static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packet cmd, | |
3703 | unsigned int data_size) | |
3704 | { | |
3705 | /* Make sure we've acked all the TCP data associated | |
3706 | * with the data requests being unplugged */ | |
3707 | drbd_tcp_quickack(mdev->tconn->data.socket); | |
3708 | ||
3709 | return true; | |
3710 | } | |
3711 | ||
3712 | static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packet cmd, | |
3713 | unsigned int data_size) | |
3714 | { | |
3715 | struct p_block_desc *p = &mdev->tconn->data.rbuf.block_desc; | |
3716 | ||
3717 | switch (mdev->state.conn) { | |
3718 | case C_WF_SYNC_UUID: | |
3719 | case C_WF_BITMAP_T: | |
3720 | case C_BEHIND: | |
3721 | break; | |
3722 | default: | |
3723 | dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n", | |
3724 | drbd_conn_str(mdev->state.conn)); | |
3725 | } | |
3726 | ||
3727 | drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize)); | |
3728 | ||
3729 | return true; | |
3730 | } | |
3731 | ||
3732 | typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packet cmd, | |
3733 | unsigned int to_receive); | |
3734 | ||
3735 | struct data_cmd { | |
3736 | int expect_payload; | |
3737 | size_t pkt_size; | |
3738 | drbd_cmd_handler_f function; | |
3739 | }; | |
3740 | ||
3741 | static struct data_cmd drbd_cmd_handler[] = { | |
3742 | [P_DATA] = { 1, sizeof(struct p_data), receive_Data }, | |
3743 | [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply }, | |
3744 | [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } , | |
3745 | [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } , | |
3746 | [P_BITMAP] = { 1, sizeof(struct p_header), receive_bitmap } , | |
3747 | [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header), receive_bitmap } , | |
3748 | [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header), receive_UnplugRemote }, | |
3749 | [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest }, | |
3750 | [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest }, | |
3751 | [P_SYNC_PARAM] = { 1, sizeof(struct p_header), receive_SyncParam }, | |
3752 | [P_SYNC_PARAM89] = { 1, sizeof(struct p_header), receive_SyncParam }, | |
3753 | [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol }, | |
3754 | [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids }, | |
3755 | [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes }, | |
3756 | [P_STATE] = { 0, sizeof(struct p_state), receive_state }, | |
3757 | [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state }, | |
3758 | [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid }, | |
3759 | [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest }, | |
3760 | [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest }, | |
3761 | [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest }, | |
3762 | [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip }, | |
3763 | [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync }, | |
3764 | /* anything missing from this table is in | |
3765 | * the asender_tbl, see get_asender_cmd */ | |
3766 | [P_MAX_CMD] = { 0, 0, NULL }, | |
3767 | }; | |
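| ||
| /* For illustration: drbdd() below derives the sub-header size from the | |
| * pkt_size column of this table; e.g. for P_SIZES it reads | |
| * sizeof(struct p_sizes) - sizeof(struct p_header) further bytes before | |
| * dispatching to receive_sizes(). */ | |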
3768 | ||
3769 | /* All handler functions that expect a sub-header get that sub-header in | |
3770 | mdev->tconn->data.rbuf.header.head.payload. | |
3771 | ||
3772 | Usually the callback can find the usual p_header in | |
3773 | mdev->tconn->data.rbuf.header.head, but it may not rely on that, | |
3774 | since there is also a p_header95 variant. | |
3774 | */ | |
3775 | ||
3776 | static void drbdd(struct drbd_tconn *tconn) | |
3777 | { | |
3778 | struct p_header *header = &tconn->data.rbuf.header; | |
3779 | struct packet_info pi; | |
3780 | size_t shs; /* sub header size */ | |
3781 | int rv; | |
3782 | ||
3783 | while (get_t_state(&tconn->receiver) == RUNNING) { | |
3784 | drbd_thread_current_set_cpu(&tconn->receiver); | |
3785 | if (!drbd_recv_header(tconn, &pi)) | |
3786 | goto err_out; | |
3787 | ||
3788 | if (unlikely(pi.cmd >= P_MAX_CMD || !drbd_cmd_handler[pi.cmd].function)) { | |
3789 | conn_err(tconn, "unknown packet type %d, l: %d!\n", pi.cmd, pi.size); | |
3790 | goto err_out; | |
3791 | } | |
3792 | ||
3793 | shs = drbd_cmd_handler[pi.cmd].pkt_size - sizeof(struct p_header); | |
3794 | if (pi.size - shs > 0 && !drbd_cmd_handler[pi.cmd].expect_payload) { | |
3795 | conn_err(tconn, "No payload expected %s l:%d\n", cmdname(pi.cmd), pi.size); | |
3796 | goto err_out; | |
3797 | } | |
3798 | ||
3799 | if (shs) { | |
3800 | rv = drbd_recv(tconn, &header->payload, shs); | |
3801 | if (unlikely(rv != shs)) { | |
3802 | if (!signal_pending(current)) | |
3803 | conn_warn(tconn, "short read while reading sub header: rv=%d\n", rv); | |
3804 | goto err_out; | |
3805 | } | |
3806 | } | |
3807 | ||
3808 | rv = drbd_cmd_handler[pi.cmd].function(vnr_to_mdev(tconn, pi.vnr), pi.cmd, pi.size - shs); | |
3809 | ||
3810 | if (unlikely(!rv)) { | |
3811 | conn_err(tconn, "error receiving %s, l: %d!\n", | |
3812 | cmdname(pi.cmd), pi.size); | |
3813 | goto err_out; | |
3814 | } | |
3815 | } | |
3816 | ||
3817 | if (0) { | |
3818 | err_out: | |
3819 | drbd_force_state(tconn->volume0, NS(conn, C_PROTOCOL_ERROR)); | |
3820 | } | |
3821 | } | |
3822 | ||
3823 | void drbd_flush_workqueue(struct drbd_conf *mdev) | |
3824 | { | |
3825 | struct drbd_wq_barrier barr; | |
3826 | ||
3827 | barr.w.cb = w_prev_work_done; | |
3828 | barr.w.mdev = mdev; | |
3829 | init_completion(&barr.done); | |
3830 | drbd_queue_work(&mdev->tconn->data.work, &barr.w); | |
3831 | wait_for_completion(&barr.done); | |
3832 | } | |
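| ||
| /* drbd_flush_workqueue() above is a completion barrier: it queues a work | |
| * item whose callback (w_prev_work_done) completes barr.done, so | |
| * wait_for_completion() returns only after every work item queued before | |
| * the barrier has been processed by the worker. */ | |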
3833 | ||
3834 | static void drbd_disconnect(struct drbd_tconn *tconn) | |
3835 | { | |
3836 | union drbd_state os, ns; | |
3837 | int rv = SS_UNKNOWN_ERROR; | |
3838 | ||
3839 | if (tconn->volume0->state.conn == C_STANDALONE) | |
3840 | return; | |
3841 | ||
3842 | /* asender does not clean up anything. it must not interfere, either */ | |
3843 | drbd_thread_stop(&tconn->asender); | |
3844 | drbd_free_sock(tconn); | |
3845 | ||
3846 | idr_for_each(&tconn->volumes, drbd_disconnected, tconn); | |
3847 | ||
3848 | conn_info(tconn, "Connection closed\n"); | |
3849 | ||
3850 | spin_lock_irq(&tconn->req_lock); | |
3851 | os = tconn->volume0->state; | |
3852 | if (os.conn >= C_UNCONNECTED) { | |
3853 | /* Do not restart in case we are C_DISCONNECTING */ | |
3854 | ns.i = os.i; | |
3855 | ns.conn = C_UNCONNECTED; | |
3856 | rv = _drbd_set_state(tconn->volume0, ns, CS_VERBOSE, NULL); | |
3857 | } | |
3858 | spin_unlock_irq(&tconn->req_lock); | |
3859 | ||
3860 | if (os.conn == C_DISCONNECTING) { | |
3861 | wait_event(tconn->net_cnt_wait, atomic_read(&tconn->net_cnt) == 0); | |
3862 | ||
3863 | crypto_free_hash(tconn->cram_hmac_tfm); | |
3864 | tconn->cram_hmac_tfm = NULL; | |
3865 | ||
3866 | kfree(tconn->net_conf); | |
3867 | tconn->net_conf = NULL; | |
3868 | drbd_request_state(tconn->volume0, NS(conn, C_STANDALONE)); | |
3869 | } | |
3870 | } | |
3871 | ||
3872 | static int drbd_disconnected(int vnr, void *p, void *data) | |
3873 | { | |
3874 | struct drbd_conf *mdev = (struct drbd_conf *)p; | |
3875 | enum drbd_fencing_p fp; | |
3876 | unsigned int i; | |
3877 | ||
3878 | /* wait for current activity to cease. */ | |
3879 | spin_lock_irq(&mdev->tconn->req_lock); | |
3880 | _drbd_wait_ee_list_empty(mdev, &mdev->active_ee); | |
3881 | _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee); | |
3882 | _drbd_wait_ee_list_empty(mdev, &mdev->read_ee); | |
3883 | spin_unlock_irq(&mdev->tconn->req_lock); | |
3884 | ||
3885 | /* We do not have data structures that would allow us to | |
3886 | * get the rs_pending_cnt down to 0 again. | |
3887 | * * On C_SYNC_TARGET we do not have any data structures describing | |
3888 | * the pending RSDataRequest's we have sent. | |
3889 | * * On C_SYNC_SOURCE there is no data structure that tracks | |
3890 | * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget. | |
3891 | * And no, it is not the sum of the reference counts in the | |
3892 | * resync_LRU. The resync_LRU tracks the whole operation including | |
3893 | * the disk-IO, while the rs_pending_cnt only tracks the blocks | |
3894 | * on the fly. */ | |
3895 | drbd_rs_cancel_all(mdev); | |
3896 | mdev->rs_total = 0; | |
3897 | mdev->rs_failed = 0; | |
3898 | atomic_set(&mdev->rs_pending_cnt, 0); | |
3899 | wake_up(&mdev->misc_wait); | |
3900 | ||
3901 | del_timer(&mdev->request_timer); | |
3902 | ||
3903 | /* make sure syncer is stopped and w_resume_next_sg queued */ | |
3904 | del_timer_sync(&mdev->resync_timer); | |
3905 | resync_timer_fn((unsigned long)mdev); | |
3906 | ||
3907 | /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier, | |
3908 | * w_make_resync_request etc. which may still be on the worker queue | |
3909 | * to be "canceled" */ | |
3910 | drbd_flush_workqueue(mdev); | |
3911 | ||
3912 | /* This also does reclaim_net_ee(). If we do this too early, we might | |
3913 | * miss some resync ee and pages.*/ | |
3914 | drbd_process_done_ee(mdev); | |
3915 | ||
3916 | kfree(mdev->p_uuid); | |
3917 | mdev->p_uuid = NULL; | |
3918 | ||
3919 | if (!is_susp(mdev->state)) | |
3920 | tl_clear(mdev); | |
3921 | ||
3922 | drbd_md_sync(mdev); | |
3923 | ||
3924 | fp = FP_DONT_CARE; | |
3925 | if (get_ldev(mdev)) { | |
3926 | fp = mdev->ldev->dc.fencing; | |
3927 | put_ldev(mdev); | |
3928 | } | |
3929 | ||
3930 | if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN) | |
3931 | drbd_try_outdate_peer_async(mdev); | |
3932 | ||
3933 | /* serialize with bitmap writeout triggered by the state change, | |
3934 | * if any. */ | |
3935 | wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); | |
3936 | ||
3937 | /* tcp_close and release of sendpage pages can be deferred. I don't | |
3938 | * want to use SO_LINGER, because apparently it can be deferred for | |
3939 | * more than 20 seconds (longest time I checked). | |
3940 | * | |
3941 | * Actually we don't care exactly when the network stack does its | |
3942 | * put_page(); we just release our reference on these pages right here. | |
3943 | */ | |
3944 | i = drbd_release_ee(mdev, &mdev->net_ee); | |
3945 | if (i) | |
3946 | dev_info(DEV, "net_ee not empty, killed %u entries\n", i); | |
3947 | i = atomic_read(&mdev->pp_in_use_by_net); | |
3948 | if (i) | |
3949 | dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i); | |
3950 | i = atomic_read(&mdev->pp_in_use); | |
3951 | if (i) | |
3952 | dev_info(DEV, "pp_in_use = %d, expected 0\n", i); | |
3953 | ||
3954 | D_ASSERT(list_empty(&mdev->read_ee)); | |
3955 | D_ASSERT(list_empty(&mdev->active_ee)); | |
3956 | D_ASSERT(list_empty(&mdev->sync_ee)); | |
3957 | D_ASSERT(list_empty(&mdev->done_ee)); | |
3958 | ||
3959 | /* ok, no more ee's on the fly, it is safe to reset the epoch_size */ | |
3960 | atomic_set(&mdev->current_epoch->epoch_size, 0); | |
3961 | D_ASSERT(list_empty(&mdev->current_epoch->list)); | |
3962 | ||
3963 | return 0; | |
3964 | } | |
3965 | ||
3966 | /* | |
3967 | * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version | |
3968 | * we can agree on is stored in agreed_pro_version. | |
3969 | * | |
3970 | * feature flags and the reserved array should be enough room for future | |
3971 | * enhancements of the handshake protocol, and possible plugins... | |
3972 | * | |
3973 | * for now, they are expected to be zero, but ignored. | |
3974 | */ | |
3975 | static int drbd_send_handshake(struct drbd_tconn *tconn) | |
3976 | { | |
3977 | /* ASSERT current == mdev->tconn->receiver ... */ | |
3978 | struct p_handshake *p = &tconn->data.sbuf.handshake; | |
3979 | int ok; | |
3980 | ||
3981 | if (mutex_lock_interruptible(&tconn->data.mutex)) { | |
3982 | conn_err(tconn, "interrupted during initial handshake\n"); | |
3983 | return 0; /* interrupted. not ok. */ | |
3984 | } | |
3985 | ||
3986 | if (tconn->data.socket == NULL) { | |
3987 | mutex_unlock(&tconn->data.mutex); | |
3988 | return 0; | |
3989 | } | |
3990 | ||
3991 | memset(p, 0, sizeof(*p)); | |
3992 | p->protocol_min = cpu_to_be32(PRO_VERSION_MIN); | |
3993 | p->protocol_max = cpu_to_be32(PRO_VERSION_MAX); | |
3994 | ok = _conn_send_cmd(tconn, 0, tconn->data.socket, P_HAND_SHAKE, | |
3995 | &p->head, sizeof(*p), 0); | |
3996 | mutex_unlock(&tconn->data.mutex); | |
3997 | return ok; | |
3998 | } | |
3999 | ||
4000 | /* | |
4001 | * return values: | |
4002 | * 1 yes, we have a valid connection | |
4003 | * 0 oops, did not work out, please try again | |
4004 | * -1 peer talks different language, | |
4005 | * no point in trying again, please go standalone. | |
4006 | */ | |
4007 | static int drbd_do_handshake(struct drbd_tconn *tconn) | |
4008 | { | |
4009 | /* ASSERT current == tconn->receiver ... */ | |
4010 | struct p_handshake *p = &tconn->data.rbuf.handshake; | |
4011 | const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80); | |
4012 | struct packet_info pi; | |
4013 | int rv; | |
4014 | ||
4015 | rv = drbd_send_handshake(tconn); | |
4016 | if (!rv) | |
4017 | return 0; | |
4018 | ||
4019 | rv = drbd_recv_header(tconn, &pi); | |
4020 | if (!rv) | |
4021 | return 0; | |
4022 | ||
4023 | if (pi.cmd != P_HAND_SHAKE) { | |
4024 | conn_err(tconn, "expected HandShake packet, received: %s (0x%04x)\n", | |
4025 | cmdname(pi.cmd), pi.cmd); | |
4026 | return -1; | |
4027 | } | |
4028 | ||
4029 | if (pi.size != expect) { | |
4030 | conn_err(tconn, "expected HandShake length: %u, received: %u\n", | |
4031 | expect, pi.size); | |
4032 | return -1; | |
4033 | } | |
4034 | ||
4035 | rv = drbd_recv(tconn, &p->head.payload, expect); | |
4036 | ||
4037 | if (rv != expect) { | |
4038 | if (!signal_pending(current)) | |
4039 | conn_warn(tconn, "short read receiving handshake packet: l=%u\n", rv); | |
4040 | return 0; | |
4041 | } | |
4042 | ||
4043 | p->protocol_min = be32_to_cpu(p->protocol_min); | |
4044 | p->protocol_max = be32_to_cpu(p->protocol_max); | |
4045 | if (p->protocol_max == 0) | |
4046 | p->protocol_max = p->protocol_min; | |
4047 | ||
4048 | if (PRO_VERSION_MAX < p->protocol_min || | |
4049 | PRO_VERSION_MIN > p->protocol_max) | |
4050 | goto incompat; | |
4051 | ||
4052 | tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max); | |
4053 | ||
4054 | conn_info(tconn, "Handshake successful: " | |
4055 | "Agreed network protocol version %d\n", tconn->agreed_pro_version); | |
4056 | ||
4057 | return 1; | |
4058 | ||
4059 | incompat: | |
4060 | conn_err(tconn, "incompatible DRBD dialects: " | |
4061 | "I support %d-%d, peer supports %d-%d\n", | |
4062 | PRO_VERSION_MIN, PRO_VERSION_MAX, | |
4063 | p->protocol_min, p->protocol_max); | |
4064 | return -1; | |
4065 | } | |
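| ||
| /* For illustration (hypothetical version numbers): if we support | |
| * protocol versions 86-96 and the peer announces protocol_min = 90, | |
| * protocol_max = 100, the ranges overlap and agreed_pro_version = | |
| * min(96, 100) = 96; a peer announcing 97-100 takes the incompat path. */ | |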
4066 | ||
4067 | #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE) | |
4068 | static int drbd_do_auth(struct drbd_tconn *tconn) | |
4069 | { | |
4070 | conn_err(tconn, "This kernel was built without CONFIG_CRYPTO_HMAC.\n"); | |
4071 | conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n"); | |
4072 | return -1; | |
4073 | } | |
4074 | #else | |
4075 | #define CHALLENGE_LEN 64 | |
4076 | ||
4077 | /* Return value: | |
4078 | 1 - auth succeeded, | |
4079 | 0 - failed, try again (network error), | |
4080 | -1 - auth failed, don't try again. | |
4081 | */ | |
4082 | ||
4083 | static int drbd_do_auth(struct drbd_tconn *tconn) | |
4084 | { | |
4085 | char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */ | |
4086 | struct scatterlist sg; | |
4087 | char *response = NULL; | |
4088 | char *right_response = NULL; | |
4089 | char *peers_ch = NULL; | |
4090 | unsigned int key_len = strlen(tconn->net_conf->shared_secret); | |
4091 | unsigned int resp_size; | |
4092 | struct hash_desc desc; | |
4093 | struct packet_info pi; | |
4094 | int rv; | |
4095 | ||
4096 | desc.tfm = tconn->cram_hmac_tfm; | |
4097 | desc.flags = 0; | |
4098 | ||
4099 | rv = crypto_hash_setkey(tconn->cram_hmac_tfm, | |
4100 | (u8 *)tconn->net_conf->shared_secret, key_len); | |
4101 | if (rv) { | |
4102 | conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv); | |
4103 | rv = -1; | |
4104 | goto fail; | |
4105 | } | |
4106 | ||
4107 | get_random_bytes(my_challenge, CHALLENGE_LEN); | |
4108 | ||
4109 | rv = conn_send_cmd2(tconn, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN); | |
4110 | if (!rv) | |
4111 | goto fail; | |
4112 | ||
4113 | rv = drbd_recv_header(tconn, &pi); | |
4114 | if (!rv) | |
4115 | goto fail; | |
4116 | ||
4117 | if (pi.cmd != P_AUTH_CHALLENGE) { | |
4118 | conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n", | |
4119 | cmdname(pi.cmd), pi.cmd); | |
4120 | rv = 0; | |
4121 | goto fail; | |
4122 | } | |
4123 | ||
4124 | if (pi.size > CHALLENGE_LEN * 2) { | |
4125 | conn_err(tconn, "AuthChallenge payload too big.\n"); | |
4126 | rv = -1; | |
4127 | goto fail; | |
4128 | } | |
4129 | ||
4130 | peers_ch = kmalloc(pi.size, GFP_NOIO); | |
4131 | if (peers_ch == NULL) { | |
4132 | conn_err(tconn, "kmalloc of peers_ch failed\n"); | |
4133 | rv = -1; | |
4134 | goto fail; | |
4135 | } | |
4136 | ||
4137 | rv = drbd_recv(tconn, peers_ch, pi.size); | |
4138 | ||
4139 | if (rv != pi.size) { | |
4140 | if (!signal_pending(current)) | |
4141 | conn_warn(tconn, "short read AuthChallenge: l=%u\n", rv); | |
4142 | rv = 0; | |
4143 | goto fail; | |
4144 | } | |
4145 | ||
4146 | resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm); | |
4147 | response = kmalloc(resp_size, GFP_NOIO); | |
4148 | if (response == NULL) { | |
4149 | conn_err(tconn, "kmalloc of response failed\n"); | |
4150 | rv = -1; | |
4151 | goto fail; | |
4152 | } | |
4153 | ||
4154 | sg_init_table(&sg, 1); | |
4155 | sg_set_buf(&sg, peers_ch, pi.size); | |
4156 | ||
4157 | rv = crypto_hash_digest(&desc, &sg, sg.length, response); | |
4158 | if (rv) { | |
4159 | conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv); | |
4160 | rv = -1; | |
4161 | goto fail; | |
4162 | } | |
4163 | ||
4164 | rv = conn_send_cmd2(tconn, P_AUTH_RESPONSE, response, resp_size); | |
4165 | if (!rv) | |
4166 | goto fail; | |
4167 | ||
4168 | rv = drbd_recv_header(tconn, &pi); | |
4169 | if (!rv) | |
4170 | goto fail; | |
4171 | ||
4172 | if (pi.cmd != P_AUTH_RESPONSE) { | |
4173 | conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n", | |
4174 | cmdname(pi.cmd), pi.cmd); | |
4175 | rv = 0; | |
4176 | goto fail; | |
4177 | } | |
4178 | ||
4179 | if (pi.size != resp_size) { | |
4180 | conn_err(tconn, "AuthResponse payload of wrong size\n"); | |
4181 | rv = 0; | |
4182 | goto fail; | |
4183 | } | |
4184 | ||
4185 | rv = drbd_recv(tconn, response, resp_size); | |
4186 | ||
4187 | if (rv != resp_size) { | |
4188 | if (!signal_pending(current)) | |
4189 | conn_warn(tconn, "short read receiving AuthResponse: l=%u\n", rv); | |
4190 | rv = 0; | |
4191 | goto fail; | |
4192 | } | |
4193 | ||
4194 | right_response = kmalloc(resp_size, GFP_NOIO); | |
4195 | if (right_response == NULL) { | |
4196 | conn_err(tconn, "kmalloc of right_response failed\n"); | |
4197 | rv = -1; | |
4198 | goto fail; | |
4199 | } | |
4200 | ||
4201 | sg_set_buf(&sg, my_challenge, CHALLENGE_LEN); | |
4202 | ||
4203 | rv = crypto_hash_digest(&desc, &sg, sg.length, right_response); | |
4204 | if (rv) { | |
4205 | conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv); | |
4206 | rv = -1; | |
4207 | goto fail; | |
4208 | } | |
4209 | ||
4210 | rv = !memcmp(response, right_response, resp_size); | |
4211 | ||
4212 | if (rv) | |
4213 | conn_info(tconn, "Peer authenticated using %d bytes of '%s' HMAC\n", | |
4214 | resp_size, tconn->net_conf->cram_hmac_alg); | |
4215 | else | |
4216 | rv = -1; | |
4217 | ||
4218 | fail: | |
4219 | kfree(peers_ch); | |
4220 | kfree(response); | |
4221 | kfree(right_response); | |
4222 | ||
4223 | return rv; | |
4224 | } | |
4225 | #endif | |
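| ||
| /* Summary of the symmetric challenge-response in drbd_do_auth() above: | |
| *   1. send P_AUTH_CHALLENGE with CHALLENGE_LEN random bytes | |
| *   2. receive the peer's P_AUTH_CHALLENGE | |
| *   3. send P_AUTH_RESPONSE = HMAC(shared_secret, peer's challenge) | |
| *   4. receive the peer's P_AUTH_RESPONSE and compare it against | |
| *      HMAC(shared_secret, my_challenge) | |
| * Only a peer that knows the shared secret can answer correctly. */ | |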
4226 | ||
4227 | int drbdd_init(struct drbd_thread *thi) | |
4228 | { | |
4229 | struct drbd_tconn *tconn = thi->mdev->tconn; | |
4230 | int h; | |
4231 | ||
4232 | conn_info(tconn, "receiver (re)started\n"); | |
4233 | ||
4234 | do { | |
4235 | h = drbd_connect(tconn); | |
4236 | if (h == 0) { | |
4237 | drbd_disconnect(tconn); | |
4238 | schedule_timeout_interruptible(HZ); | |
4239 | } | |
4240 | if (h == -1) { | |
4241 | conn_warn(tconn, "Discarding network configuration.\n"); | |
4242 | drbd_force_state(tconn->volume0, NS(conn, C_DISCONNECTING)); | |
4243 | } | |
4244 | } while (h == 0); | |
4245 | ||
4246 | if (h > 0) { | |
4247 | if (get_net_conf(tconn)) { | |
4248 | drbdd(tconn); | |
4249 | put_net_conf(tconn); | |
4250 | } | |
4251 | } | |
4252 | ||
4253 | drbd_disconnect(tconn); | |
4254 | ||
4255 | conn_info(tconn, "receiver terminated\n"); | |
4256 | return 0; | |
4257 | } | |
4258 | ||
4259 | /* ********* acknowledge sender ******** */ | |
4260 | ||
4261 | static int got_RqSReply(struct drbd_conf *mdev, enum drbd_packet cmd) | |
4262 | { | |
4263 | struct p_req_state_reply *p = &mdev->tconn->meta.rbuf.req_state_reply; | |
4264 | ||
4265 | int retcode = be32_to_cpu(p->retcode); | |
4266 | ||
4267 | if (retcode >= SS_SUCCESS) { | |
4268 | set_bit(CL_ST_CHG_SUCCESS, &mdev->flags); | |
4269 | } else { | |
4270 | set_bit(CL_ST_CHG_FAIL, &mdev->flags); | |
4271 | dev_err(DEV, "Requested state change failed by peer: %s (%d)\n", | |
4272 | drbd_set_st_err_str(retcode), retcode); | |
4273 | } | |
4274 | wake_up(&mdev->state_wait); | |
4275 | ||
4276 | return true; | |
4277 | } | |
4278 | ||
4279 | static int got_Ping(struct drbd_conf *mdev, enum drbd_packet cmd) | |
4280 | { | |
4281 | return drbd_send_ping_ack(mdev); | |
4282 | ||
4283 | } | |
4284 | ||
4285 | static int got_PingAck(struct drbd_conf *mdev, enum drbd_packet cmd) | |
4286 | { | |
4287 | /* restore idle timeout */ | |
4288 | mdev->tconn->meta.socket->sk->sk_rcvtimeo = mdev->tconn->net_conf->ping_int*HZ; | |
4289 | if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags)) | |
4290 | wake_up(&mdev->misc_wait); | |
4291 | ||
4292 | return true; | |
4293 | } | |
4294 | ||
4295 | static int got_IsInSync(struct drbd_conf *mdev, enum drbd_packet cmd) | |
4296 | { | |
4297 | struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack; | |
4298 | sector_t sector = be64_to_cpu(p->sector); | |
4299 | int blksize = be32_to_cpu(p->blksize); | |
4300 | ||
4301 | D_ASSERT(mdev->tconn->agreed_pro_version >= 89); | |
4302 | ||
4303 | update_peer_seq(mdev, be32_to_cpu(p->seq_num)); | |
4304 | ||
4305 | if (get_ldev(mdev)) { | |
4306 | drbd_rs_complete_io(mdev, sector); | |
4307 | drbd_set_in_sync(mdev, sector, blksize); | |
4308 | /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */ | |
4309 | mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT); | |
4310 | put_ldev(mdev); | |
4311 | } | |
4312 | dec_rs_pending(mdev); | |
4313 | atomic_add(blksize >> 9, &mdev->rs_sect_in); | |
4314 | ||
4315 | return true; | |
4316 | } | |
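| ||
| /* For illustration (assuming BM_BLOCK_SHIFT = 12, i.e. 4 KiB bitmap | |
| * blocks): a blksize of 4096 bytes credits one block to rs_same_csum | |
| * via blksize >> BM_BLOCK_SHIFT, and blksize >> 9 adds the eight | |
| * 512-byte sectors it covers to rs_sect_in. */ | |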
4317 | ||
4318 | static int | |
4319 | validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector, | |
4320 | struct rb_root *root, const char *func, | |
4321 | enum drbd_req_event what, bool missing_ok) | |
4322 | { | |
4323 | struct drbd_request *req; | |
4324 | struct bio_and_error m; | |
4325 | ||
4326 | spin_lock_irq(&mdev->tconn->req_lock); | |
4327 | req = find_request(mdev, root, id, sector, missing_ok, func); | |
4328 | if (unlikely(!req)) { | |
4329 | spin_unlock_irq(&mdev->tconn->req_lock); | |
4330 | return false; | |
4331 | } | |
4332 | __req_mod(req, what, &m); | |
4333 | spin_unlock_irq(&mdev->tconn->req_lock); | |
4334 | ||
4335 | if (m.bio) | |
4336 | complete_master_bio(mdev, &m); | |
4337 | return true; | |
4338 | } | |
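| ||
| /* For illustration: got_BlockAck() below resolves a P_WRITE_ACK through | |
| * this helper -- the request is looked up by (block_id, sector) in | |
| * mdev->write_requests, __req_mod() applies WRITE_ACKED_BY_PEER, and a | |
| * master bio completed by that transition is finished outside the lock. */ | |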
4339 | ||
4340 | static int got_BlockAck(struct drbd_conf *mdev, enum drbd_packet cmd) | |
4341 | { | |
4342 | struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack; | |
4343 | sector_t sector = be64_to_cpu(p->sector); | |
4344 | int blksize = be32_to_cpu(p->blksize); | |
4345 | enum drbd_req_event what; | |
4346 | ||
4347 | update_peer_seq(mdev, be32_to_cpu(p->seq_num)); | |
4348 | ||
4349 | if (p->block_id == ID_SYNCER) { | |
4350 | drbd_set_in_sync(mdev, sector, blksize); | |
4351 | dec_rs_pending(mdev); | |
4352 | return true; | |
4353 | } | |
4354 | switch (cmd) { | |
4355 | case P_RS_WRITE_ACK: | |
4356 | D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C); | |
4357 | what = WRITE_ACKED_BY_PEER_AND_SIS; | |
4358 | break; | |
4359 | case P_WRITE_ACK: | |
4360 | D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C); | |
4361 | what = WRITE_ACKED_BY_PEER; | |
4362 | break; | |
4363 | case P_RECV_ACK: | |
4364 | D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_B); | |
4365 | what = RECV_ACKED_BY_PEER; | |
4366 | break; | |
4367 | case P_DISCARD_ACK: | |
4368 | D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C); | |
4369 | what = CONFLICT_DISCARDED_BY_PEER; | |
4370 | break; | |
4371 | default: | |
4372 | D_ASSERT(0); | |
4373 | return false; | |
4374 | } | |
4375 | ||
4376 | return validate_req_change_req_state(mdev, p->block_id, sector, | |
4377 | &mdev->write_requests, __func__, | |
4378 | what, false); | |
4379 | } | |
4380 | ||
4381 | static int got_NegAck(struct drbd_conf *mdev, enum drbd_packet cmd) | |
4382 | { | |
4383 | struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack; | |
4384 | sector_t sector = be64_to_cpu(p->sector); | |
4385 | int size = be32_to_cpu(p->blksize); | |
4386 | bool missing_ok = mdev->tconn->net_conf->wire_protocol == DRBD_PROT_A || | |
4387 | mdev->tconn->net_conf->wire_protocol == DRBD_PROT_B; | |
4388 | bool found; | |
4389 | ||
4390 | update_peer_seq(mdev, be32_to_cpu(p->seq_num)); | |
4391 | ||
4392 | if (p->block_id == ID_SYNCER) { | |
4393 | dec_rs_pending(mdev); | |
4394 | drbd_rs_failed_io(mdev, sector, size); | |
4395 | return true; | |
4396 | } | |
4397 | ||
4398 | found = validate_req_change_req_state(mdev, p->block_id, sector, | |
4399 | &mdev->write_requests, __func__, | |
4400 | NEG_ACKED, missing_ok); | |
4401 | if (!found) { | |
4402 | /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs. | |
4403 | The master bio might already be completed, therefore the | |
4404 | request is no longer in the collision hash. */ | |
4405 | /* In Protocol B we might already have got a P_RECV_ACK | |
4406 | but then get a P_NEG_ACK afterwards. */ | |
4407 | if (!missing_ok) | |
4408 | return false; | |
4409 | drbd_set_out_of_sync(mdev, sector, size); | |
4410 | } | |
4411 | return true; | |
4412 | } | |
4413 | ||
4414 | static int got_NegDReply(struct drbd_conf *mdev, enum drbd_packet cmd) | |
4415 | { | |
4416 | struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack; | |
4417 | sector_t sector = be64_to_cpu(p->sector); | |
4418 | ||
4419 | update_peer_seq(mdev, be32_to_cpu(p->seq_num)); | |
4420 | dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n", | |
4421 | (unsigned long long)sector, be32_to_cpu(p->blksize)); | |
4422 | ||
4423 | return validate_req_change_req_state(mdev, p->block_id, sector, | |
4424 | &mdev->read_requests, __func__, | |
4425 | NEG_ACKED, false); | |
4426 | } | |
4427 | ||
4428 | static int got_NegRSDReply(struct drbd_conf *mdev, enum drbd_packet cmd) | |
4429 | { | |
4430 | sector_t sector; | |
4431 | int size; | |
4432 | struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack; | |
4433 | ||
4434 | sector = be64_to_cpu(p->sector); | |
4435 | size = be32_to_cpu(p->blksize); | |
4436 | ||
4437 | update_peer_seq(mdev, be32_to_cpu(p->seq_num)); | |
4438 | ||
4439 | dec_rs_pending(mdev); | |
4440 | ||
4441 | if (get_ldev_if_state(mdev, D_FAILED)) { | |
4442 | drbd_rs_complete_io(mdev, sector); | |
4443 | switch (cmd) { | |
4444 | case P_NEG_RS_DREPLY: | |
4445 | drbd_rs_failed_io(mdev, sector, size); | |
| /* fall through */ | |
4446 | case P_RS_CANCEL: | |
4447 | break; | |
4448 | default: | |
4449 | D_ASSERT(0); | |
4450 | put_ldev(mdev); | |
4451 | return false; | |
4452 | } | |
4453 | put_ldev(mdev); | |
4454 | } | |
4455 | ||
4456 | return true; | |
4457 | } | |
4458 | ||
4459 | static int got_BarrierAck(struct drbd_conf *mdev, enum drbd_packet cmd) | |
4460 | { | |
4461 | struct p_barrier_ack *p = &mdev->tconn->meta.rbuf.barrier_ack; | |
4462 | ||
4463 | tl_release(mdev, p->barrier, be32_to_cpu(p->set_size)); | |
4464 | ||
4465 | if (mdev->state.conn == C_AHEAD && | |
4466 | atomic_read(&mdev->ap_in_flight) == 0 && | |
4467 | !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) { | |
4468 | mdev->start_resync_timer.expires = jiffies + HZ; | |
4469 | add_timer(&mdev->start_resync_timer); | |
4470 | } | |
4471 | ||
4472 | return true; | |
4473 | } | |
4474 | ||
4475 | static int got_OVResult(struct drbd_conf *mdev, enum drbd_packet cmd) | |
4476 | { | |
4477 | struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack; | |
4478 | struct drbd_work *w; | |
4479 | sector_t sector; | |
4480 | int size; | |
4481 | ||
4482 | sector = be64_to_cpu(p->sector); | |
4483 | size = be32_to_cpu(p->blksize); | |
4484 | ||
4485 | update_peer_seq(mdev, be32_to_cpu(p->seq_num)); | |
4486 | ||
4487 | if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC) | |
4488 | drbd_ov_oos_found(mdev, sector, size); | |
4489 | else | |
4490 | ov_oos_print(mdev); | |
4491 | ||
4492 | if (!get_ldev(mdev)) | |
4493 | return true; | |
4494 | ||
4495 | drbd_rs_complete_io(mdev, sector); | |
4496 | dec_rs_pending(mdev); | |
4497 | ||
4498 | --mdev->ov_left; | |
4499 | ||
4500 | /* let's advance progress step marks only for every other megabyte */ | |
4501 | if ((mdev->ov_left & 0x200) == 0x200) | |
4502 | drbd_advance_rs_marks(mdev, mdev->ov_left); | |
4503 | ||
4504 | if (mdev->ov_left == 0) { | |
4505 | w = kmalloc(sizeof(*w), GFP_NOIO); | |
4506 | if (w) { | |
4507 | w->cb = w_ov_finished; | |
4508 | w->mdev = mdev; | |
4509 | drbd_queue_work_front(&mdev->tconn->data.work, w); | |
4510 | } else { | |
4511 | dev_err(DEV, "kmalloc(w) failed."); | |
4512 | ov_oos_print(mdev); | |
4513 | drbd_resync_finished(mdev); | |
4514 | } | |
4515 | } | |
4516 | put_ldev(mdev); | |
4517 | return true; | |
4518 | } | |
4519 | ||
4520 | static int got_skip(struct drbd_conf *mdev, enum drbd_packet cmd) | |
4521 | { | |
4522 | return true; | |
4523 | } | |
4524 | ||
4525 | struct asender_cmd { | |
4526 | size_t pkt_size; | |
4527 | int (*process)(struct drbd_conf *mdev, enum drbd_packet cmd); | |
4528 | }; | |
4529 | ||
4530 | static struct asender_cmd *get_asender_cmd(int cmd) | |
4531 | { | |
4532 | static struct asender_cmd asender_tbl[] = { | |
4533 | /* anything missing from this table is in | |
4534 | * the drbd_cmd_handler (drbd_default_handler) table, | |
4535 | * see the beginning of drbdd() */ | |
4536 | [P_PING] = { sizeof(struct p_header), got_Ping }, | |
4537 | [P_PING_ACK] = { sizeof(struct p_header), got_PingAck }, | |
4538 | [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, | |
4539 | [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, | |
4540 | [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, | |
4541 | [P_DISCARD_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, | |
4542 | [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck }, | |
4543 | [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply }, | |
4544 | [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply}, | |
4545 | [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult }, | |
4546 | [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck }, | |
4547 | [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply }, | |
4548 | [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync }, | |
4549 | [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip }, | |
4550 | [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply}, | |
4551 | [P_MAX_CMD] = { 0, NULL }, | |
4552 | }; | |
4553 | if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL) | |
4554 | return NULL; | |
4555 | return &asender_tbl[cmd]; | |
4556 | } | |
4557 | ||
4558 | int drbd_asender(struct drbd_thread *thi) | |
4559 | { | |
4560 | struct drbd_conf *mdev = thi->mdev; | |
4561 | struct p_header *h = &mdev->tconn->meta.rbuf.header; | |
4562 | struct asender_cmd *cmd = NULL; | |
4563 | struct packet_info pi; | |
4564 | ||
4565 | int rv; | |
4566 | void *buf = h; | |
4567 | int received = 0; | |
4568 | int expect = sizeof(struct p_header); | |
4569 | int ping_timeout_active = 0; | |
4570 | int empty; | |
4571 | ||
4572 | current->policy = SCHED_RR; /* Make this a realtime task! */ | |
4573 | current->rt_priority = 2; /* more important than all other tasks */ | |
4574 | ||
	while (get_t_state(thi) == RUNNING) {
		drbd_thread_current_set_cpu(thi);
		if (test_and_clear_bit(SEND_PING, &mdev->tconn->flags)) {
			if (!drbd_send_ping(mdev)) {
				dev_err(DEV, "drbd_send_ping has failed\n");
				goto reconnect;
			}
			mdev->tconn->meta.socket->sk->sk_rcvtimeo =
				mdev->tconn->net_conf->ping_timeo*HZ/10;
			ping_timeout_active = 1;
		}

		/* conditionally cork;
		 * it may hurt latency if we cork without much to send */
		if (!mdev->tconn->net_conf->no_cork &&
		    atomic_read(&mdev->unacked_cnt) > 3)
			drbd_tcp_cork(mdev->tconn->meta.socket);
		while (1) {
			clear_bit(SIGNAL_ASENDER, &mdev->tconn->flags);
			flush_signals(current);
			if (!drbd_process_done_ee(mdev))
				goto reconnect;
			/* to avoid race with newly queued ACKs */
			set_bit(SIGNAL_ASENDER, &mdev->tconn->flags);
			spin_lock_irq(&mdev->tconn->req_lock);
			empty = list_empty(&mdev->done_ee);
			spin_unlock_irq(&mdev->tconn->req_lock);
			/* new ack may have been queued right here,
			 * but then there is also a signal pending,
			 * and we start over... */
			if (empty)
				break;
		}
		/* but unconditionally uncork unless disabled */
		if (!mdev->tconn->net_conf->no_cork)
			drbd_tcp_uncork(mdev->tconn->meta.socket);

		/* short circuit, recv_msg would return EINTR anyway. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(mdev->tconn->meta.socket, buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &mdev->tconn->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta)  we got a signal
		 * -EAGAIN	 (on meta)  rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS  (on data)  we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			dev_err(DEV, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			/* If the data socket received something meanwhile,
			 * that is good enough: peer is still alive. */
			if (time_after(mdev->tconn->last_received,
				jiffies - mdev->tconn->meta.socket->sk->sk_rcvtimeo))
				continue;
			if (ping_timeout_active) {
				dev_err(DEV, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &mdev->tconn->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

		if (received == expect && cmd == NULL) {
			if (!decode_header(mdev->tconn, h, &pi))
				goto reconnect;
			cmd = get_asender_cmd(pi.cmd);
			if (unlikely(cmd == NULL)) {
				dev_err(DEV, "unknown command %d on meta (l: %d)\n",
					pi.cmd, pi.size);
				goto disconnect;
			}
			expect = cmd->pkt_size;
			if (pi.size != expect - sizeof(struct p_header)) {
				dev_err(DEV, "Wrong packet size on meta (c: %d, l: %d)\n",
					pi.cmd, pi.size);
				goto reconnect;
			}
		}
		if (received == expect) {
			mdev->tconn->last_received = jiffies;
			D_ASSERT(cmd != NULL);
			if (!cmd->process(mdev, pi.cmd))
				goto reconnect;

			/* the idle_timeout (ping-int)
			 * has been restored in got_PingAck() */
			if (cmd == get_asender_cmd(P_PING_ACK))
				ping_timeout_active = 0;

			buf	 = h;
			received = 0;
			expect	 = sizeof(struct p_header);
			cmd	 = NULL;
		}
	}
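
	/*
	 * The "if (0)" blocks below are reachable only through the
	 * reconnect: and disconnect: labels; a normal loop exit (thread
	 * asked to stop) falls straight through to the cleanup.
	 */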
	if (0) {
reconnect:
		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
		drbd_md_sync(mdev);
	}
	if (0) {
disconnect:
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		drbd_md_sync(mdev);
	}
	clear_bit(SIGNAL_ASENDER, &mdev->tconn->flags);

	D_ASSERT(mdev->state.conn < C_CONNECTED);
	dev_info(DEV, "asender terminated\n");

	return 0;
}
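
/*
 * Sketch (not from this file): elsewhere in DRBD the asender is wired
 * up as one of the per-connection threads, roughly
 *
 *	drbd_thread_init(mdev, &thi, drbd_asender);
 *	drbd_thread_start(&thi);
 *
 * The exact init/start signatures are assumed from the classic DRBD
 * thread API and may differ in this tconn-refactored tree; treat the
 * two lines above as illustrative only.
 */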