/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

struct packet_info {
	enum drbd_packet cmd;
	int size;
	int vnr;
};

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

enum mdev_or_conn {
	MDEV,
	CONN,
};

static int drbd_do_handshake(struct drbd_tconn *tconn);
static int drbd_do_auth(struct drbd_tconn *tconn);
static int drbd_disconnected(int vnr, void *p, void *data);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

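/*
 * Illustrative sketch (not part of the driver): how the chain helpers above
 * are meant to compose.  Assuming the caller already holds drbd_pp_lock,
 * taking a few pages off the global pool and later splicing them back looks
 * like:
 *
 *	struct page *chain, *tail;
 *	int n;
 *
 *	chain = page_chain_del(&drbd_pp_pool, 3);	// NULL if fewer than 3 linked
 *	...
 *	tail = page_chain_tail(chain, &n);		// n == 3
 *	page_chain_add(&drbd_pp_pool, chain, tail);	// splice chain back at head
 *
 * page->private serves as the "next" pointer, and a page->private of 0
 * terminates the chain, so no extra list head structure is needed.
 */
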
static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_peer_request *peer_req;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first unfinished one, we can
	   stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		peer_req = list_entry(le, struct drbd_peer_request, w.list);
		if (drbd_ee_has_active_page(peer_req))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, peer_req);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->tconn->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->tconn->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}

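/*
 * Typical call pattern (sketch only, hypothetical caller): drbd_pp_alloc()
 * and drbd_pp_free() come in pairs, and the "is_net" argument of
 * drbd_pp_free() must match the counter the pages were charged to:
 *
 *	struct page *page = drbd_pp_alloc(mdev, nr_pages, true);
 *	if (!page)
 *		return -ENOMEM;		// only if interrupted or !retry
 *	...
 *	drbd_pp_free(mdev, page, 0);	// uncharges mdev->pp_in_use
 */
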
/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->tconn->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_ee()
 drbd_alloc_ee()
 drbd_init_ee()
 drbd_release_ee()
 drbd_ee_fix_bhs()
 drbd_process_done_ee()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

struct drbd_peer_request *
drbd_alloc_ee(struct drbd_conf *mdev, u64 id, sector_t sector,
	      unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_peer_request *peer_req;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!peer_req) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	drbd_clear_interval(&peer_req->i);
	peer_req->i.size = data_size;
	peer_req->i.sector = sector;
	peer_req->i.local = false;
	peer_req->i.waiting = false;

	peer_req->epoch = NULL;
	peer_req->w.mdev = mdev;
	peer_req->pages = page;
	atomic_set(&peer_req->pending_bios, 0);
	peer_req->flags = 0;
	/*
	 * The block_id is opaque to the receiver.  It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	peer_req->block_id = id;

	return peer_req;

 fail:
	mempool_free(peer_req, drbd_ee_mempool);
	return NULL;
}

void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
		       int is_net)
{
	if (peer_req->flags & EE_HAS_DIGEST)
		kfree(peer_req->digest);
	drbd_pp_free(mdev, peer_req->pages, is_net);
	D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
	D_ASSERT(drbd_interval_empty(&peer_req->i));
	mempool_free(peer_req, drbd_ee_mempool);
}

int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_peer_request *peer_req, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, peer_req, is_net);
		count++;
	}
	return count;
}


/* See also comments in _req_mod(,BARRIER_ACKED)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;
	int err = 0;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, peer_req);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_write.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		int err2;

		/* list_del not necessary, next/prev members not touched */
		err2 = peer_req->w.cb(&peer_req->w, !!err);
		if (!err)
			err = err2;
		drbd_free_ee(mdev, peer_req);
	}
	wake_up(&mdev->ee_wait);

	return err;
}

void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->tconn->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->tconn->req_lock);
	}
}

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->tconn->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->tconn->req_lock);
}

/* see also kernel_accept; which is only present since 2.6.18.
 * also we want to log which part of it failed, exactly */
static int drbd_accept(const char **what, struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops = sock->ops;

out:
	return err;
}

static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(tconn->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on  sock) we got a signal
		 */

		if (rv < 0) {
			if (rv == -ECONNRESET)
				conn_info(tconn, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				conn_err(tconn, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			conn_info(tconn, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	};

	set_fs(oldfs);

	if (rv != size)
		conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);

	return rv;
}

static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
{
	int err;

	err = drbd_recv(tconn, buf, size);
	if (err != size) {
		if (err >= 0)
			err = -EIO;
	} else
		err = 0;
	return err;
}

static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
{
	int err;

	err = drbd_recv_all(tconn, buf, size);
	if (err && !signal_pending(current))
		conn_warn(tconn, "short read (expected size %d)\n", (int)size);
	return err;
}

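/*
 * Receive helper layering (explanatory sketch only): drbd_recv() loops in
 * sock_recvmsg() until the full size arrived or the connection broke,
 * drbd_recv_all() turns a short read into -EIO, and drbd_recv_all_warn()
 * additionally logs it.  Reading a fixed-size header therefore reduces to:
 *
 *	err = drbd_recv_all_warn(tconn, h, sizeof(*h));
 *	if (err)
 *		return err;	// 0 on success, negative error otherwise
 */
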
/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(tconn))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)tconn->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = tconn->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, tconn->net_conf->sndbuf_size,
			tconn->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, tconn->net_conf->my_addr,
	       min_t(int, tconn->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)tconn->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      tconn->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)tconn->net_conf->peer_addr,
				 tconn->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			conn_err(tconn, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
	}
	put_net_conf(tconn);
	return sock;
}

static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(tconn))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)tconn->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = tconn->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, tconn->net_conf->sndbuf_size,
			tconn->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) tconn->net_conf->my_addr,
			      tconn->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(&what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			conn_err(tconn, "%s failed, err = %d\n", what, err);
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}
	put_net_conf(tconn);

	return s_estab;
}

static int drbd_send_fp(struct drbd_tconn *tconn, struct socket *sock, enum drbd_packet cmd)
{
	struct p_header *h = &tconn->data.sbuf.header;

	return !_conn_send_cmd(tconn, 0, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packet drbd_recv_fp(struct drbd_tconn *tconn, struct socket *sock)
{
	struct p_header80 *h = &tconn->data.rbuf.header.h80;
	int rr;

	rr = drbd_recv_short(sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == cpu_to_be32(DRBD_MAGIC))
		return be16_to_cpu(h->command);

	return 0xffff;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}
/* Gets called if a connection is established, or if a new minor gets created
   in a connection */
int drbd_connected(int vnr, void *p, void *data)
{
	struct drbd_conf *mdev = (struct drbd_conf *)p;
	int err;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
		&mdev->tconn->cstate_mutex :
		&mdev->own_state_mutex;

	err = drbd_send_sync_param(mdev);
	if (!err)
		err = drbd_send_sizes(mdev, 0, 0);
	if (!err)
		err = drbd_send_uuids(mdev);
	if (!err)
		err = drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);
	return err;
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_tconn *tconn)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &tconn->flags);
	tconn->agreed_pro_version = 99;
	/* agreed_pro_version must be smaller than 100 so we send the old
	   header (h80) in the first packet and in the handshake packet. */

	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(tconn);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			schedule_timeout_interruptible(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(tconn, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(tconn, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				conn_err(tconn, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			schedule_timeout_interruptible(tconn->net_conf->ping_timeo*HZ/10);
			ok = drbd_socket_okay(&sock);
			ok = drbd_socket_okay(&msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(tconn);
		if (s) {
			try = drbd_recv_fp(tconn, s);
			drbd_socket_okay(&sock);
			drbd_socket_okay(&msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					conn_warn(tconn, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					conn_warn(tconn, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &tconn->flags);
				break;
			default:
				conn_warn(tconn, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (tconn->cstate <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&tconn->receiver) == EXITING)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(&sock);
			ok = drbd_socket_okay(&msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = tconn->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	tconn->data.socket = sock;
	tconn->meta.socket = msock;
	tconn->last_received = jiffies;

	h = drbd_do_handshake(tconn);
	if (h <= 0)
		return h;

	if (tconn->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(tconn)) {
		case -1:
			conn_err(tconn, "Authentication of peer failed\n");
			return -1;
		case 0:
			conn_err(tconn, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	if (conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE) < SS_SUCCESS)
		return 0;

	sock->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	drbd_thread_start(&tconn->asender);

	if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
		return -1;

	return !idr_for_each(&tconn->volumes, drbd_connected, tconn);

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}

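/*
 * Sketch of how a caller is expected to treat drbd_connect()'s return value
 * (illustration only; the actual receiver main loop lives elsewhere):
 *
 *	int h = drbd_connect(tconn);
 *	if (h == 0)		// crossed packets / soft failure: try again
 *		goto retry;
 *	if (h < 0)		// protocol mismatch or no net config: give up
 *		goto standalone;
 *	// h > 0: both sockets are up, start receiving packets
 */
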
static int decode_header(struct drbd_tconn *tconn, struct p_header *h, struct packet_info *pi)
{
	if (h->h80.magic == cpu_to_be32(DRBD_MAGIC)) {
		pi->cmd = be16_to_cpu(h->h80.command);
		pi->size = be16_to_cpu(h->h80.length);
		pi->vnr = 0;
	} else if (h->h95.magic == cpu_to_be16(DRBD_MAGIC_BIG)) {
		pi->cmd = be16_to_cpu(h->h95.command);
		pi->size = be32_to_cpu(h->h95.length) & 0x00ffffff;
		pi->vnr = 0;
	} else {
		conn_err(tconn, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));
		return -EINVAL;
	}
	return 0;
}

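/*
 * Worked example for the two on-the-wire header layouts handled above
 * (sketch for illustration):
 *
 *   h80: magic is the full 32-bit DRBD_MAGIC; command and length are both
 *	16 bit, so one packet can describe at most 65535 bytes of payload.
 *   h95: magic is the 16-bit DRBD_MAGIC_BIG; the length field is 32 bit but
 *	only the low 24 bits are significant, hence the mask:
 *
 *	pi->size = be32_to_cpu(h->h95.length) & 0x00ffffff;	// < 16 MiB
 */
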
static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct p_header *h = &tconn->data.rbuf.header;
	int err;

	err = drbd_recv_all_warn(tconn, h, sizeof(*h));
	if (err)
		return err;

	err = decode_header(tconn, h, pi);
	tconn->last_received = jiffies;

	return err;
}

static void drbd_flush(struct drbd_conf *mdev)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do*/
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
				wake_up(&mdev->ee_wait);
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	return rv;
}

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}

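/*
 * Note on the min() above (explanatory sketch): the write_ordering_e values
 * are assumed to be ordered WO_none < WO_drain_io < WO_bdev_flush, so
 * drbd_bump_write_ordering() can only ever degrade the method, never upgrade
 * it.  E.g. with a current method of "drain":
 *
 *	drbd_bump_write_ordering(mdev, WO_bdev_flush);	// stays WO_drain_io
 *	drbd_bump_write_ordering(mdev, WO_none);	// drops to WO_none
 */
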
/**
 * drbd_submit_peer_request()
 * @mdev:	DRBD device.
 * @peer_req:	peer request
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_conf *mdev,
			     struct drbd_peer_request *peer_req,
			     const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = peer_req->pages;
	sector_t sector = peer_req->i.sector;
	unsigned ds = peer_req->i.size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int err = -ENOMEM;

	/* In most cases, we will only need one bio.  But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio.
	 *
	 * Plain bio_alloc is good enough here, this is no DRBD internally
	 * generated bio, but a bio allocated on behalf of the peer.
	 */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > peer_req->i.sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = peer_req;
	bio->bi_end_io = drbd_peer_request_endio;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyways,
			 * we deal with it, and complain (below). */
			if (bio->bi_vcnt == 0) {
				dev_err(DEV,
					"bio_add_page failed for len=%u, "
					"bi_vcnt=0 (bi_sector=%llu)\n",
					len, (unsigned long long)bio->bi_sector);
				err = -ENOSPC;
				goto fail;
			}
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&peer_req->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);
	} while (bios);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return err;
}

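/*
 * Splitting sketch (illustration only): a 16 KiB peer request arrives as a
 * chain of four pages.  If the local queue limits let bio_add_page() accept
 * only two of them for the first bio, the loop above jumps back to next_bio
 * and continues with a second bio at the advanced sector:
 *
 *	bio 0: sector      , pages 0..1
 *	bio 1: sector + 16 , pages 2..3		// 16 = 2 pages * 8 sectors/page
 *
 * pending_bios is set to the number of bios actually created, so the
 * completion side knows when the whole peer request is finished.
 */
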
static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
					     struct drbd_peer_request *peer_req)
{
	struct drbd_interval *i = &peer_req->i;

	drbd_remove_interval(&mdev->write_requests, i);
	drbd_clear_interval(i);

	/* Wake up any processes waiting for this peer request to complete.  */
	if (i->waiting)
		wake_up(&mdev->misc_wait);
}

static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packet cmd,
			   unsigned int data_size)
{
	int rv;
	struct p_barrier *p = &mdev->tconn->data.rbuf.barrier;
	struct drbd_epoch *epoch;

	inc_unacked(mdev);

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
	case WO_none:
		if (rv == FE_RECYCLED)
			return 0;

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		if (epoch)
			break;
		else
			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
			/* Fall through */

	case WO_bdev_flush:
	case WO_drain_io:
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		drbd_flush(mdev);

		if (atomic_read(&mdev->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			if (epoch)
				break;
		}

		epoch = mdev->current_epoch;
		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);

		D_ASSERT(atomic_read(&epoch->active) == 0);
		D_ASSERT(epoch->flags == 0);

		return 0;
	default:
		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
		return -EIO;
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		mdev->epochs++;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
	spin_unlock(&mdev->epoch_lock);

	return 0;
}

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_peer_request *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
	      int data_size) __must_hold(local)
{
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_peer_request *peer_req;
	struct page *page;
	int dgs, ds, err;
	void *dig_in = mdev->tconn->int_dig_in;
	void *dig_vv = mdev->tconn->int_dig_vv;
	unsigned long *data;

	dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->tconn->integrity_r_tfm) : 0;

	if (dgs) {
		err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
		if (err)
			return NULL;
	}

	data_size -= dgs;

	if (!expect(data_size != 0))
		return NULL;
	if (!expect(IS_ALIGNED(data_size, 512)))
		return NULL;
	if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
		return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "request from peer beyond end of local disk: "
			"capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);
		return NULL;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	peer_req = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
	if (!peer_req)
		return NULL;

	ds = data_size;
	page = peer_req->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		data = kmap(page);
		err = drbd_recv_all_warn(mdev->tconn, data, len);
		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
		}
		kunmap(page);
		if (err) {
			drbd_free_ee(mdev, peer_req);
			return NULL;
		}
		ds -= len;
	}

	if (dgs) {
		drbd_csum_ee(mdev, mdev->tconn->integrity_r_tfm, peer_req, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_free_ee(mdev, peer_req);
			return NULL;
		}
	}
	mdev->recv_cnt += data_size>>9;
	return peer_req;
}

/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
	struct page *page;
	int err = 0;
	void *data;

	if (!data_size)
		return 0;

	page = drbd_pp_alloc(mdev, 1, 1);

	data = kmap(page);
	while (data_size) {
		unsigned int len = min_t(int, data_size, PAGE_SIZE);

		err = drbd_recv_all_warn(mdev->tconn, data, len);
		if (err)
			break;
		data_size -= len;
	}
	kunmap(page);
	drbd_pp_free(mdev, page, 0);
	return err;
}

static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
{
	struct bio_vec *bvec;
	struct bio *bio;
	int dgs, err, i, expect;
	void *dig_in = mdev->tconn->int_dig_in;
	void *dig_vv = mdev->tconn->int_dig_vv;

	dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->tconn->integrity_r_tfm) : 0;

	if (dgs) {
		err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
		if (err)
			return err;
	}

	data_size -= dgs;

	/* optimistically update recv_cnt.  if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
		expect = min_t(int, data_size, bvec->bv_len);
		err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
		kunmap(bvec->bv_page);
		if (err)
			return err;
		data_size -= expect;
	}

	if (dgs) {
		drbd_csum_bio(mdev, mdev->tconn->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
			return -EINVAL;
		}
	}

	D_ASSERT(data_size == 0);
	return 0;
}

/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_work *w, int unused)
{
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	struct drbd_conf *mdev = w->mdev;
	sector_t sector = peer_req->i.sector;
	int err;

	D_ASSERT(drbd_interval_empty(&peer_req->i));

	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, peer_req->i.size);
		err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, peer_req->i.size);

		err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
	}
	dec_unacked(mdev);

	return err;
}

static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
	struct drbd_peer_request *peer_req;

	peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
	if (!peer_req)
		goto fail;

	dec_rs_pending(mdev);

	inc_unacked(mdev);
	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */

	peer_req->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_add(&peer_req->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
		return 0;

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&peer_req->w.list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	drbd_free_ee(mdev, peer_req);
fail:
	put_ldev(mdev);
	return -EIO;
}

static struct drbd_request *
find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
	     sector_t sector, bool missing_ok, const char *func)
{
	struct drbd_request *req;

	/* Request object according to our peer */
	req = (struct drbd_request *)(unsigned long)id;
	if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
		return req;
	if (!missing_ok) {
		dev_err(DEV, "%s: failed to find request %lu, sector %llus\n", func,
			(unsigned long)id, (unsigned long long)sector);
	}
	return NULL;
}

static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packet cmd,
			     unsigned int data_size)
{
	struct drbd_request *req;
	sector_t sector;
	int err;
	struct p_data *p = &mdev->tconn->data.rbuf.data;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->tconn->req_lock);
	req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
	spin_unlock_irq(&mdev->tconn->req_lock);
	if (unlikely(!req))
		return -EIO;

	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	err = recv_dless_read(mdev, req, sector, data_size);
	if (!err)
		req_mod(req, DATA_RECEIVED);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

	return err;
}

static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packet cmd,
			       unsigned int data_size)
{
	sector_t sector;
	int err;
	struct p_data *p = &mdev->tconn->data.rbuf.data;

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_peer_request_endio. */
		err = recv_resync_read(mdev, sector, data_size);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		err = drbd_drain_block(mdev, data_size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
	}

	atomic_add(data_size >> 9, &mdev->rs_sect_in);

	return err;
}

static int w_restart_write(struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);
	struct drbd_conf *mdev = w->mdev;
	struct bio *bio;
	unsigned long start_time;
	unsigned long flags;

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	if (!expect(req->rq_state & RQ_POSTPONED)) {
		spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
		return -EIO;
	}
	bio = req->master_bio;
	start_time = req->start_time;
	/* Postponed requests will not have their master_bio completed! */
	__req_mod(req, DISCARD_WRITE, NULL);
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	while (__drbd_make_request(mdev, bio, start_time))
		/* retry */ ;
	return 0;
}

static void restart_conflicting_writes(struct drbd_conf *mdev,
				       sector_t sector, int size)
{
	struct drbd_interval *i;
	struct drbd_request *req;

	drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
		if (!i->local)
			continue;
		req = container_of(i, struct drbd_request, i);
		if (req->rq_state & RQ_LOCAL_PENDING ||
		    !(req->rq_state & RQ_POSTPONED))
			continue;
		if (expect(list_empty(&req->w.list))) {
			req->w.mdev = mdev;
			req->w.cb = w_restart_write;
			drbd_queue_work(&mdev->tconn->data.work, &req->w);
		}
	}
}

/* e_end_block() is called via drbd_process_done_ee().
 * this means this function only runs in the asender thread
 */
static int e_end_block(struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	struct drbd_conf *mdev = w->mdev;
	sector_t sector = peer_req->i.sector;
	int err = 0, pcmd;

	if (mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C) {
		if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				peer_req->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			err = drbd_send_ack(mdev, pcmd, peer_req);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, peer_req->i.size);
		} else {
			err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this?  */
		}
		dec_unacked(mdev);
	}
	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right.  */
	if (mdev->tconn->net_conf->two_primaries) {
		spin_lock_irq(&mdev->tconn->req_lock);
		D_ASSERT(!drbd_interval_empty(&peer_req->i));
		drbd_remove_epoch_entry_interval(mdev, peer_req);
		if (peer_req->flags & EE_RESTART_REQUESTS)
			restart_conflicting_writes(mdev, sector, peer_req->i.size);
		spin_unlock_irq(&mdev->tconn->req_lock);
	} else
		D_ASSERT(drbd_interval_empty(&peer_req->i));

	drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

	return err;
}

static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
{
	struct drbd_conf *mdev = w->mdev;
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	int err;

	err = drbd_send_ack(mdev, ack, peer_req);
	dec_unacked(mdev);

	return err;
}

static int e_send_discard_write(struct drbd_work *w, int unused)
{
	return e_send_ack(w, P_DISCARD_WRITE);
}

static int e_send_retry_write(struct drbd_work *w, int unused)
{
	struct drbd_tconn *tconn = w->mdev->tconn;

	return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
			     P_RETRY_WRITE : P_DISCARD_WRITE);
}

static bool seq_greater(u32 a, u32 b)
{
	/*
	 * We assume 32-bit wrap-around here.
	 * For 24-bit wrap-around, we would have to shift:
	 * a <<= 8; b <<= 8;
	 */
	return (s32)a - (s32)b > 0;
}

static u32 seq_max(u32 a, u32 b)
{
	return seq_greater(a, b) ? a : b;
}

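/*
 * Wrap-around example for seq_greater() (illustration): with 32-bit sequence
 * numbers, 0x00000001 counts as "greater" than 0xffffffff because the signed
 * difference is what gets evaluated:
 *
 *	(s32)0x00000001 - (s32)0xffffffff ==  2  > 0	// wrapped, but newer
 *	(s32)0xffffffff - (s32)0x00000001 == -2 <= 0	// therefore older
 *
 * seq_max() relies on the same property to pick the newer of two values.
 */
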
7be8da07
AG
1731static bool need_peer_seq(struct drbd_conf *mdev)
1732{
1733 struct drbd_tconn *tconn = mdev->tconn;
1734
1735 /*
1736 * We only need to keep track of the last packet_seq number of our peer
1737 * if we are in dual-primary mode and we have the discard flag set; see
1738 * handle_write_conflicts().
1739 */
1740 return tconn->net_conf->two_primaries &&
1741 test_bit(DISCARD_CONCURRENT, &tconn->flags);
1742}
1743
43ae077d 1744static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
3e394da1 1745{
3c13b680 1746 unsigned int newest_peer_seq;
3e394da1 1747
7be8da07
AG
1748 if (need_peer_seq(mdev)) {
1749 spin_lock(&mdev->peer_seq_lock);
3c13b680
LE
1750 newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
1751 mdev->peer_seq = newest_peer_seq;
7be8da07 1752 spin_unlock(&mdev->peer_seq_lock);
3c13b680
LE
1753 /* wake up only if we actually changed mdev->peer_seq */
1754 if (peer_seq == newest_peer_seq)
7be8da07
AG
1755 wake_up(&mdev->seq_wait);
1756 }
3e394da1
AG
1757}
1758
b411b363
PR
1759/* Called from receive_Data.
1760 * Synchronize packets on sock with packets on msock.
1761 *
1762 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1763 * packet traveling on msock, they are still processed in the order they have
1764 * been sent.
1765 *
1766 * Note: we don't care for Ack packets overtaking P_DATA packets.
1767 *
1768 * In case packet_seq is larger than mdev->peer_seq number, there are
1769 * outstanding packets on the msock. We wait for them to arrive.
 1770 * In case this is the logically next packet, we update mdev->peer_seq
1771 * ourselves. Correctly handles 32bit wrap around.
1772 *
1773 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1774 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1775 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 1776 * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
1777 *
1778 * returns 0 if we may process the packet,
1779 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
7be8da07 1780static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
b411b363
PR
1781{
1782 DEFINE_WAIT(wait);
b411b363 1783 long timeout;
7be8da07
AG
1784 int ret;
1785
1786 if (!need_peer_seq(mdev))
1787 return 0;
1788
b411b363
PR
1789 spin_lock(&mdev->peer_seq_lock);
1790 for (;;) {
7be8da07
AG
1791 if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
1792 mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
1793 ret = 0;
b411b363 1794 break;
7be8da07 1795 }
b411b363
PR
1796 if (signal_pending(current)) {
1797 ret = -ERESTARTSYS;
1798 break;
1799 }
7be8da07 1800 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
b411b363 1801 spin_unlock(&mdev->peer_seq_lock);
71b1c1eb
AG
1802 timeout = mdev->tconn->net_conf->ping_timeo*HZ/10;
1803 timeout = schedule_timeout(timeout);
b411b363 1804 spin_lock(&mdev->peer_seq_lock);
7be8da07 1805 if (!timeout) {
b411b363 1806 ret = -ETIMEDOUT;
71b1c1eb 1807 dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
b411b363
PR
1808 break;
1809 }
1810 }
b411b363 1811 spin_unlock(&mdev->peer_seq_lock);
7be8da07 1812 finish_wait(&mdev->seq_wait, &wait);
b411b363
PR
1813 return ret;
1814}
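/*
 * Illustrative example for the wait loop above: with mdev->peer_seq == 7,
 * a P_DATA packet carrying peer_seq == 9 makes seq_greater(9 - 1, 7) true,
 * so the receiver sleeps on seq_wait until the packet with sequence
 * number 8 has been processed on the msock (or the ping timeout expires)
 * before it handles the write.
 */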
1815
688593c5
LE
1816/* see also bio_flags_to_wire()
1817 * DRBD_REQ_*, because we need to semantically map the flags to data packet
1818 * flags and back. We may replicate to other kernel versions. */
1819static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
76d2e7ec 1820{
688593c5
LE
1821 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
1822 (dpf & DP_FUA ? REQ_FUA : 0) |
1823 (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
1824 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
76d2e7ec
PR
1825}
1826
7be8da07
AG
1827static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
1828 unsigned int size)
1829{
1830 struct drbd_interval *i;
1831
1832 repeat:
1833 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
1834 struct drbd_request *req;
1835 struct bio_and_error m;
1836
1837 if (!i->local)
1838 continue;
1839 req = container_of(i, struct drbd_request, i);
1840 if (!(req->rq_state & RQ_POSTPONED))
1841 continue;
1842 req->rq_state &= ~RQ_POSTPONED;
1843 __req_mod(req, NEG_ACKED, &m);
1844 spin_unlock_irq(&mdev->tconn->req_lock);
1845 if (m.bio)
1846 complete_master_bio(mdev, &m);
1847 spin_lock_irq(&mdev->tconn->req_lock);
1848 goto repeat;
1849 }
1850}
1851
1852static int handle_write_conflicts(struct drbd_conf *mdev,
1853 struct drbd_peer_request *peer_req)
1854{
1855 struct drbd_tconn *tconn = mdev->tconn;
1856 bool resolve_conflicts = test_bit(DISCARD_CONCURRENT, &tconn->flags);
1857 sector_t sector = peer_req->i.sector;
1858 const unsigned int size = peer_req->i.size;
1859 struct drbd_interval *i;
1860 bool equal;
1861 int err;
1862
1863 /*
1864 * Inserting the peer request into the write_requests tree will prevent
1865 * new conflicting local requests from being added.
1866 */
1867 drbd_insert_interval(&mdev->write_requests, &peer_req->i);
1868
1869 repeat:
1870 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
1871 if (i == &peer_req->i)
1872 continue;
1873
1874 if (!i->local) {
1875 /*
1876 * Our peer has sent a conflicting remote request; this
1877 * should not happen in a two-node setup. Wait for the
1878 * earlier peer request to complete.
1879 */
1880 err = drbd_wait_misc(mdev, i);
1881 if (err)
1882 goto out;
1883 goto repeat;
1884 }
1885
1886 equal = i->sector == sector && i->size == size;
1887 if (resolve_conflicts) {
1888 /*
1889 * If the peer request is fully contained within the
1890 * overlapping request, it can be discarded; otherwise,
1891 * it will be retried once all overlapping requests
1892 * have completed.
1893 */
1894 bool discard = i->sector <= sector && i->sector +
1895 (i->size >> 9) >= sector + (size >> 9);
1896
1897 if (!equal)
1898 dev_alert(DEV, "Concurrent writes detected: "
1899 "local=%llus +%u, remote=%llus +%u, "
1900 "assuming %s came first\n",
1901 (unsigned long long)i->sector, i->size,
1902 (unsigned long long)sector, size,
1903 discard ? "local" : "remote");
1904
1905 inc_unacked(mdev);
1906 peer_req->w.cb = discard ? e_send_discard_write :
1907 e_send_retry_write;
1908 list_add_tail(&peer_req->w.list, &mdev->done_ee);
1909 wake_asender(mdev->tconn);
1910
1911 err = -ENOENT;
1912 goto out;
1913 } else {
1914 struct drbd_request *req =
1915 container_of(i, struct drbd_request, i);
1916
1917 if (!equal)
1918 dev_alert(DEV, "Concurrent writes detected: "
1919 "local=%llus +%u, remote=%llus +%u\n",
1920 (unsigned long long)i->sector, i->size,
1921 (unsigned long long)sector, size);
1922
1923 if (req->rq_state & RQ_LOCAL_PENDING ||
1924 !(req->rq_state & RQ_POSTPONED)) {
1925 /*
1926 * Wait for the node with the discard flag to
1927 * decide if this request will be discarded or
1928 * retried. Requests that are discarded will
1929 * disappear from the write_requests tree.
1930 *
1931 * In addition, wait for the conflicting
1932 * request to finish locally before submitting
1933 * the conflicting peer request.
1934 */
1935 err = drbd_wait_misc(mdev, &req->i);
1936 if (err) {
1937 _conn_request_state(mdev->tconn,
1938 NS(conn, C_TIMEOUT),
1939 CS_HARD);
1940 fail_postponed_requests(mdev, sector, size);
1941 goto out;
1942 }
1943 goto repeat;
1944 }
1945 /*
1946 * Remember to restart the conflicting requests after
1947 * the new peer request has completed.
1948 */
1949 peer_req->flags |= EE_RESTART_REQUESTS;
1950 }
1951 }
1952 err = 0;
1953
1954 out:
1955 if (err)
1956 drbd_remove_epoch_entry_interval(mdev, peer_req);
1957 return err;
1958}
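/*
 * Illustrative example of the discard decision above: if a local request
 * already covers sectors [1000, 1016) and the conflicting peer request
 * covers [1004, 1008), the peer request is fully contained, so on the
 * node holding the discard flag it is answered with P_DISCARD_WRITE;
 * a partially overlapping peer request would be answered with
 * P_RETRY_WRITE instead (P_DISCARD_WRITE before protocol 100).
 */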
1959
b411b363 1960/* mirrored write */
d8763023
AG
1961static int receive_Data(struct drbd_conf *mdev, enum drbd_packet cmd,
1962 unsigned int data_size)
b411b363
PR
1963{
1964 sector_t sector;
db830c46 1965 struct drbd_peer_request *peer_req;
e42325a5 1966 struct p_data *p = &mdev->tconn->data.rbuf.data;
7be8da07 1967 u32 peer_seq = be32_to_cpu(p->seq_num);
b411b363
PR
1968 int rw = WRITE;
1969 u32 dp_flags;
7be8da07 1970 int err;
b411b363 1971
7be8da07 1972 if (!get_ldev(mdev)) {
82bc0194
AG
1973 int err2;
1974
7be8da07 1975 err = wait_for_and_update_peer_seq(mdev, peer_seq);
2b2bf214 1976 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
b411b363 1977 atomic_inc(&mdev->current_epoch->epoch_size);
82bc0194
AG
1978 err2 = drbd_drain_block(mdev, data_size);
1979 if (!err)
1980 err = err2;
1981 return err;
b411b363
PR
1982 }
1983
fcefa62e
AG
1984 /*
1985 * Corresponding put_ldev done either below (on various errors), or in
1986 * drbd_peer_request_endio, if we successfully submit the data at the
1987 * end of this function.
1988 */
b411b363
PR
1989
1990 sector = be64_to_cpu(p->sector);
db830c46
AG
1991 peer_req = read_in_block(mdev, p->block_id, sector, data_size);
1992 if (!peer_req) {
b411b363 1993 put_ldev(mdev);
82bc0194 1994 return -EIO;
b411b363
PR
1995 }
1996
db830c46 1997 peer_req->w.cb = e_end_block;
b411b363 1998
688593c5
LE
1999 dp_flags = be32_to_cpu(p->dp_flags);
2000 rw |= wire_flags_to_bio(mdev, dp_flags);
2001
2002 if (dp_flags & DP_MAY_SET_IN_SYNC)
db830c46 2003 peer_req->flags |= EE_MAY_SET_IN_SYNC;
688593c5 2004
b411b363 2005 spin_lock(&mdev->epoch_lock);
db830c46
AG
2006 peer_req->epoch = mdev->current_epoch;
2007 atomic_inc(&peer_req->epoch->epoch_size);
2008 atomic_inc(&peer_req->epoch->active);
b411b363
PR
2009 spin_unlock(&mdev->epoch_lock);
2010
7be8da07
AG
2011 if (mdev->tconn->net_conf->two_primaries) {
2012 err = wait_for_and_update_peer_seq(mdev, peer_seq);
2013 if (err)
b411b363 2014 goto out_interrupted;
87eeee41 2015 spin_lock_irq(&mdev->tconn->req_lock);
7be8da07
AG
2016 err = handle_write_conflicts(mdev, peer_req);
2017 if (err) {
2018 spin_unlock_irq(&mdev->tconn->req_lock);
2019 if (err == -ENOENT) {
b411b363 2020 put_ldev(mdev);
82bc0194 2021 return 0;
b411b363 2022 }
7be8da07 2023 goto out_interrupted;
b411b363 2024 }
7be8da07
AG
2025 } else
2026 spin_lock_irq(&mdev->tconn->req_lock);
db830c46 2027 list_add(&peer_req->w.list, &mdev->active_ee);
87eeee41 2028 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 2029
89e58e75 2030 switch (mdev->tconn->net_conf->wire_protocol) {
b411b363
PR
2031 case DRBD_PROT_C:
2032 inc_unacked(mdev);
2033 /* corresponding dec_unacked() in e_end_block()
2034 * respective _drbd_clear_done_ee */
2035 break;
2036 case DRBD_PROT_B:
2037 /* I really don't like it that the receiver thread
2038 * sends on the msock, but anyways */
db830c46 2039 drbd_send_ack(mdev, P_RECV_ACK, peer_req);
b411b363
PR
2040 break;
2041 case DRBD_PROT_A:
2042 /* nothing to do */
2043 break;
2044 }
2045
6719fb03 2046 if (mdev->state.pdsk < D_INCONSISTENT) {
b411b363 2047 /* In case we have the only disk of the cluster, */
db830c46
AG
2048 drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
2049 peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
2050 peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
2051 drbd_al_begin_io(mdev, peer_req->i.sector);
b411b363
PR
2052 }
2053
82bc0194
AG
2054 err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
2055 if (!err)
2056 return 0;
b411b363 2057
10f6d992
LE
2058 /* don't care for the reason here */
2059 dev_err(DEV, "submit failed, triggering re-connect\n");
87eeee41 2060 spin_lock_irq(&mdev->tconn->req_lock);
db830c46
AG
2061 list_del(&peer_req->w.list);
2062 drbd_remove_epoch_entry_interval(mdev, peer_req);
87eeee41 2063 spin_unlock_irq(&mdev->tconn->req_lock);
db830c46
AG
2064 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
2065 drbd_al_complete_io(mdev, peer_req->i.sector);
22cc37a9 2066
b411b363 2067out_interrupted:
db830c46 2068 drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + EV_CLEANUP);
b411b363 2069 put_ldev(mdev);
db830c46 2070 drbd_free_ee(mdev, peer_req);
82bc0194 2071 return err;
b411b363
PR
2072}
2073
0f0601f4
LE
2074/* We may throttle resync, if the lower device seems to be busy,
2075 * and current sync rate is above c_min_rate.
2076 *
2077 * To decide whether or not the lower device is busy, we use a scheme similar
 2078 * to MD RAID is_mddev_idle(): if the partition stats reveal a "significant"
 2079 * amount (more than 64 sectors) of activity we cannot account for with our
 2080 * own resync activity, it obviously is "busy".
2081 *
2082 * The current sync rate used here uses only the most recent two step marks,
2083 * to have a short time average so we can react faster.
2084 */
e3555d85 2085int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
0f0601f4
LE
2086{
2087 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
2088 unsigned long db, dt, dbdt;
e3555d85 2089 struct lc_element *tmp;
0f0601f4
LE
2090 int curr_events;
2091 int throttle = 0;
2092
2093 /* feature disabled? */
f399002e 2094 if (mdev->ldev->dc.c_min_rate == 0)
0f0601f4
LE
2095 return 0;
2096
e3555d85
PR
2097 spin_lock_irq(&mdev->al_lock);
2098 tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
2099 if (tmp) {
2100 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
2101 if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
2102 spin_unlock_irq(&mdev->al_lock);
2103 return 0;
2104 }
2105 /* Do not slow down if app IO is already waiting for this extent */
2106 }
2107 spin_unlock_irq(&mdev->al_lock);
2108
0f0601f4
LE
2109 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
2110 (int)part_stat_read(&disk->part0, sectors[1]) -
2111 atomic_read(&mdev->rs_sect_ev);
e3555d85 2112
0f0601f4
LE
2113 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
2114 unsigned long rs_left;
2115 int i;
2116
2117 mdev->rs_last_events = curr_events;
2118
2119 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2120 * approx. */
2649f080
LE
2121 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2122
2123 if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
2124 rs_left = mdev->ov_left;
2125 else
2126 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
0f0601f4
LE
2127
2128 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
2129 if (!dt)
2130 dt++;
2131 db = mdev->rs_mark_left[i] - rs_left;
2132 dbdt = Bit2KB(db/dt);
2133
f399002e 2134 if (dbdt > mdev->ldev->dc.c_min_rate)
0f0601f4
LE
2135 throttle = 1;
2136 }
2137 return throttle;
2138}
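/*
 * Rough example of the throttle arithmetic above (values are made up;
 * assumes the usual 4 KiB of data per bitmap bit): if the two most
 * recent sync marks are dt == 3 seconds apart and the out-of-sync count
 * dropped by db == 7680 bits in that time, then dbdt == Bit2KB(7680/3)
 * == 10240 KiB/s.  With c_min_rate configured at 4000 KiB/s and more
 * than 64 sectors of unaccounted disk activity, the request is throttled.
 */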
2139
2140
d8763023
AG
2141static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packet cmd,
2142 unsigned int digest_size)
b411b363
PR
2143{
2144 sector_t sector;
2145 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
db830c46 2146 struct drbd_peer_request *peer_req;
b411b363 2147 struct digest_info *di = NULL;
b18b37be 2148 int size, verb;
b411b363 2149 unsigned int fault_type;
e42325a5 2150 struct p_block_req *p = &mdev->tconn->data.rbuf.block_req;
b411b363
PR
2151
2152 sector = be64_to_cpu(p->sector);
2153 size = be32_to_cpu(p->blksize);
2154
c670a398 2155 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
b411b363
PR
2156 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2157 (unsigned long long)sector, size);
82bc0194 2158 return -EINVAL;
b411b363
PR
2159 }
2160 if (sector + (size>>9) > capacity) {
2161 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2162 (unsigned long long)sector, size);
82bc0194 2163 return -EINVAL;
b411b363
PR
2164 }
2165
2166 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
b18b37be
PR
2167 verb = 1;
2168 switch (cmd) {
2169 case P_DATA_REQUEST:
2170 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
2171 break;
2172 case P_RS_DATA_REQUEST:
2173 case P_CSUM_RS_REQUEST:
2174 case P_OV_REQUEST:
2175 drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
2176 break;
2177 case P_OV_REPLY:
2178 verb = 0;
2179 dec_rs_pending(mdev);
2180 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
2181 break;
2182 default:
2183 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
2184 cmdname(cmd));
2185 }
2186 if (verb && __ratelimit(&drbd_ratelimit_state))
b411b363
PR
2187 dev_err(DEV, "Can not satisfy peer's read request, "
2188 "no local data.\n");
b18b37be 2189
a821cc4a 2190	/* drain possible payload */
82bc0194 2191 return drbd_drain_block(mdev, digest_size);
b411b363
PR
2192 }
2193
2194 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2195 * "criss-cross" setup, that might cause write-out on some other DRBD,
2196 * which in turn might block on the other node at this very place. */
db830c46
AG
2197 peer_req = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
2198 if (!peer_req) {
b411b363 2199 put_ldev(mdev);
82bc0194 2200 return -ENOMEM;
b411b363
PR
2201 }
2202
02918be2 2203 switch (cmd) {
b411b363 2204 case P_DATA_REQUEST:
db830c46 2205 peer_req->w.cb = w_e_end_data_req;
b411b363 2206 fault_type = DRBD_FAULT_DT_RD;
80a40e43
LE
2207 /* application IO, don't drbd_rs_begin_io */
2208 goto submit;
2209
b411b363 2210 case P_RS_DATA_REQUEST:
db830c46 2211 peer_req->w.cb = w_e_end_rsdata_req;
b411b363 2212 fault_type = DRBD_FAULT_RS_RD;
5f9915bb
LE
2213 /* used in the sector offset progress display */
2214 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
b411b363
PR
2215 break;
2216
2217 case P_OV_REPLY:
2218 case P_CSUM_RS_REQUEST:
2219 fault_type = DRBD_FAULT_RS_RD;
b411b363
PR
2220 di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
2221 if (!di)
2222 goto out_free_e;
2223
2224 di->digest_size = digest_size;
2225 di->digest = (((char *)di)+sizeof(struct digest_info));
2226
db830c46
AG
2227 peer_req->digest = di;
2228 peer_req->flags |= EE_HAS_DIGEST;
c36c3ced 2229
de0ff338 2230 if (drbd_recv(mdev->tconn, di->digest, digest_size) != digest_size)
b411b363
PR
2231 goto out_free_e;
2232
02918be2 2233 if (cmd == P_CSUM_RS_REQUEST) {
31890f4a 2234 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
db830c46 2235 peer_req->w.cb = w_e_end_csum_rs_req;
5f9915bb
LE
2236 /* used in the sector offset progress display */
2237 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
02918be2 2238 } else if (cmd == P_OV_REPLY) {
2649f080
LE
2239 /* track progress, we may need to throttle */
2240 atomic_add(size >> 9, &mdev->rs_sect_in);
db830c46 2241 peer_req->w.cb = w_e_end_ov_reply;
b411b363 2242 dec_rs_pending(mdev);
0f0601f4
LE
2243 /* drbd_rs_begin_io done when we sent this request,
2244 * but accounting still needs to be done. */
2245 goto submit_for_resync;
b411b363
PR
2246 }
2247 break;
2248
2249 case P_OV_REQUEST:
b411b363 2250 if (mdev->ov_start_sector == ~(sector_t)0 &&
31890f4a 2251 mdev->tconn->agreed_pro_version >= 90) {
de228bba
LE
2252 unsigned long now = jiffies;
2253 int i;
b411b363
PR
2254 mdev->ov_start_sector = sector;
2255 mdev->ov_position = sector;
30b743a2
LE
2256 mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2257 mdev->rs_total = mdev->ov_left;
de228bba
LE
2258 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2259 mdev->rs_mark_left[i] = mdev->ov_left;
2260 mdev->rs_mark_time[i] = now;
2261 }
b411b363
PR
2262 dev_info(DEV, "Online Verify start sector: %llu\n",
2263 (unsigned long long)sector);
2264 }
db830c46 2265 peer_req->w.cb = w_e_end_ov_req;
b411b363 2266 fault_type = DRBD_FAULT_RS_RD;
b411b363
PR
2267 break;
2268
b411b363
PR
2269 default:
2270 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
02918be2 2271 cmdname(cmd));
b411b363 2272 fault_type = DRBD_FAULT_MAX;
80a40e43 2273 goto out_free_e;
b411b363
PR
2274 }
2275
0f0601f4
LE
2276 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2277 * wrt the receiver, but it is not as straightforward as it may seem.
2278 * Various places in the resync start and stop logic assume resync
2279 * requests are processed in order, requeuing this on the worker thread
2280 * introduces a bunch of new code for synchronization between threads.
2281 *
2282 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2283 * "forever", throttling after drbd_rs_begin_io will lock that extent
2284 * for application writes for the same time. For now, just throttle
2285 * here, where the rest of the code expects the receiver to sleep for
2286 * a while, anyways.
2287 */
2288
2289 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2290 * this defers syncer requests for some time, before letting at least
 2291	 * one request through. The resync controller on the receiving side
2292 * will adapt to the incoming rate accordingly.
2293 *
2294 * We cannot throttle here if remote is Primary/SyncTarget:
2295 * we would also throttle its application reads.
2296 * In that case, throttling is done on the SyncTarget only.
2297 */
e3555d85
PR
2298 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2299 schedule_timeout_uninterruptible(HZ/10);
2300 if (drbd_rs_begin_io(mdev, sector))
80a40e43 2301 goto out_free_e;
b411b363 2302
0f0601f4
LE
2303submit_for_resync:
2304 atomic_add(size >> 9, &mdev->rs_sect_ev);
2305
80a40e43 2306submit:
b411b363 2307 inc_unacked(mdev);
87eeee41 2308 spin_lock_irq(&mdev->tconn->req_lock);
db830c46 2309 list_add_tail(&peer_req->w.list, &mdev->read_ee);
87eeee41 2310 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 2311
fbe29dec 2312 if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
82bc0194 2313 return 0;
b411b363 2314
10f6d992
LE
2315 /* don't care for the reason here */
2316 dev_err(DEV, "submit failed, triggering re-connect\n");
87eeee41 2317 spin_lock_irq(&mdev->tconn->req_lock);
db830c46 2318 list_del(&peer_req->w.list);
87eeee41 2319 spin_unlock_irq(&mdev->tconn->req_lock);
22cc37a9
LE
2320 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2321
b411b363 2322out_free_e:
b411b363 2323 put_ldev(mdev);
db830c46 2324 drbd_free_ee(mdev, peer_req);
82bc0194 2325 return -EIO;
b411b363
PR
2326}
2327
2328static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2329{
2330 int self, peer, rv = -100;
2331 unsigned long ch_self, ch_peer;
2332
2333 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2334 peer = mdev->p_uuid[UI_BITMAP] & 1;
2335
2336 ch_peer = mdev->p_uuid[UI_SIZE];
2337 ch_self = mdev->comm_bm_set;
2338
89e58e75 2339 switch (mdev->tconn->net_conf->after_sb_0p) {
b411b363
PR
2340 case ASB_CONSENSUS:
2341 case ASB_DISCARD_SECONDARY:
2342 case ASB_CALL_HELPER:
2343 dev_err(DEV, "Configuration error.\n");
2344 break;
2345 case ASB_DISCONNECT:
2346 break;
2347 case ASB_DISCARD_YOUNGER_PRI:
2348 if (self == 0 && peer == 1) {
2349 rv = -1;
2350 break;
2351 }
2352 if (self == 1 && peer == 0) {
2353 rv = 1;
2354 break;
2355 }
2356 /* Else fall through to one of the other strategies... */
2357 case ASB_DISCARD_OLDER_PRI:
2358 if (self == 0 && peer == 1) {
2359 rv = 1;
2360 break;
2361 }
2362 if (self == 1 && peer == 0) {
2363 rv = -1;
2364 break;
2365 }
2366 /* Else fall through to one of the other strategies... */
ad19bf6e 2367 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
b411b363
PR
2368 "Using discard-least-changes instead\n");
2369 case ASB_DISCARD_ZERO_CHG:
2370 if (ch_peer == 0 && ch_self == 0) {
25703f83 2371 rv = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags)
b411b363
PR
2372 ? -1 : 1;
2373 break;
2374 } else {
2375 if (ch_peer == 0) { rv = 1; break; }
2376 if (ch_self == 0) { rv = -1; break; }
2377 }
89e58e75 2378 if (mdev->tconn->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
b411b363
PR
2379 break;
2380 case ASB_DISCARD_LEAST_CHG:
2381 if (ch_self < ch_peer)
2382 rv = -1;
2383 else if (ch_self > ch_peer)
2384 rv = 1;
2385 else /* ( ch_self == ch_peer ) */
2386 /* Well, then use something else. */
25703f83 2387 rv = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags)
b411b363
PR
2388 ? -1 : 1;
2389 break;
2390 case ASB_DISCARD_LOCAL:
2391 rv = -1;
2392 break;
2393 case ASB_DISCARD_REMOTE:
2394 rv = 1;
2395 }
2396
2397 return rv;
2398}
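/*
 * Illustrative example: with after-sb-0pri set to discard-least-changes,
 * a node that modified 100 blocks since the split brain (ch_self) facing
 * a peer that modified 1000 (ch_peer) returns -1 and becomes the sync
 * target, i.e. the side with fewer changes gives up its data.
 */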
2399
2400static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2401{
6184ea21 2402 int hg, rv = -100;
b411b363 2403
89e58e75 2404 switch (mdev->tconn->net_conf->after_sb_1p) {
b411b363
PR
2405 case ASB_DISCARD_YOUNGER_PRI:
2406 case ASB_DISCARD_OLDER_PRI:
2407 case ASB_DISCARD_LEAST_CHG:
2408 case ASB_DISCARD_LOCAL:
2409 case ASB_DISCARD_REMOTE:
2410 dev_err(DEV, "Configuration error.\n");
2411 break;
2412 case ASB_DISCONNECT:
2413 break;
2414 case ASB_CONSENSUS:
2415 hg = drbd_asb_recover_0p(mdev);
2416 if (hg == -1 && mdev->state.role == R_SECONDARY)
2417 rv = hg;
2418 if (hg == 1 && mdev->state.role == R_PRIMARY)
2419 rv = hg;
2420 break;
2421 case ASB_VIOLENTLY:
2422 rv = drbd_asb_recover_0p(mdev);
2423 break;
2424 case ASB_DISCARD_SECONDARY:
2425 return mdev->state.role == R_PRIMARY ? 1 : -1;
2426 case ASB_CALL_HELPER:
2427 hg = drbd_asb_recover_0p(mdev);
2428 if (hg == -1 && mdev->state.role == R_PRIMARY) {
bb437946
AG
2429 enum drbd_state_rv rv2;
2430
2431 drbd_set_role(mdev, R_SECONDARY, 0);
b411b363
PR
2432 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2433 * we might be here in C_WF_REPORT_PARAMS which is transient.
2434 * we do not need to wait for the after state change work either. */
bb437946
AG
2435 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2436 if (rv2 != SS_SUCCESS) {
b411b363
PR
2437 drbd_khelper(mdev, "pri-lost-after-sb");
2438 } else {
2439 dev_warn(DEV, "Successfully gave up primary role.\n");
2440 rv = hg;
2441 }
2442 } else
2443 rv = hg;
2444 }
2445
2446 return rv;
2447}
2448
2449static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2450{
6184ea21 2451 int hg, rv = -100;
b411b363 2452
89e58e75 2453 switch (mdev->tconn->net_conf->after_sb_2p) {
b411b363
PR
2454 case ASB_DISCARD_YOUNGER_PRI:
2455 case ASB_DISCARD_OLDER_PRI:
2456 case ASB_DISCARD_LEAST_CHG:
2457 case ASB_DISCARD_LOCAL:
2458 case ASB_DISCARD_REMOTE:
2459 case ASB_CONSENSUS:
2460 case ASB_DISCARD_SECONDARY:
2461 dev_err(DEV, "Configuration error.\n");
2462 break;
2463 case ASB_VIOLENTLY:
2464 rv = drbd_asb_recover_0p(mdev);
2465 break;
2466 case ASB_DISCONNECT:
2467 break;
2468 case ASB_CALL_HELPER:
2469 hg = drbd_asb_recover_0p(mdev);
2470 if (hg == -1) {
bb437946
AG
2471 enum drbd_state_rv rv2;
2472
b411b363
PR
2473 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2474 * we might be here in C_WF_REPORT_PARAMS which is transient.
2475 * we do not need to wait for the after state change work either. */
bb437946
AG
2476 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2477 if (rv2 != SS_SUCCESS) {
b411b363
PR
2478 drbd_khelper(mdev, "pri-lost-after-sb");
2479 } else {
2480 dev_warn(DEV, "Successfully gave up primary role.\n");
2481 rv = hg;
2482 }
2483 } else
2484 rv = hg;
2485 }
2486
2487 return rv;
2488}
2489
2490static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2491 u64 bits, u64 flags)
2492{
2493 if (!uuid) {
2494 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2495 return;
2496 }
2497 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2498 text,
2499 (unsigned long long)uuid[UI_CURRENT],
2500 (unsigned long long)uuid[UI_BITMAP],
2501 (unsigned long long)uuid[UI_HISTORY_START],
2502 (unsigned long long)uuid[UI_HISTORY_END],
2503 (unsigned long long)bits,
2504 (unsigned long long)flags);
2505}
2506
2507/*
2508 100 after split brain try auto recover
2509 2 C_SYNC_SOURCE set BitMap
2510 1 C_SYNC_SOURCE use BitMap
2511 0 no Sync
2512 -1 C_SYNC_TARGET use BitMap
2513 -2 C_SYNC_TARGET set BitMap
2514 -100 after split brain, disconnect
2515-1000 unrelated data
4a23f264
PR
2516-1091 requires proto 91
2517-1096 requires proto 96
b411b363
PR
2518 */
2519static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2520{
2521 u64 self, peer;
2522 int i, j;
2523
2524 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2525 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2526
2527 *rule_nr = 10;
2528 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2529 return 0;
2530
2531 *rule_nr = 20;
2532 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2533 peer != UUID_JUST_CREATED)
2534 return -2;
2535
2536 *rule_nr = 30;
2537 if (self != UUID_JUST_CREATED &&
2538 (peer == UUID_JUST_CREATED || peer == (u64)0))
2539 return 2;
2540
2541 if (self == peer) {
2542 int rct, dc; /* roles at crash time */
2543
2544 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2545
31890f4a 2546 if (mdev->tconn->agreed_pro_version < 91)
4a23f264 2547 return -1091;
b411b363
PR
2548
2549 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2550 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2551 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2552 drbd_uuid_set_bm(mdev, 0UL);
2553
2554 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2555 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2556 *rule_nr = 34;
2557 } else {
2558 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2559 *rule_nr = 36;
2560 }
2561
2562 return 1;
2563 }
2564
2565 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2566
31890f4a 2567 if (mdev->tconn->agreed_pro_version < 91)
4a23f264 2568 return -1091;
b411b363
PR
2569
2570 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2571 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2572 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2573
2574 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2575 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2576 mdev->p_uuid[UI_BITMAP] = 0UL;
2577
2578 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2579 *rule_nr = 35;
2580 } else {
2581 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2582 *rule_nr = 37;
2583 }
2584
2585 return -1;
2586 }
2587
2588 /* Common power [off|failure] */
2589 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2590 (mdev->p_uuid[UI_FLAGS] & 2);
2591 /* lowest bit is set when we were primary,
2592 * next bit (weight 2) is set when peer was primary */
2593 *rule_nr = 40;
2594
2595 switch (rct) {
2596 case 0: /* !self_pri && !peer_pri */ return 0;
2597 case 1: /* self_pri && !peer_pri */ return 1;
2598 case 2: /* !self_pri && peer_pri */ return -1;
2599 case 3: /* self_pri && peer_pri */
25703f83 2600 dc = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
b411b363
PR
2601 return dc ? -1 : 1;
2602 }
2603 }
2604
2605 *rule_nr = 50;
2606 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2607 if (self == peer)
2608 return -1;
2609
2610 *rule_nr = 51;
2611 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2612 if (self == peer) {
31890f4a 2613 if (mdev->tconn->agreed_pro_version < 96 ?
4a23f264
PR
2614 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2615 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2616 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
b411b363
PR
2617 /* The last P_SYNC_UUID did not get though. Undo the last start of
2618 resync as sync source modifications of the peer's UUIDs. */
2619
31890f4a 2620 if (mdev->tconn->agreed_pro_version < 91)
4a23f264 2621 return -1091;
b411b363
PR
2622
2623 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2624 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
4a23f264
PR
2625
 2626		dev_info(DEV, "Did not get last syncUUID packet, corrected:\n");
2627 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2628
b411b363
PR
2629 return -1;
2630 }
2631 }
2632
2633 *rule_nr = 60;
2634 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2635 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2636 peer = mdev->p_uuid[i] & ~((u64)1);
2637 if (self == peer)
2638 return -2;
2639 }
2640
2641 *rule_nr = 70;
2642 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2643 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2644 if (self == peer)
2645 return 1;
2646
2647 *rule_nr = 71;
2648 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2649 if (self == peer) {
31890f4a 2650 if (mdev->tconn->agreed_pro_version < 96 ?
4a23f264
PR
2651 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2652 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2653 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
b411b363
PR
 2654		/* The last P_SYNC_UUID did not get through. Undo the modifications of
 2655		   our UUIDs made at the last start of a resync as sync source. */
2656
31890f4a 2657 if (mdev->tconn->agreed_pro_version < 91)
4a23f264 2658 return -1091;
b411b363
PR
2659
2660 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2661 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2662
4a23f264 2663 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
b411b363
PR
2664 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2665 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2666
2667 return 1;
2668 }
2669 }
2670
2671
2672 *rule_nr = 80;
d8c2a36b 2673 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
b411b363
PR
2674 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2675 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2676 if (self == peer)
2677 return 2;
2678 }
2679
2680 *rule_nr = 90;
2681 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2682 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2683 if (self == peer && self != ((u64)0))
2684 return 100;
2685
2686 *rule_nr = 100;
2687 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2688 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2689 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2690 peer = mdev->p_uuid[j] & ~((u64)1);
2691 if (self == peer)
2692 return -100;
2693 }
2694 }
2695
2696 return -1000;
2697}
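/*
 * Reading the result (see the legend above the function): a return value
 * of 2, e.g. by rule 30 when only the peer's UUIDs are just-created,
 * means "become C_SYNC_SOURCE and set the whole bitmap" -- a full sync --
 * whereas 1 means the existing bitmap is sufficient for a partial resync.
 */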
2698
2699/* drbd_sync_handshake() returns the new conn state on success, or
2700 CONN_MASK (-1) on failure.
2701 */
2702static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2703 enum drbd_disk_state peer_disk) __must_hold(local)
2704{
2705 int hg, rule_nr;
2706 enum drbd_conns rv = C_MASK;
2707 enum drbd_disk_state mydisk;
2708
2709 mydisk = mdev->state.disk;
2710 if (mydisk == D_NEGOTIATING)
2711 mydisk = mdev->new_state_tmp.disk;
2712
2713 dev_info(DEV, "drbd_sync_handshake:\n");
2714 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2715 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2716 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2717
2718 hg = drbd_uuid_compare(mdev, &rule_nr);
2719
2720 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2721
2722 if (hg == -1000) {
2723 dev_alert(DEV, "Unrelated data, aborting!\n");
2724 return C_MASK;
2725 }
4a23f264
PR
2726 if (hg < -1000) {
2727 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
b411b363
PR
2728 return C_MASK;
2729 }
2730
2731 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2732 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2733 int f = (hg == -100) || abs(hg) == 2;
2734 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2735 if (f)
2736 hg = hg*2;
2737 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2738 hg > 0 ? "source" : "target");
2739 }
2740
3a11a487
AG
2741 if (abs(hg) == 100)
2742 drbd_khelper(mdev, "initial-split-brain");
2743
89e58e75 2744 if (hg == 100 || (hg == -100 && mdev->tconn->net_conf->always_asbp)) {
b411b363
PR
2745 int pcount = (mdev->state.role == R_PRIMARY)
2746 + (peer_role == R_PRIMARY);
2747 int forced = (hg == -100);
2748
2749 switch (pcount) {
2750 case 0:
2751 hg = drbd_asb_recover_0p(mdev);
2752 break;
2753 case 1:
2754 hg = drbd_asb_recover_1p(mdev);
2755 break;
2756 case 2:
2757 hg = drbd_asb_recover_2p(mdev);
2758 break;
2759 }
2760 if (abs(hg) < 100) {
2761 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2762 "automatically solved. Sync from %s node\n",
2763 pcount, (hg < 0) ? "peer" : "this");
2764 if (forced) {
2765 dev_warn(DEV, "Doing a full sync, since"
2766 " UUIDs where ambiguous.\n");
2767 hg = hg*2;
2768 }
2769 }
2770 }
2771
2772 if (hg == -100) {
89e58e75 2773 if (mdev->tconn->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
b411b363 2774 hg = -1;
89e58e75 2775 if (!mdev->tconn->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
b411b363
PR
2776 hg = 1;
2777
2778 if (abs(hg) < 100)
2779 dev_warn(DEV, "Split-Brain detected, manually solved. "
2780 "Sync from %s node\n",
2781 (hg < 0) ? "peer" : "this");
2782 }
2783
2784 if (hg == -100) {
580b9767
LE
2785 /* FIXME this log message is not correct if we end up here
2786 * after an attempted attach on a diskless node.
2787 * We just refuse to attach -- well, we drop the "connection"
2788 * to that disk, in a way... */
3a11a487 2789 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
b411b363
PR
2790 drbd_khelper(mdev, "split-brain");
2791 return C_MASK;
2792 }
2793
2794 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2795 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2796 return C_MASK;
2797 }
2798
2799 if (hg < 0 && /* by intention we do not use mydisk here. */
2800 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
89e58e75 2801 switch (mdev->tconn->net_conf->rr_conflict) {
b411b363
PR
2802 case ASB_CALL_HELPER:
2803 drbd_khelper(mdev, "pri-lost");
2804 /* fall through */
2805 case ASB_DISCONNECT:
2806 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2807 return C_MASK;
2808 case ASB_VIOLENTLY:
2809 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2810 "assumption\n");
2811 }
2812 }
2813
8169e41b 2814 if (mdev->tconn->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
cf14c2e9
PR
2815 if (hg == 0)
2816 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2817 else
2818 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
2819 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2820 abs(hg) >= 2 ? "full" : "bit-map based");
2821 return C_MASK;
2822 }
2823
b411b363
PR
2824 if (abs(hg) >= 2) {
2825 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
20ceb2b2
LE
2826 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
2827 BM_LOCKED_SET_ALLOWED))
b411b363
PR
2828 return C_MASK;
2829 }
2830
2831 if (hg > 0) { /* become sync source. */
2832 rv = C_WF_BITMAP_S;
2833 } else if (hg < 0) { /* become sync target */
2834 rv = C_WF_BITMAP_T;
2835 } else {
2836 rv = C_CONNECTED;
2837 if (drbd_bm_total_weight(mdev)) {
2838 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2839 drbd_bm_total_weight(mdev));
2840 }
2841 }
2842
2843 return rv;
2844}
2845
2846/* returns 1 if invalid */
2847static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2848{
2849 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2850 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2851 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2852 return 0;
2853
2854 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2855 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2856 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2857 return 1;
2858
2859 /* everything else is valid if they are equal on both sides. */
2860 if (peer == self)
2861 return 0;
2862
 2863	/* everything else is invalid. */
2864 return 1;
2865}
2866
7204624c 2867static int receive_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd,
d8763023 2868 unsigned int data_size)
b411b363 2869{
7204624c 2870 struct p_protocol *p = &tconn->data.rbuf.protocol;
b411b363 2871 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
cf14c2e9 2872 int p_want_lose, p_two_primaries, cf;
b411b363
PR
2873 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2874
b411b363
PR
2875 p_proto = be32_to_cpu(p->protocol);
2876 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2877 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2878 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
b411b363 2879 p_two_primaries = be32_to_cpu(p->two_primaries);
cf14c2e9
PR
2880 cf = be32_to_cpu(p->conn_flags);
2881 p_want_lose = cf & CF_WANT_LOSE;
2882
7204624c 2883 clear_bit(CONN_DRY_RUN, &tconn->flags);
cf14c2e9
PR
2884
2885 if (cf & CF_DRY_RUN)
7204624c 2886 set_bit(CONN_DRY_RUN, &tconn->flags);
b411b363 2887
7204624c
PR
2888 if (p_proto != tconn->net_conf->wire_protocol) {
2889 conn_err(tconn, "incompatible communication protocols\n");
b411b363
PR
2890 goto disconnect;
2891 }
2892
7204624c
PR
2893 if (cmp_after_sb(p_after_sb_0p, tconn->net_conf->after_sb_0p)) {
2894 conn_err(tconn, "incompatible after-sb-0pri settings\n");
b411b363
PR
2895 goto disconnect;
2896 }
2897
7204624c
PR
2898 if (cmp_after_sb(p_after_sb_1p, tconn->net_conf->after_sb_1p)) {
2899 conn_err(tconn, "incompatible after-sb-1pri settings\n");
b411b363
PR
2900 goto disconnect;
2901 }
2902
7204624c
PR
2903 if (cmp_after_sb(p_after_sb_2p, tconn->net_conf->after_sb_2p)) {
2904 conn_err(tconn, "incompatible after-sb-2pri settings\n");
b411b363
PR
2905 goto disconnect;
2906 }
2907
7204624c
PR
2908 if (p_want_lose && tconn->net_conf->want_lose) {
2909 conn_err(tconn, "both sides have the 'want_lose' flag set\n");
b411b363
PR
2910 goto disconnect;
2911 }
2912
7204624c
PR
2913 if (p_two_primaries != tconn->net_conf->two_primaries) {
2914 conn_err(tconn, "incompatible setting of the two-primaries options\n");
b411b363
PR
2915 goto disconnect;
2916 }
2917
7204624c
PR
2918 if (tconn->agreed_pro_version >= 87) {
2919 unsigned char *my_alg = tconn->net_conf->integrity_alg;
82bc0194 2920 int err;
b411b363 2921
82bc0194
AG
2922 err = drbd_recv_all(tconn, p_integrity_alg, data_size);
2923 if (err)
2924 return err;
b411b363
PR
2925
2926 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2927 if (strcmp(p_integrity_alg, my_alg)) {
7204624c 2928 conn_err(tconn, "incompatible setting of the data-integrity-alg\n");
b411b363
PR
2929 goto disconnect;
2930 }
7204624c 2931 conn_info(tconn, "data-integrity-alg: %s\n",
b411b363
PR
2932 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2933 }
2934
82bc0194 2935 return 0;
b411b363
PR
2936
2937disconnect:
7204624c 2938 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 2939 return -EIO;
b411b363
PR
2940}
2941
2942/* helper function
2943 * input: alg name, feature name
2944 * return: NULL (alg name was "")
2945 * ERR_PTR(error) if something goes wrong
2946 * or the crypto hash ptr, if it worked out ok. */
2947struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2948 const char *alg, const char *name)
2949{
2950 struct crypto_hash *tfm;
2951
2952 if (!alg[0])
2953 return NULL;
2954
2955 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2956 if (IS_ERR(tfm)) {
2957 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2958 alg, name, PTR_ERR(tfm));
2959 return tfm;
2960 }
2961 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2962 crypto_free_hash(tfm);
2963 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2964 return ERR_PTR(-EINVAL);
2965 }
2966 return tfm;
2967}
2968
d8763023
AG
2969static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packet cmd,
2970 unsigned int packet_size)
b411b363 2971{
e42325a5 2972 struct p_rs_param_95 *p = &mdev->tconn->data.rbuf.rs_param_95;
b411b363
PR
2973 unsigned int header_size, data_size, exp_max_sz;
2974 struct crypto_hash *verify_tfm = NULL;
2975 struct crypto_hash *csums_tfm = NULL;
31890f4a 2976 const int apv = mdev->tconn->agreed_pro_version;
778f271d
PR
2977 int *rs_plan_s = NULL;
2978 int fifo_size = 0;
82bc0194 2979 int err;
b411b363
PR
2980
2981 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
2982 : apv == 88 ? sizeof(struct p_rs_param)
2983 + SHARED_SECRET_MAX
8e26f9cc
PR
2984 : apv <= 94 ? sizeof(struct p_rs_param_89)
2985 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
b411b363 2986
02918be2 2987 if (packet_size > exp_max_sz) {
b411b363 2988 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
02918be2 2989 packet_size, exp_max_sz);
82bc0194 2990 return -EIO;
b411b363
PR
2991 }
2992
2993 if (apv <= 88) {
257d0af6 2994 header_size = sizeof(struct p_rs_param) - sizeof(struct p_header);
02918be2 2995 data_size = packet_size - header_size;
8e26f9cc 2996 } else if (apv <= 94) {
257d0af6 2997 header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header);
02918be2 2998 data_size = packet_size - header_size;
b411b363 2999 D_ASSERT(data_size == 0);
8e26f9cc 3000 } else {
257d0af6 3001 header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header);
02918be2 3002 data_size = packet_size - header_size;
b411b363
PR
3003 D_ASSERT(data_size == 0);
3004 }
3005
3006 /* initialize verify_alg and csums_alg */
3007 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3008
82bc0194
AG
3009 err = drbd_recv_all(mdev->tconn, &p->head.payload, header_size);
3010 if (err)
3011 return err;
b411b363 3012
f399002e
LE
3013 if (get_ldev(mdev)) {
3014 mdev->ldev->dc.resync_rate = be32_to_cpu(p->rate);
3015 put_ldev(mdev);
3016 }
b411b363
PR
3017
3018 if (apv >= 88) {
3019 if (apv == 88) {
3020 if (data_size > SHARED_SECRET_MAX) {
3021 dev_err(DEV, "verify-alg too long, "
3022 "peer wants %u, accepting only %u byte\n",
3023 data_size, SHARED_SECRET_MAX);
82bc0194 3024 return -EIO;
b411b363
PR
3025 }
3026
82bc0194
AG
3027 err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
3028 if (err)
3029 return err;
b411b363
PR
3030
3031 /* we expect NUL terminated string */
3032 /* but just in case someone tries to be evil */
3033 D_ASSERT(p->verify_alg[data_size-1] == 0);
3034 p->verify_alg[data_size-1] = 0;
3035
3036 } else /* apv >= 89 */ {
3037 /* we still expect NUL terminated strings */
3038 /* but just in case someone tries to be evil */
3039 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3040 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
3041 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3042 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3043 }
3044
f399002e 3045 if (strcmp(mdev->tconn->net_conf->verify_alg, p->verify_alg)) {
b411b363
PR
3046 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3047 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
f399002e 3048 mdev->tconn->net_conf->verify_alg, p->verify_alg);
b411b363
PR
3049 goto disconnect;
3050 }
3051 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
3052 p->verify_alg, "verify-alg");
3053 if (IS_ERR(verify_tfm)) {
3054 verify_tfm = NULL;
3055 goto disconnect;
3056 }
3057 }
3058
f399002e 3059 if (apv >= 89 && strcmp(mdev->tconn->net_conf->csums_alg, p->csums_alg)) {
b411b363
PR
3060 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3061 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
f399002e 3062 mdev->tconn->net_conf->csums_alg, p->csums_alg);
b411b363
PR
3063 goto disconnect;
3064 }
3065 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
3066 p->csums_alg, "csums-alg");
3067 if (IS_ERR(csums_tfm)) {
3068 csums_tfm = NULL;
3069 goto disconnect;
3070 }
3071 }
3072
f399002e
LE
3073 if (apv > 94 && get_ldev(mdev)) {
3074 mdev->ldev->dc.resync_rate = be32_to_cpu(p->rate);
3075 mdev->ldev->dc.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3076 mdev->ldev->dc.c_delay_target = be32_to_cpu(p->c_delay_target);
3077 mdev->ldev->dc.c_fill_target = be32_to_cpu(p->c_fill_target);
3078 mdev->ldev->dc.c_max_rate = be32_to_cpu(p->c_max_rate);
778f271d 3079
f399002e 3080 fifo_size = (mdev->ldev->dc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
778f271d
PR
3081 if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
3082 rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
3083 if (!rs_plan_s) {
 3084				dev_err(DEV, "kzalloc of fifo_buffer failed");
f399002e 3085 put_ldev(mdev);
778f271d
PR
3086 goto disconnect;
3087 }
3088 }
f399002e 3089 put_ldev(mdev);
8e26f9cc 3090 }
b411b363
PR
3091
3092 spin_lock(&mdev->peer_seq_lock);
3093 /* lock against drbd_nl_syncer_conf() */
3094 if (verify_tfm) {
f399002e
LE
3095 strcpy(mdev->tconn->net_conf->verify_alg, p->verify_alg);
3096 mdev->tconn->net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
3097 crypto_free_hash(mdev->tconn->verify_tfm);
3098 mdev->tconn->verify_tfm = verify_tfm;
b411b363
PR
3099 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
3100 }
3101 if (csums_tfm) {
f399002e
LE
3102 strcpy(mdev->tconn->net_conf->csums_alg, p->csums_alg);
3103 mdev->tconn->net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
3104 crypto_free_hash(mdev->tconn->csums_tfm);
3105 mdev->tconn->csums_tfm = csums_tfm;
b411b363
PR
3106 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
3107 }
778f271d
PR
3108 if (fifo_size != mdev->rs_plan_s.size) {
3109 kfree(mdev->rs_plan_s.values);
3110 mdev->rs_plan_s.values = rs_plan_s;
3111 mdev->rs_plan_s.size = fifo_size;
3112 mdev->rs_planed = 0;
3113 }
b411b363
PR
3114 spin_unlock(&mdev->peer_seq_lock);
3115 }
82bc0194 3116 return 0;
b411b363 3117
b411b363
PR
3118disconnect:
3119 /* just for completeness: actually not needed,
3120 * as this is not reached if csums_tfm was ok. */
3121 crypto_free_hash(csums_tfm);
3122 /* but free the verify_tfm again, if csums_tfm did not work out */
3123 crypto_free_hash(verify_tfm);
38fa9988 3124 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3125 return -EIO;
b411b363
PR
3126}
3127
b411b363
PR
3128/* warn if the arguments differ by more than 12.5% */
3129static void warn_if_differ_considerably(struct drbd_conf *mdev,
3130 const char *s, sector_t a, sector_t b)
3131{
3132 sector_t d;
3133 if (a == 0 || b == 0)
3134 return;
3135 d = (a > b) ? (a - b) : (b - a);
3136 if (d > (a>>3) || d > (b>>3))
3137 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3138 (unsigned long long)a, (unsigned long long)b);
3139}
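/*
 * Example: a == 1000 and b == 1200 sectors differ by d == 200, which
 * exceeds a>>3 == 125 (12.5% of a), so the warning is printed;
 * a == 1000 vs. b == 1100 stays below both thresholds and is silent.
 */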
3140
d8763023
AG
3141static int receive_sizes(struct drbd_conf *mdev, enum drbd_packet cmd,
3142 unsigned int data_size)
b411b363 3143{
e42325a5 3144 struct p_sizes *p = &mdev->tconn->data.rbuf.sizes;
b411b363 3145 enum determine_dev_size dd = unchanged;
b411b363
PR
3146 sector_t p_size, p_usize, my_usize;
3147 int ldsc = 0; /* local disk size changed */
e89b591c 3148 enum dds_flags ddsf;
b411b363 3149
b411b363
PR
3150 p_size = be64_to_cpu(p->d_size);
3151 p_usize = be64_to_cpu(p->u_size);
3152
b411b363
PR
3153 /* just store the peer's disk size for now.
3154 * we still need to figure out whether we accept that. */
3155 mdev->p_size = p_size;
3156
b411b363
PR
3157 if (get_ldev(mdev)) {
3158 warn_if_differ_considerably(mdev, "lower level device sizes",
3159 p_size, drbd_get_max_capacity(mdev->ldev));
3160 warn_if_differ_considerably(mdev, "user requested size",
3161 p_usize, mdev->ldev->dc.disk_size);
3162
3163 /* if this is the first connect, or an otherwise expected
3164 * param exchange, choose the minimum */
3165 if (mdev->state.conn == C_WF_REPORT_PARAMS)
3166 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
3167 p_usize);
3168
3169 my_usize = mdev->ldev->dc.disk_size;
3170
3171 if (mdev->ldev->dc.disk_size != p_usize) {
3172 mdev->ldev->dc.disk_size = p_usize;
3173 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3174 (unsigned long)mdev->ldev->dc.disk_size);
3175 }
3176
3177 /* Never shrink a device with usable data during connect.
3178 But allow online shrinking if we are connected. */
a393db6f 3179 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
b411b363
PR
3180 drbd_get_capacity(mdev->this_bdev) &&
3181 mdev->state.disk >= D_OUTDATED &&
3182 mdev->state.conn < C_CONNECTED) {
3183 dev_err(DEV, "The peer's disk size is too small!\n");
38fa9988 3184 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363
PR
3185 mdev->ldev->dc.disk_size = my_usize;
3186 put_ldev(mdev);
82bc0194 3187 return -EIO;
b411b363
PR
3188 }
3189 put_ldev(mdev);
3190 }
b411b363 3191
e89b591c 3192 ddsf = be16_to_cpu(p->dds_flags);
b411b363 3193 if (get_ldev(mdev)) {
24c4830c 3194 dd = drbd_determine_dev_size(mdev, ddsf);
b411b363
PR
3195 put_ldev(mdev);
3196 if (dd == dev_size_error)
82bc0194 3197 return -EIO;
b411b363
PR
3198 drbd_md_sync(mdev);
3199 } else {
3200 /* I am diskless, need to accept the peer's size. */
3201 drbd_set_my_capacity(mdev, p_size);
3202 }
3203
99432fcc
PR
3204 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3205 drbd_reconsider_max_bio_size(mdev);
3206
b411b363
PR
3207 if (get_ldev(mdev)) {
3208 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3209 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3210 ldsc = 1;
3211 }
3212
b411b363
PR
3213 put_ldev(mdev);
3214 }
3215
3216 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3217 if (be64_to_cpu(p->c_size) !=
3218 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3219 /* we have different sizes, probably peer
3220 * needs to know my new size... */
e89b591c 3221 drbd_send_sizes(mdev, 0, ddsf);
b411b363
PR
3222 }
3223 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3224 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3225 if (mdev->state.pdsk >= D_INCONSISTENT &&
e89b591c
PR
3226 mdev->state.disk >= D_INCONSISTENT) {
3227 if (ddsf & DDSF_NO_RESYNC)
3228 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3229 else
3230 resync_after_online_grow(mdev);
3231 } else
b411b363
PR
3232 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3233 }
3234 }
3235
82bc0194 3236 return 0;
b411b363
PR
3237}
3238
d8763023
AG
3239static int receive_uuids(struct drbd_conf *mdev, enum drbd_packet cmd,
3240 unsigned int data_size)
b411b363 3241{
e42325a5 3242 struct p_uuids *p = &mdev->tconn->data.rbuf.uuids;
b411b363 3243 u64 *p_uuid;
62b0da3a 3244 int i, updated_uuids = 0;
b411b363 3245
b411b363
PR
3246 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3247
3248 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3249 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3250
3251 kfree(mdev->p_uuid);
3252 mdev->p_uuid = p_uuid;
3253
3254 if (mdev->state.conn < C_CONNECTED &&
3255 mdev->state.disk < D_INCONSISTENT &&
3256 mdev->state.role == R_PRIMARY &&
3257 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3258 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3259 (unsigned long long)mdev->ed_uuid);
38fa9988 3260 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3261 return -EIO;
b411b363
PR
3262 }
3263
3264 if (get_ldev(mdev)) {
3265 int skip_initial_sync =
3266 mdev->state.conn == C_CONNECTED &&
31890f4a 3267 mdev->tconn->agreed_pro_version >= 90 &&
b411b363
PR
3268 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3269 (p_uuid[UI_FLAGS] & 8);
3270 if (skip_initial_sync) {
3271 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3272 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
20ceb2b2
LE
3273 "clear_n_write from receive_uuids",
3274 BM_LOCKED_TEST_ALLOWED);
b411b363
PR
3275 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3276 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3277 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3278 CS_VERBOSE, NULL);
3279 drbd_md_sync(mdev);
62b0da3a 3280 updated_uuids = 1;
b411b363
PR
3281 }
3282 put_ldev(mdev);
18a50fa2
PR
3283 } else if (mdev->state.disk < D_INCONSISTENT &&
3284 mdev->state.role == R_PRIMARY) {
3285 /* I am a diskless primary, the peer just created a new current UUID
3286 for me. */
62b0da3a 3287 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
b411b363
PR
3288 }
3289
 3290	/* Before we test for the disk state, we should wait until a possibly
3291 ongoing cluster wide state change is finished. That is important if
3292 we are primary and are detaching from our disk. We need to see the
3293 new disk state... */
8410da8f
PR
3294 mutex_lock(mdev->state_mutex);
3295 mutex_unlock(mdev->state_mutex);
b411b363 3296 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
62b0da3a
LE
3297 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3298
3299 if (updated_uuids)
3300 drbd_print_uuids(mdev, "receiver updated UUIDs to");
b411b363 3301
82bc0194 3302 return 0;
b411b363
PR
3303}
3304
3305/**
3306 * convert_state() - Converts the peer's view of the cluster state to our point of view
3307 * @ps: The state as seen by the peer.
3308 */
3309static union drbd_state convert_state(union drbd_state ps)
3310{
3311 union drbd_state ms;
3312
3313 static enum drbd_conns c_tab[] = {
3314 [C_CONNECTED] = C_CONNECTED,
3315
3316 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3317 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3318 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3319 [C_VERIFY_S] = C_VERIFY_T,
3320 [C_MASK] = C_MASK,
3321 };
3322
3323 ms.i = ps.i;
3324
3325 ms.conn = c_tab[ps.conn];
3326 ms.peer = ps.role;
3327 ms.role = ps.peer;
3328 ms.pdsk = ps.disk;
3329 ms.disk = ps.pdsk;
3330 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3331
3332 return ms;
3333}
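/*
 * Illustrative example: if the peer reports
 *   { role=Primary, peer=Secondary, disk=UpToDate, pdsk=Inconsistent,
 *     conn=StartingSyncS },
 * convert_state() mirrors that into our point of view as
 *   { role=Secondary, peer=Primary, disk=Inconsistent, pdsk=UpToDate,
 *     conn=StartingSyncT }.
 */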
3334
d8763023
AG
3335static int receive_req_state(struct drbd_conf *mdev, enum drbd_packet cmd,
3336 unsigned int data_size)
b411b363 3337{
e42325a5 3338 struct p_req_state *p = &mdev->tconn->data.rbuf.req_state;
b411b363 3339 union drbd_state mask, val;
bf885f8a 3340 enum drbd_state_rv rv;
b411b363 3341
b411b363
PR
3342 mask.i = be32_to_cpu(p->mask);
3343 val.i = be32_to_cpu(p->val);
3344
25703f83 3345 if (test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags) &&
8410da8f 3346 mutex_is_locked(mdev->state_mutex)) {
b411b363 3347 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
82bc0194 3348 return 0;
b411b363
PR
3349 }
3350
3351 mask = convert_state(mask);
3352 val = convert_state(val);
3353
dfafcc8a
PR
3354 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3355 drbd_send_sr_reply(mdev, rv);
b411b363 3356
b411b363
PR
3357 drbd_md_sync(mdev);
3358
82bc0194 3359 return 0;
b411b363
PR
3360}
3361
dfafcc8a
PR
3362static int receive_req_conn_state(struct drbd_tconn *tconn, enum drbd_packet cmd,
3363 unsigned int data_size)
3364{
3365 struct p_req_state *p = &tconn->data.rbuf.req_state;
3366 union drbd_state mask, val;
3367 enum drbd_state_rv rv;
3368
3369 mask.i = be32_to_cpu(p->mask);
3370 val.i = be32_to_cpu(p->val);
3371
3372 if (test_bit(DISCARD_CONCURRENT, &tconn->flags) &&
3373 mutex_is_locked(&tconn->cstate_mutex)) {
3374 conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
82bc0194 3375 return 0;
dfafcc8a
PR
3376 }
3377
3378 mask = convert_state(mask);
3379 val = convert_state(val);
3380
3381 rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY);
3382 conn_send_sr_reply(tconn, rv);
3383
82bc0194 3384 return 0;
dfafcc8a
PR
3385}
3386
d8763023
AG
3387static int receive_state(struct drbd_conf *mdev, enum drbd_packet cmd,
3388 unsigned int data_size)
b411b363 3389{
e42325a5 3390 struct p_state *p = &mdev->tconn->data.rbuf.state;
4ac4aada 3391 union drbd_state os, ns, peer_state;
b411b363 3392 enum drbd_disk_state real_peer_disk;
65d922c3 3393 enum chg_state_flags cs_flags;
b411b363
PR
3394 int rv;
3395
b411b363
PR
3396 peer_state.i = be32_to_cpu(p->state);
3397
3398 real_peer_disk = peer_state.disk;
3399 if (peer_state.disk == D_NEGOTIATING) {
3400 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3401 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3402 }
3403
87eeee41 3404 spin_lock_irq(&mdev->tconn->req_lock);
b411b363 3405 retry:
4ac4aada 3406 os = ns = mdev->state;
87eeee41 3407 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 3408
e9ef7bb6
LE
3409 /* peer says his disk is uptodate, while we think it is inconsistent,
3410 * and this happens while we think we have a sync going on. */
3411 if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
3412 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3413 /* If we are (becoming) SyncSource, but peer is still in sync
3414 * preparation, ignore its uptodate-ness to avoid flapping, it
3415 * will change to inconsistent once the peer reaches active
3416 * syncing states.
3417 * It may have changed syncer-paused flags, however, so we
3418 * cannot ignore this completely. */
3419 if (peer_state.conn > C_CONNECTED &&
3420 peer_state.conn < C_SYNC_SOURCE)
3421 real_peer_disk = D_INCONSISTENT;
3422
3423 /* if peer_state changes to connected at the same time,
3424 * it explicitly notifies us that it finished resync.
3425 * Maybe we should finish it up, too? */
3426 else if (os.conn >= C_SYNC_SOURCE &&
3427 peer_state.conn == C_CONNECTED) {
3428 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3429 drbd_resync_finished(mdev);
82bc0194 3430 return 0;
e9ef7bb6
LE
3431 }
3432 }
3433
3434 /* peer says his disk is inconsistent, while we think it is uptodate,
3435 * and this happens while the peer still thinks we have a sync going on,
3436 * but we think we are already done with the sync.
3437 * We ignore this to avoid flapping pdsk.
3438 * This should not happen, if the peer is a recent version of drbd. */
3439 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3440 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3441 real_peer_disk = D_UP_TO_DATE;
3442
4ac4aada
LE
3443 if (ns.conn == C_WF_REPORT_PARAMS)
3444 ns.conn = C_CONNECTED;
b411b363 3445
67531718
PR
3446 if (peer_state.conn == C_AHEAD)
3447 ns.conn = C_BEHIND;
3448
b411b363
PR
3449 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3450 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3451 int cr; /* consider resync */
3452
3453 /* if we established a new connection */
4ac4aada 3454 cr = (os.conn < C_CONNECTED);
b411b363
PR
3455 /* if we had an established connection
3456 * and one of the nodes newly attaches a disk */
4ac4aada 3457 cr |= (os.conn == C_CONNECTED &&
b411b363 3458 (peer_state.disk == D_NEGOTIATING ||
4ac4aada 3459 os.disk == D_NEGOTIATING));
b411b363
PR
3460 /* if we have both been inconsistent, and the peer has been
3461 * forced to be UpToDate with --overwrite-data */
3462 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3463 /* if we had been plain connected, and the admin requested to
3464 * start a sync by "invalidate" or "invalidate-remote" */
4ac4aada 3465 cr |= (os.conn == C_CONNECTED &&
b411b363
PR
3466 (peer_state.conn >= C_STARTING_SYNC_S &&
3467 peer_state.conn <= C_WF_BITMAP_T));
3468
3469 if (cr)
4ac4aada 3470 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
b411b363
PR
3471
3472 put_ldev(mdev);
4ac4aada
LE
3473 if (ns.conn == C_MASK) {
3474 ns.conn = C_CONNECTED;
b411b363 3475 if (mdev->state.disk == D_NEGOTIATING) {
82f59cc6 3476 drbd_force_state(mdev, NS(disk, D_FAILED));
b411b363
PR
3477 } else if (peer_state.disk == D_NEGOTIATING) {
3478 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3479 peer_state.disk = D_DISKLESS;
580b9767 3480 real_peer_disk = D_DISKLESS;
b411b363 3481 } else {
8169e41b 3482 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
82bc0194 3483 return -EIO;
4ac4aada 3484 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
38fa9988 3485 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3486 return -EIO;
b411b363
PR
3487 }
3488 }
3489 }
3490
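	/* ns was computed without the req_lock held; re-take it and start over
	 * if the local state changed in the meantime */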
87eeee41 3491 spin_lock_irq(&mdev->tconn->req_lock);
4ac4aada 3492 if (mdev->state.i != os.i)
b411b363
PR
3493 goto retry;
3494 clear_bit(CONSIDER_RESYNC, &mdev->flags);
b411b363
PR
3495 ns.peer = peer_state.role;
3496 ns.pdsk = real_peer_disk;
3497 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
4ac4aada 3498 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
b411b363 3499 ns.disk = mdev->new_state_tmp.disk;
4ac4aada
LE
3500 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3501 if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
481c6f50 3502 test_bit(NEW_CUR_UUID, &mdev->flags)) {
8554df1c 3503 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
481c6f50 3504 for temporary network outages! */
87eeee41 3505 spin_unlock_irq(&mdev->tconn->req_lock);
481c6f50 3506 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
2f5cdd0b 3507 tl_clear(mdev->tconn);
481c6f50
PR
3508 drbd_uuid_new_current(mdev);
3509 clear_bit(NEW_CUR_UUID, &mdev->flags);
38fa9988 3510 conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
82bc0194 3511 return -EIO;
481c6f50 3512 }
65d922c3 3513 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
b411b363 3514 ns = mdev->state;
87eeee41 3515 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363
PR
3516
3517 if (rv < SS_SUCCESS) {
38fa9988 3518 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3519 return -EIO;
b411b363
PR
3520 }
3521
4ac4aada
LE
3522 if (os.conn > C_WF_REPORT_PARAMS) {
3523 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
b411b363
PR
3524 peer_state.disk != D_NEGOTIATING ) {
3525 /* we want resync, peer has not yet decided to sync... */
3526 /* Nowadays only used when forcing a node into primary role and
3527 setting its disk to UpToDate with that */
3528 drbd_send_uuids(mdev);
3529 drbd_send_state(mdev);
3530 }
3531 }
3532
89e58e75 3533 mdev->tconn->net_conf->want_lose = 0;
b411b363
PR
3534
3535 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3536
82bc0194 3537 return 0;
b411b363
PR
3538}
3539
d8763023
AG
3540static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packet cmd,
3541 unsigned int data_size)
b411b363 3542{
e42325a5 3543 struct p_rs_uuid *p = &mdev->tconn->data.rbuf.rs_uuid;
b411b363
PR
3544
3545 wait_event(mdev->misc_wait,
3546 mdev->state.conn == C_WF_SYNC_UUID ||
c4752ef1 3547 mdev->state.conn == C_BEHIND ||
b411b363
PR
3548 mdev->state.conn < C_CONNECTED ||
3549 mdev->state.disk < D_NEGOTIATING);
3550
3551 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3552
b411b363
PR
3553 /* Here the _drbd_uuid_ functions are right, current should
3554 _not_ be rotated into the history */
3555 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3556 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3557 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3558
62b0da3a 3559 drbd_print_uuids(mdev, "updated sync uuid");
b411b363
PR
3560 drbd_start_resync(mdev, C_SYNC_TARGET);
3561
3562 put_ldev(mdev);
3563 } else
3564 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3565
82bc0194 3566 return 0;
b411b363
PR
3567}
3568
2c46407d
AG
3569/**
3570 * receive_bitmap_plain
3571 *
3572 * Return 0 when done, 1 when another iteration is needed, and a negative error
3573 * code upon failure.
3574 */
3575static int
02918be2
PR
3576receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
3577 unsigned long *buffer, struct bm_xfer_ctx *c)
b411b363
PR
3578{
3579 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3580 unsigned want = num_words * sizeof(long);
2c46407d 3581 int err;
b411b363 3582
02918be2
PR
3583 if (want != data_size) {
3584 dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
2c46407d 3585 return -EIO;
b411b363
PR
3586 }
3587 if (want == 0)
2c46407d 3588 return 0;
82bc0194
AG
3589 err = drbd_recv_all(mdev->tconn, buffer, want);
3590 if (err)
2c46407d 3591 return err;
b411b363
PR
3592
3593 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3594
3595 c->word_offset += num_words;
3596 c->bit_offset = c->word_offset * BITS_PER_LONG;
3597 if (c->bit_offset > c->bm_bits)
3598 c->bit_offset = c->bm_bits;
3599
2c46407d 3600 return 1;
b411b363
PR
3601}
3602
2c46407d
AG
3603/**
3604 * recv_bm_rle_bits
3605 *
3606 * Return 0 when done, 1 when another iteration is needed, and a negative error
3607 * code upon failure.
3608 */
3609static int
b411b363
PR
3610recv_bm_rle_bits(struct drbd_conf *mdev,
3611 struct p_compressed_bm *p,
c6d25cfe
PR
3612 struct bm_xfer_ctx *c,
3613 unsigned int len)
b411b363
PR
3614{
3615 struct bitstream bs;
3616 u64 look_ahead;
3617 u64 rl;
3618 u64 tmp;
3619 unsigned long s = c->bit_offset;
3620 unsigned long e;
b411b363
PR
3621 int toggle = DCBP_get_start(p);
3622 int have;
3623 int bits;
3624
3625 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3626
3627 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3628 if (bits < 0)
2c46407d 3629 return -EIO;
b411b363
PR
3630
3631 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3632 bits = vli_decode_bits(&rl, look_ahead);
3633 if (bits <= 0)
2c46407d 3634 return -EIO;
b411b363
PR
3635
3636 if (toggle) {
3637 e = s + rl -1;
3638 if (e >= c->bm_bits) {
3639 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
2c46407d 3640 return -EIO;
b411b363
PR
3641 }
3642 _drbd_bm_set_bits(mdev, s, e);
3643 }
3644
3645 if (have < bits) {
3646 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3647 have, bits, look_ahead,
3648 (unsigned int)(bs.cur.b - p->code),
3649 (unsigned int)bs.buf_len);
2c46407d 3650 return -EIO;
b411b363
PR
3651 }
3652 look_ahead >>= bits;
3653 have -= bits;
3654
3655 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3656 if (bits < 0)
2c46407d 3657 return -EIO;
b411b363
PR
3658 look_ahead |= tmp << have;
3659 have += bits;
3660 }
3661
3662 c->bit_offset = s;
3663 bm_xfer_ctx_bit_to_word_offset(c);
3664
2c46407d 3665 return (s != c->bm_bits);
b411b363
PR
3666}
3667
2c46407d
AG
3668/**
3669 * decode_bitmap_c
3670 *
3671 * Return 0 when done, 1 when another iteration is needed, and a negative error
3672 * code upon failure.
3673 */
3674static int
b411b363
PR
3675decode_bitmap_c(struct drbd_conf *mdev,
3676 struct p_compressed_bm *p,
c6d25cfe
PR
3677 struct bm_xfer_ctx *c,
3678 unsigned int len)
b411b363
PR
3679{
3680 if (DCBP_get_code(p) == RLE_VLI_Bits)
c6d25cfe 3681 return recv_bm_rle_bits(mdev, p, c, len);
b411b363
PR
3682
3683 /* other variants had been implemented for evaluation,
3684 * but have been dropped as this one turned out to be "best"
3685 * during all our tests. */
3686
3687 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
38fa9988 3688 conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
2c46407d 3689 return -EIO;
b411b363
PR
3690}
3691
3692void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3693 const char *direction, struct bm_xfer_ctx *c)
3694{
3695 /* what would it take to transfer it "plaintext" */
c012949a 3696 unsigned plain = sizeof(struct p_header) *
b411b363
PR
3697 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3698 + c->bm_words * sizeof(long);
3699 unsigned total = c->bytes[0] + c->bytes[1];
3700 unsigned r;
3701
3702 /* total cannot be zero. But just in case: */
3703 if (total == 0)
3704 return;
3705
3706 /* don't report if not compressed */
3707 if (total >= plain)
3708 return;
3709
3710 /* total < plain. check for overflow, still */
3711 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3712 : (1000 * total / plain);
3713
3714 if (r > 1000)
3715 r = 1000;
3716
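	/* r now holds transferred/plain in permille; invert it so it reports the saving */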
3717 r = 1000 - r;
3718 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3719 "total %u; compression: %u.%u%%\n",
3720 direction,
3721 c->bytes[1], c->packets[1],
3722 c->bytes[0], c->packets[0],
3723 total, r/10, r % 10);
3724}
3725
3726/* Since we are processing the bitfield from lower addresses to higher,
3727 it does not matter whether we process it in 32 bit chunks or 64 bit
3728 chunks as long as it is little endian. (Understand it as a byte stream,
3729 beginning with the lowest byte...) If we used big endian,
3730 we would need to process it from the highest address to the lowest,
3731 in order to be agnostic to the 32 vs 64 bits issue.
3732
3733 Returns 0 on success, and a negative error code on failure. */
d8763023
AG
3734static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packet cmd,
3735 unsigned int data_size)
b411b363
PR
3736{
3737 struct bm_xfer_ctx c;
3738 void *buffer;
2c46407d 3739 int err;
257d0af6 3740 struct p_header *h = &mdev->tconn->data.rbuf.header;
77351055 3741 struct packet_info pi;
b411b363 3742
20ceb2b2
LE
3743 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
3744 /* you are supposed to send additional out-of-sync information
3745 * if you actually set bits during this phase */
b411b363
PR
3746
3747 /* maybe we should use some per thread scratch page,
3748 * and allocate that during initial device creation? */
3749 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3750 if (!buffer) {
3751 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
82bc0194 3752 err = -ENOMEM;
b411b363
PR
3753 goto out;
3754 }
3755
3756 c = (struct bm_xfer_ctx) {
3757 .bm_bits = drbd_bm_bits(mdev),
3758 .bm_words = drbd_bm_words(mdev),
3759 };
3760
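	/* keep receiving P_BITMAP / P_COMPRESSED_BITMAP packets until the
	 * whole bitmap has been transferred */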
2c46407d 3761 for(;;) {
02918be2 3762 if (cmd == P_BITMAP) {
2c46407d 3763 err = receive_bitmap_plain(mdev, data_size, buffer, &c);
02918be2 3764 } else if (cmd == P_COMPRESSED_BITMAP) {
b411b363
PR
3765 /* MAYBE: sanity check that we speak proto >= 90,
3766 * and the feature is enabled! */
3767 struct p_compressed_bm *p;
3768
02918be2 3769 if (data_size > BM_PACKET_PAYLOAD_BYTES) {
b411b363 3770 dev_err(DEV, "ReportCBitmap packet too large\n");
82bc0194 3771 err = -EIO;
b411b363
PR
3772 goto out;
3773 }
3774 /* use the page buffer */
3775 p = buffer;
3776 memcpy(p, h, sizeof(*h));
82bc0194
AG
3777 err = drbd_recv_all(mdev->tconn, p->head.payload, data_size);
3778 if (err)
3779 goto out;
004352fa
LE
3780 if (data_size <= (sizeof(*p) - sizeof(p->head))) {
3781 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
82bc0194 3782 err = -EIO;
78fcbdae 3783 goto out;
b411b363 3784 }
c6d25cfe 3785 err = decode_bitmap_c(mdev, p, &c, data_size);
b411b363 3786 } else {
02918be2 3787 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
82bc0194 3788 err = -EIO;
b411b363
PR
3789 goto out;
3790 }
3791
02918be2 3792 c.packets[cmd == P_BITMAP]++;
257d0af6 3793 c.bytes[cmd == P_BITMAP] += sizeof(struct p_header) + data_size;
b411b363 3794
2c46407d
AG
3795 if (err <= 0) {
3796 if (err < 0)
3797 goto out;
b411b363 3798 break;
2c46407d 3799 }
82bc0194
AG
3800 err = drbd_recv_header(mdev->tconn, &pi);
3801 if (err)
b411b363 3802 goto out;
77351055
PR
3803 cmd = pi.cmd;
3804 data_size = pi.size;
2c46407d 3805 }
b411b363
PR
3806
3807 INFO_bm_xfer_stats(mdev, "receive", &c);
3808
3809 if (mdev->state.conn == C_WF_BITMAP_T) {
de1f8e4a
AG
3810 enum drbd_state_rv rv;
3811
82bc0194
AG
3812 err = drbd_send_bitmap(mdev);
3813 if (err)
b411b363
PR
3814 goto out;
3815 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
de1f8e4a
AG
3816 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3817 D_ASSERT(rv == SS_SUCCESS);
b411b363
PR
3818 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3819 /* admin may have requested C_DISCONNECTING,
3820 * other threads may have noticed network errors */
3821 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3822 drbd_conn_str(mdev->state.conn));
3823 }
82bc0194 3824 err = 0;
b411b363 3825
b411b363 3826 out:
20ceb2b2 3827 drbd_bm_unlock(mdev);
82bc0194 3828 if (!err && mdev->state.conn == C_WF_BITMAP_S)
b411b363
PR
3829 drbd_start_resync(mdev, C_SYNC_SOURCE);
3830 free_page((unsigned long) buffer);
82bc0194 3831 return err;
b411b363
PR
3832}
3833
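/* read and discard data_size bytes from the data socket, in small chunks */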
2de876ef 3834static int _tconn_receive_skip(struct drbd_tconn *tconn, unsigned int data_size)
b411b363
PR
3835{
3836 /* TODO zero copy sink :) */
3837 static char sink[128];
3838 int size, want, r;
3839
02918be2 3840 size = data_size;
b411b363
PR
3841 while (size > 0) {
3842 want = min_t(int, size, sizeof(sink));
2de876ef
PR
3843 r = drbd_recv(tconn, sink, want);
3844 if (r <= 0)
841ce241 3845 break;
b411b363
PR
3846 size -= r;
3847 }
82bc0194 3848 return size ? -EIO : 0;
b411b363
PR
3849}
3850
2de876ef
PR
3851static int receive_skip(struct drbd_conf *mdev, enum drbd_packet cmd,
3852 unsigned int data_size)
3853{
3854 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3855 cmd, data_size);
3856
3857 return _tconn_receive_skip(mdev->tconn, data_size);
3858}
3859
3860static int tconn_receive_skip(struct drbd_tconn *tconn, enum drbd_packet cmd, unsigned int data_size)
3861{
3862 conn_warn(tconn, "skipping packet for non-existent volume type %d, l: %d!\n",
3863 cmd, data_size);
3864
3865 return _tconn_receive_skip(tconn, data_size);
3866}
3867
d8763023
AG
3868static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packet cmd,
3869 unsigned int data_size)
0ced55a3 3870{
e7f52dfb
LE
3871 /* Make sure we've acked all the TCP data associated
3872 * with the data requests being unplugged */
e42325a5 3873 drbd_tcp_quickack(mdev->tconn->data.socket);
0ced55a3 3874
82bc0194 3875 return 0;
0ced55a3
PR
3876}
3877
d8763023
AG
3878static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packet cmd,
3879 unsigned int data_size)
73a01a18 3880{
e42325a5 3881 struct p_block_desc *p = &mdev->tconn->data.rbuf.block_desc;
73a01a18 3882
f735e363
LE
3883 switch (mdev->state.conn) {
3884 case C_WF_SYNC_UUID:
3885 case C_WF_BITMAP_T:
3886 case C_BEHIND:
3887 break;
3888 default:
3889 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
3890 drbd_conn_str(mdev->state.conn));
3891 }
3892
73a01a18
PR
3893 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
3894
82bc0194 3895 return 0;
73a01a18
PR
3896}
3897
02918be2
PR
3898struct data_cmd {
3899 int expect_payload;
3900 size_t pkt_size;
a4fbda8e 3901 enum mdev_or_conn fa_type; /* first argument's type */
d9ae84e7
PR
3902 union {
3903 int (*mdev_fn)(struct drbd_conf *, enum drbd_packet cmd,
3904 unsigned int to_receive);
3905 int (*conn_fn)(struct drbd_tconn *, enum drbd_packet cmd,
3906 unsigned int to_receive);
3907 };
02918be2
PR
3908};
3909
3910static struct data_cmd drbd_cmd_handler[] = {
d9ae84e7
PR
3911 [P_DATA] = { 1, sizeof(struct p_data), MDEV, { receive_Data } },
3912 [P_DATA_REPLY] = { 1, sizeof(struct p_data), MDEV, { receive_DataReply } },
3913 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), MDEV, { receive_RSDataReply } } ,
3914 [P_BARRIER] = { 0, sizeof(struct p_barrier), MDEV, { receive_Barrier } } ,
3915 [P_BITMAP] = { 1, sizeof(struct p_header), MDEV, { receive_bitmap } } ,
3916 [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header), MDEV, { receive_bitmap } } ,
3917 [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header), MDEV, { receive_UnplugRemote } },
3918 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), MDEV, { receive_DataRequest } },
3919 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), MDEV, { receive_DataRequest } },
3920 [P_SYNC_PARAM] = { 1, sizeof(struct p_header), MDEV, { receive_SyncParam } },
3921 [P_SYNC_PARAM89] = { 1, sizeof(struct p_header), MDEV, { receive_SyncParam } },
7204624c 3922 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), CONN, { .conn_fn = receive_protocol } },
d9ae84e7
PR
3923 [P_UUIDS] = { 0, sizeof(struct p_uuids), MDEV, { receive_uuids } },
3924 [P_SIZES] = { 0, sizeof(struct p_sizes), MDEV, { receive_sizes } },
3925 [P_STATE] = { 0, sizeof(struct p_state), MDEV, { receive_state } },
3926 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), MDEV, { receive_req_state } },
3927 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), MDEV, { receive_sync_uuid } },
3928 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), MDEV, { receive_DataRequest } },
3929 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), MDEV, { receive_DataRequest } },
3930 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), MDEV, { receive_DataRequest } },
3931 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), MDEV, { receive_skip } },
3932 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), MDEV, { receive_out_of_sync } },
dfafcc8a 3933 [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), CONN, { .conn_fn = receive_req_conn_state } },
b411b363
PR
3934};
3935
02918be2 3936/* All handler functions that expect a sub-header get that sub-header in
e42325a5 3937 mdev->tconn->data.rbuf.header.head.payload.
02918be2 3938
e42325a5 3939 Usually in mdev->tconn->data.rbuf.header.head the callback can find the usual
02918be2
PR
3940 p_header, but they may not rely on that, since there is also p_header95.
3941 */
b411b363 3942
eefc2f7d 3943static void drbdd(struct drbd_tconn *tconn)
b411b363 3944{
eefc2f7d 3945 struct p_header *header = &tconn->data.rbuf.header;
77351055 3946 struct packet_info pi;
02918be2 3947 size_t shs; /* sub header size */
82bc0194 3948 int err;
b411b363 3949
eefc2f7d
PR
3950 while (get_t_state(&tconn->receiver) == RUNNING) {
3951 drbd_thread_current_set_cpu(&tconn->receiver);
69bc7bc3 3952 if (drbd_recv_header(tconn, &pi))
02918be2 3953 goto err_out;
b411b363 3954
6e849ce8 3955 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) ||
d9ae84e7 3956 !drbd_cmd_handler[pi.cmd].mdev_fn)) {
eefc2f7d 3957 conn_err(tconn, "unknown packet type %d, l: %d!\n", pi.cmd, pi.size);
02918be2 3958 goto err_out;
0b33a916 3959 }
b411b363 3960
77351055
PR
3961 shs = drbd_cmd_handler[pi.cmd].pkt_size - sizeof(struct p_header);
3962 if (pi.size - shs > 0 && !drbd_cmd_handler[pi.cmd].expect_payload) {
eefc2f7d 3963 conn_err(tconn, "No payload expected %s l:%d\n", cmdname(pi.cmd), pi.size);
02918be2 3964 goto err_out;
b411b363 3965 }
b411b363 3966
c13f7e1a 3967 if (shs) {
a5c31904
AG
3968 err = drbd_recv_all_warn(tconn, &header->payload, shs);
3969 if (err)
c13f7e1a 3970 goto err_out;
c13f7e1a
LE
3971 }
3972
a4fbda8e 3973 if (drbd_cmd_handler[pi.cmd].fa_type == CONN) {
82bc0194 3974 err = drbd_cmd_handler[pi.cmd].conn_fn(tconn, pi.cmd, pi.size - shs);
d9ae84e7
PR
3975 } else {
3976 struct drbd_conf *mdev = vnr_to_mdev(tconn, pi.vnr);
82bc0194 3977 err = mdev ?
d9ae84e7
PR
3978 drbd_cmd_handler[pi.cmd].mdev_fn(mdev, pi.cmd, pi.size - shs) :
3979 tconn_receive_skip(tconn, pi.cmd, pi.size - shs);
3980 }
b411b363 3981
82bc0194 3982 if (unlikely(err)) {
eefc2f7d 3983 conn_err(tconn, "error receiving %s, l: %d!\n",
77351055 3984 cmdname(pi.cmd), pi.size);
02918be2 3985 goto err_out;
b411b363
PR
3986 }
3987 }
82bc0194 3988 return;
b411b363 3989
82bc0194
AG
3990 err_out:
3991 conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
b411b363
PR
3992}
3993
0e29d163 3994void conn_flush_workqueue(struct drbd_tconn *tconn)
b411b363
PR
3995{
3996 struct drbd_wq_barrier barr;
3997
3998 barr.w.cb = w_prev_work_done;
0e29d163 3999 barr.w.tconn = tconn;
b411b363 4000 init_completion(&barr.done);
0e29d163 4001 drbd_queue_work(&tconn->data.work, &barr.w);
b411b363
PR
4002 wait_for_completion(&barr.done);
4003}
4004
360cc740 4005static void drbd_disconnect(struct drbd_tconn *tconn)
b411b363 4006{
bbeb641c 4007 enum drbd_conns oc;
b411b363 4008 int rv = SS_UNKNOWN_ERROR;
b411b363 4009
bbeb641c 4010 if (tconn->cstate == C_STANDALONE)
b411b363 4011 return;
b411b363
PR
4012
4013 /* asender does not clean up anything. it must not interfere, either */
360cc740
PR
4014 drbd_thread_stop(&tconn->asender);
4015 drbd_free_sock(tconn);
4016
4017 idr_for_each(&tconn->volumes, drbd_disconnected, tconn);
4018
4019 conn_info(tconn, "Connection closed\n");
4020
4021 spin_lock_irq(&tconn->req_lock);
bbeb641c
PR
4022 oc = tconn->cstate;
4023 if (oc >= C_UNCONNECTED)
4024 rv = _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
4025
360cc740
PR
4026 spin_unlock_irq(&tconn->req_lock);
4027
bbeb641c 4028 if (oc == C_DISCONNECTING) {
360cc740
PR
4029 wait_event(tconn->net_cnt_wait, atomic_read(&tconn->net_cnt) == 0);
4030
4031 crypto_free_hash(tconn->cram_hmac_tfm);
4032 tconn->cram_hmac_tfm = NULL;
4033
4034 kfree(tconn->net_conf);
4035 tconn->net_conf = NULL;
bbeb641c 4036 conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE);
360cc740
PR
4037 }
4038}
4039
4040static int drbd_disconnected(int vnr, void *p, void *data)
4041{
4042 struct drbd_conf *mdev = (struct drbd_conf *)p;
4043 enum drbd_fencing_p fp;
4044 unsigned int i;
b411b363 4045
85719573 4046 /* wait for current activity to cease. */
87eeee41 4047 spin_lock_irq(&mdev->tconn->req_lock);
b411b363
PR
4048 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
4049 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
4050 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
87eeee41 4051 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363
PR
4052
4053 /* We do not have data structures that would allow us to
4054 * get the rs_pending_cnt down to 0 again.
4055 * * On C_SYNC_TARGET we do not have any data structures describing
4056 * the pending RSDataRequest's we have sent.
4057 * * On C_SYNC_SOURCE there is no data structure that tracks
4058 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
4059 * And no, it is not the sum of the reference counts in the
4060 * resync_LRU. The resync_LRU tracks the whole operation including
4061 * the disk-IO, while the rs_pending_cnt only tracks the blocks
4062 * on the fly. */
4063 drbd_rs_cancel_all(mdev);
4064 mdev->rs_total = 0;
4065 mdev->rs_failed = 0;
4066 atomic_set(&mdev->rs_pending_cnt, 0);
4067 wake_up(&mdev->misc_wait);
4068
7fde2be9
PR
4069 del_timer(&mdev->request_timer);
4070
b411b363 4071 del_timer_sync(&mdev->resync_timer);
b411b363
PR
4072 resync_timer_fn((unsigned long)mdev);
4073
b411b363
PR
4074 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
4075 * w_make_resync_request etc. which may still be on the worker queue
4076 * to be "canceled" */
a21e9298 4077 drbd_flush_workqueue(mdev);
b411b363
PR
4078
4079 /* This also does reclaim_net_ee(). If we do this too early, we might
4080 * miss some resync ee and pages.*/
4081 drbd_process_done_ee(mdev);
4082
4083 kfree(mdev->p_uuid);
4084 mdev->p_uuid = NULL;
4085
fb22c402 4086 if (!is_susp(mdev->state))
2f5cdd0b 4087 tl_clear(mdev->tconn);
b411b363 4088
b411b363
PR
4089 drbd_md_sync(mdev);
4090
4091 fp = FP_DONT_CARE;
4092 if (get_ldev(mdev)) {
4093 fp = mdev->ldev->dc.fencing;
4094 put_ldev(mdev);
4095 }
4096
87f7be4c
PR
4097 if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
4098 drbd_try_outdate_peer_async(mdev);
b411b363 4099
20ceb2b2
LE
4100 /* serialize with bitmap writeout triggered by the state change,
4101 * if any. */
4102 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
4103
b411b363
PR
4104 /* tcp_close and release of sendpage pages can be deferred. I don't
4105 * want to use SO_LINGER, because apparently it can be deferred for
4106 * more than 20 seconds (longest time I checked).
4107 *
4108 * Actually we don't care for exactly when the network stack does its
4109 * put_page(), but release our reference on these pages right here.
4110 */
4111 i = drbd_release_ee(mdev, &mdev->net_ee);
4112 if (i)
4113 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
435f0740
LE
4114 i = atomic_read(&mdev->pp_in_use_by_net);
4115 if (i)
4116 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
b411b363
PR
4117 i = atomic_read(&mdev->pp_in_use);
4118 if (i)
45bb912b 4119 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
b411b363
PR
4120
4121 D_ASSERT(list_empty(&mdev->read_ee));
4122 D_ASSERT(list_empty(&mdev->active_ee));
4123 D_ASSERT(list_empty(&mdev->sync_ee));
4124 D_ASSERT(list_empty(&mdev->done_ee));
4125
4126 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
4127 atomic_set(&mdev->current_epoch->epoch_size, 0);
4128 D_ASSERT(list_empty(&mdev->current_epoch->list));
360cc740
PR
4129
4130 return 0;
b411b363
PR
4131}
4132
4133/*
4134 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
4135 * we can agree on is stored in agreed_pro_version.
4136 *
4137 * feature flags and the reserved array should be enough room for future
4138 * enhancements of the handshake protocol, and possible plugins...
4139 *
4140 * for now, they are expected to be zero, but ignored.
4141 */
8a22cccc 4142static int drbd_send_handshake(struct drbd_tconn *tconn)
b411b363 4143{
e6b3ea83 4144 /* ASSERT current == mdev->tconn->receiver ... */
8a22cccc 4145 struct p_handshake *p = &tconn->data.sbuf.handshake;
e8d17b01 4146 int err;
b411b363 4147
8a22cccc
PR
4148 if (mutex_lock_interruptible(&tconn->data.mutex)) {
4149 conn_err(tconn, "interrupted during initial handshake\n");
e8d17b01 4150 return -EINTR;
b411b363
PR
4151 }
4152
8a22cccc
PR
4153 if (tconn->data.socket == NULL) {
4154 mutex_unlock(&tconn->data.mutex);
e8d17b01 4155 return -EIO;
b411b363
PR
4156 }
4157
4158 memset(p, 0, sizeof(*p));
4159 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
4160 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
e8d17b01 4161 err = _conn_send_cmd(tconn, 0, tconn->data.socket, P_HAND_SHAKE,
ecf2363c 4162 &p->head, sizeof(*p), 0);
8a22cccc 4163 mutex_unlock(&tconn->data.mutex);
e8d17b01 4164 return err;
b411b363
PR
4165}
4166
4167/*
4168 * return values:
4169 * 1 yes, we have a valid connection
4170 * 0 oops, did not work out, please try again
4171 * -1 peer talks different language,
4172 * no point in trying again, please go standalone.
4173 */
65d11ed6 4174static int drbd_do_handshake(struct drbd_tconn *tconn)
b411b363 4175{
65d11ed6
PR
4176 /* ASSERT current == tconn->receiver ... */
4177 struct p_handshake *p = &tconn->data.rbuf.handshake;
02918be2 4178 const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
77351055 4179 struct packet_info pi;
a5c31904 4180 int err;
b411b363 4181
e8d17b01
AG
4182 err = drbd_send_handshake(tconn);
4183 if (err)
b411b363
PR
4184 return 0;
4185
69bc7bc3
AG
4186 err = drbd_recv_header(tconn, &pi);
4187 if (err)
b411b363
PR
4188 return 0;
4189
77351055 4190 if (pi.cmd != P_HAND_SHAKE) {
65d11ed6 4191 conn_err(tconn, "expected HandShake packet, received: %s (0x%04x)\n",
77351055 4192 cmdname(pi.cmd), pi.cmd);
b411b363
PR
4193 return -1;
4194 }
4195
77351055 4196 if (pi.size != expect) {
65d11ed6 4197 conn_err(tconn, "expected HandShake length: %u, received: %u\n",
77351055 4198 expect, pi.size);
b411b363
PR
4199 return -1;
4200 }
4201
a5c31904
AG
4202 err = drbd_recv_all_warn(tconn, &p->head.payload, expect);
4203 if (err)
b411b363 4204 return 0;
b411b363 4205
b411b363
PR
4206 p->protocol_min = be32_to_cpu(p->protocol_min);
4207 p->protocol_max = be32_to_cpu(p->protocol_max);
4208 if (p->protocol_max == 0)
4209 p->protocol_max = p->protocol_min;
4210
4211 if (PRO_VERSION_MAX < p->protocol_min ||
4212 PRO_VERSION_MIN > p->protocol_max)
4213 goto incompat;
4214
65d11ed6 4215 tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
b411b363 4216
65d11ed6
PR
4217 conn_info(tconn, "Handshake successful: "
4218 "Agreed network protocol version %d\n", tconn->agreed_pro_version);
b411b363
PR
4219
4220 return 1;
4221
4222 incompat:
65d11ed6 4223 conn_err(tconn, "incompatible DRBD dialects: "
b411b363
PR
4224 "I support %d-%d, peer supports %d-%d\n",
4225 PRO_VERSION_MIN, PRO_VERSION_MAX,
4226 p->protocol_min, p->protocol_max);
4227 return -1;
4228}
4229
4230#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
13e6037d 4231static int drbd_do_auth(struct drbd_tconn *tconn)
b411b363
PR
4232{
4233 dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
4234 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
b10d96cb 4235 return -1;
b411b363
PR
4236}
4237#else
4238#define CHALLENGE_LEN 64
b10d96cb
JT
4239
4240/* Return value:
4241 1 - auth succeeded,
4242 0 - failed, try again (network error),
4243 -1 - auth failed, don't try again.
4244*/
4245
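/* CRAM-HMAC style exchange: both sides send a random challenge; each side
   replies with the HMAC (keyed with the shared secret) of the peer's
   challenge, and verifies the peer's reply against the HMAC of its own
   challenge. */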
13e6037d 4246static int drbd_do_auth(struct drbd_tconn *tconn)
b411b363
PR
4247{
4248 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4249 struct scatterlist sg;
4250 char *response = NULL;
4251 char *right_response = NULL;
4252 char *peers_ch = NULL;
13e6037d 4253 unsigned int key_len = strlen(tconn->net_conf->shared_secret);
b411b363
PR
4254 unsigned int resp_size;
4255 struct hash_desc desc;
77351055 4256 struct packet_info pi;
69bc7bc3 4257 int err, rv;
b411b363 4258
13e6037d 4259 desc.tfm = tconn->cram_hmac_tfm;
b411b363
PR
4260 desc.flags = 0;
4261
13e6037d
PR
4262 rv = crypto_hash_setkey(tconn->cram_hmac_tfm,
4263 (u8 *)tconn->net_conf->shared_secret, key_len);
b411b363 4264 if (rv) {
13e6037d 4265 conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
b10d96cb 4266 rv = -1;
b411b363
PR
4267 goto fail;
4268 }
4269
4270 get_random_bytes(my_challenge, CHALLENGE_LEN);
4271
ce9879cb 4272 rv = !conn_send_cmd2(tconn, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
b411b363
PR
4273 if (!rv)
4274 goto fail;
4275
69bc7bc3
AG
4276 err = drbd_recv_header(tconn, &pi);
4277 if (err) {
4278 rv = 0;
b411b363 4279 goto fail;
69bc7bc3 4280 }
b411b363 4281
77351055 4282 if (pi.cmd != P_AUTH_CHALLENGE) {
13e6037d 4283 conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
77351055 4284 cmdname(pi.cmd), pi.cmd);
b411b363
PR
4285 rv = 0;
4286 goto fail;
4287 }
4288
77351055 4289 if (pi.size > CHALLENGE_LEN * 2) {
13e6037d 4290 conn_err(tconn, "expected AuthChallenge payload too big.\n");
b10d96cb 4291 rv = -1;
b411b363
PR
4292 goto fail;
4293 }
4294
77351055 4295 peers_ch = kmalloc(pi.size, GFP_NOIO);
b411b363 4296 if (peers_ch == NULL) {
13e6037d 4297 conn_err(tconn, "kmalloc of peers_ch failed\n");
b10d96cb 4298 rv = -1;
b411b363
PR
4299 goto fail;
4300 }
4301
a5c31904
AG
4302 err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
4303 if (err) {
b411b363
PR
4304 rv = 0;
4305 goto fail;
4306 }
4307
13e6037d 4308 resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
b411b363
PR
4309 response = kmalloc(resp_size, GFP_NOIO);
4310 if (response == NULL) {
13e6037d 4311 conn_err(tconn, "kmalloc of response failed\n");
b10d96cb 4312 rv = -1;
b411b363
PR
4313 goto fail;
4314 }
4315
4316 sg_init_table(&sg, 1);
77351055 4317 sg_set_buf(&sg, peers_ch, pi.size);
b411b363
PR
4318
4319 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4320 if (rv) {
13e6037d 4321 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
b10d96cb 4322 rv = -1;
b411b363
PR
4323 goto fail;
4324 }
4325
ce9879cb 4326 rv = !conn_send_cmd2(tconn, P_AUTH_RESPONSE, response, resp_size);
b411b363
PR
4327 if (!rv)
4328 goto fail;
4329
69bc7bc3
AG
4330 err = drbd_recv_header(tconn, &pi);
4331 if (err) {
4332 rv = 0;
b411b363 4333 goto fail;
69bc7bc3 4334 }
b411b363 4335
77351055 4336 if (pi.cmd != P_AUTH_RESPONSE) {
13e6037d 4337 conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
77351055 4338 cmdname(pi.cmd), pi.cmd);
b411b363
PR
4339 rv = 0;
4340 goto fail;
4341 }
4342
77351055 4343 if (pi.size != resp_size) {
13e6037d 4344 conn_err(tconn, "expected AuthResponse payload of wrong size\n");
b411b363
PR
4345 rv = 0;
4346 goto fail;
4347 }
4348
a5c31904
AG
4349 err = drbd_recv_all_warn(tconn, response , resp_size);
4350 if (err) {
b411b363
PR
4351 rv = 0;
4352 goto fail;
4353 }
4354
4355 right_response = kmalloc(resp_size, GFP_NOIO);
2d1ee87d 4356 if (right_response == NULL) {
13e6037d 4357 conn_err(tconn, "kmalloc of right_response failed\n");
b10d96cb 4358 rv = -1;
b411b363
PR
4359 goto fail;
4360 }
4361
4362 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4363
4364 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4365 if (rv) {
13e6037d 4366 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
b10d96cb 4367 rv = -1;
b411b363
PR
4368 goto fail;
4369 }
4370
4371 rv = !memcmp(response, right_response, resp_size);
4372
4373 if (rv)
13e6037d
PR
4374 conn_info(tconn, "Peer authenticated using %d bytes of '%s' HMAC\n",
4375 resp_size, tconn->net_conf->cram_hmac_alg);
b10d96cb
JT
4376 else
4377 rv = -1;
b411b363
PR
4378
4379 fail:
4380 kfree(peers_ch);
4381 kfree(response);
4382 kfree(right_response);
4383
4384 return rv;
4385}
4386#endif
4387
4388int drbdd_init(struct drbd_thread *thi)
4389{
392c8801 4390 struct drbd_tconn *tconn = thi->tconn;
b411b363
PR
4391 int h;
4392
4d641dd7 4393 conn_info(tconn, "receiver (re)started\n");
b411b363
PR
4394
4395 do {
4d641dd7 4396 h = drbd_connect(tconn);
b411b363 4397 if (h == 0) {
4d641dd7 4398 drbd_disconnect(tconn);
20ee6390 4399 schedule_timeout_interruptible(HZ);
b411b363
PR
4400 }
4401 if (h == -1) {
4d641dd7 4402 conn_warn(tconn, "Discarding network configuration.\n");
bbeb641c 4403 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363
PR
4404 }
4405 } while (h == 0);
4406
4407 if (h > 0) {
4d641dd7
PR
4408 if (get_net_conf(tconn)) {
4409 drbdd(tconn);
4410 put_net_conf(tconn);
b411b363
PR
4411 }
4412 }
4413
4d641dd7 4414 drbd_disconnect(tconn);
b411b363 4415
4d641dd7 4416 conn_info(tconn, "receiver terminated\n");
b411b363
PR
4417 return 0;
4418}
4419
4420/* ********* acknowledge sender ******** */
4421
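/* peer's reply to a connection-wide state change request: record success or
   failure in the tconn flags and wake up the waiter */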
e4f78ede
PR
4422static int got_conn_RqSReply(struct drbd_tconn *tconn, enum drbd_packet cmd)
4423{
4424 struct p_req_state_reply *p = &tconn->meta.rbuf.req_state_reply;
4425 int retcode = be32_to_cpu(p->retcode);
4426
4427 if (retcode >= SS_SUCCESS) {
4428 set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
4429 } else {
4430 set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
4431 conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
4432 drbd_set_st_err_str(retcode), retcode);
4433 }
4434 wake_up(&tconn->ping_wait);
4435
4436 return true;
4437}
4438
d8763023 4439static int got_RqSReply(struct drbd_conf *mdev, enum drbd_packet cmd)
b411b363 4440{
257d0af6 4441 struct p_req_state_reply *p = &mdev->tconn->meta.rbuf.req_state_reply;
b411b363
PR
4442 int retcode = be32_to_cpu(p->retcode);
4443
e4f78ede
PR
4444 if (retcode >= SS_SUCCESS) {
4445 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4446 } else {
4447 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4448 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4449 drbd_set_st_err_str(retcode), retcode);
b411b363 4450 }
e4f78ede
PR
4451 wake_up(&mdev->state_wait);
4452
81e84650 4453 return true;
b411b363
PR
4454}
4455
f19e4f8b 4456static int got_Ping(struct drbd_tconn *tconn, enum drbd_packet cmd)
b411b363 4457{
f19e4f8b 4458 return drbd_send_ping_ack(tconn);
b411b363
PR
4459
4460}
4461
f19e4f8b 4462static int got_PingAck(struct drbd_tconn *tconn, enum drbd_packet cmd)
b411b363
PR
4463{
4464 /* restore idle timeout */
2a67d8b9
PR
4465 tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
4466 if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
4467 wake_up(&tconn->ping_wait);
b411b363 4468
81e84650 4469 return true;
b411b363
PR
4470}
4471
d8763023 4472static int got_IsInSync(struct drbd_conf *mdev, enum drbd_packet cmd)
b411b363 4473{
257d0af6 4474 struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack;
b411b363
PR
4475 sector_t sector = be64_to_cpu(p->sector);
4476 int blksize = be32_to_cpu(p->blksize);
4477
31890f4a 4478 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
b411b363
PR
4479
4480 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4481
1d53f09e
LE
4482 if (get_ldev(mdev)) {
4483 drbd_rs_complete_io(mdev, sector);
4484 drbd_set_in_sync(mdev, sector, blksize);
4485 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4486 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4487 put_ldev(mdev);
4488 }
b411b363 4489 dec_rs_pending(mdev);
778f271d 4490 atomic_add(blksize >> 9, &mdev->rs_sect_in);
b411b363 4491
81e84650 4492 return true;
b411b363
PR
4493}
4494
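/* Look up the request identified by (id, sector) in the given tree and apply
   the state machine event 'what' to it; completes the master bio if that
   finished the request. Returns false if the request could not be found. */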
bc9c5c41
AG
4495static int
4496validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
4497 struct rb_root *root, const char *func,
4498 enum drbd_req_event what, bool missing_ok)
b411b363
PR
4499{
4500 struct drbd_request *req;
4501 struct bio_and_error m;
4502
87eeee41 4503 spin_lock_irq(&mdev->tconn->req_lock);
bc9c5c41 4504 req = find_request(mdev, root, id, sector, missing_ok, func);
b411b363 4505 if (unlikely(!req)) {
87eeee41 4506 spin_unlock_irq(&mdev->tconn->req_lock);
81e84650 4507 return false;
b411b363
PR
4508 }
4509 __req_mod(req, what, &m);
87eeee41 4510 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363
PR
4511
4512 if (m.bio)
4513 complete_master_bio(mdev, &m);
81e84650 4514 return true;
b411b363
PR
4515}
4516
d8763023 4517static int got_BlockAck(struct drbd_conf *mdev, enum drbd_packet cmd)
b411b363 4518{
257d0af6 4519 struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack;
b411b363
PR
4520 sector_t sector = be64_to_cpu(p->sector);
4521 int blksize = be32_to_cpu(p->blksize);
4522 enum drbd_req_event what;
4523
4524 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4525
579b57ed 4526 if (p->block_id == ID_SYNCER) {
b411b363
PR
4527 drbd_set_in_sync(mdev, sector, blksize);
4528 dec_rs_pending(mdev);
81e84650 4529 return true;
b411b363 4530 }
257d0af6 4531 switch (cmd) {
b411b363 4532 case P_RS_WRITE_ACK:
89e58e75 4533 D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
8554df1c 4534 what = WRITE_ACKED_BY_PEER_AND_SIS;
b411b363
PR
4535 break;
4536 case P_WRITE_ACK:
89e58e75 4537 D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
8554df1c 4538 what = WRITE_ACKED_BY_PEER;
b411b363
PR
4539 break;
4540 case P_RECV_ACK:
89e58e75 4541 D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_B);
8554df1c 4542 what = RECV_ACKED_BY_PEER;
b411b363 4543 break;
7be8da07 4544 case P_DISCARD_WRITE:
89e58e75 4545 D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
7be8da07
AG
4546 what = DISCARD_WRITE;
4547 break;
4548 case P_RETRY_WRITE:
4549 D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
4550 what = POSTPONE_WRITE;
b411b363
PR
4551 break;
4552 default:
4553 D_ASSERT(0);
81e84650 4554 return false;
b411b363
PR
4555 }
4556
4557 return validate_req_change_req_state(mdev, p->block_id, sector,
bc9c5c41
AG
4558 &mdev->write_requests, __func__,
4559 what, false);
b411b363
PR
4560}
4561
d8763023 4562static int got_NegAck(struct drbd_conf *mdev, enum drbd_packet cmd)
b411b363 4563{
257d0af6 4564 struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack;
b411b363 4565 sector_t sector = be64_to_cpu(p->sector);
2deb8336 4566 int size = be32_to_cpu(p->blksize);
89e58e75
PR
4567 bool missing_ok = mdev->tconn->net_conf->wire_protocol == DRBD_PROT_A ||
4568 mdev->tconn->net_conf->wire_protocol == DRBD_PROT_B;
c3afd8f5 4569 bool found;
b411b363
PR
4570
4571 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4572
579b57ed 4573 if (p->block_id == ID_SYNCER) {
b411b363
PR
4574 dec_rs_pending(mdev);
4575 drbd_rs_failed_io(mdev, sector, size);
81e84650 4576 return true;
b411b363 4577 }
2deb8336 4578
c3afd8f5 4579 found = validate_req_change_req_state(mdev, p->block_id, sector,
bc9c5c41 4580 &mdev->write_requests, __func__,
8554df1c 4581 NEG_ACKED, missing_ok);
c3afd8f5
AG
4582 if (!found) {
4583 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
4584 The master bio might already be completed, therefore the
4585 request is no longer in the collision hash. */
4586 /* In Protocol B we might already have got a P_RECV_ACK
4587 but then get a P_NEG_ACK afterwards. */
4588 if (!missing_ok)
2deb8336 4589 return false;
c3afd8f5 4590 drbd_set_out_of_sync(mdev, sector, size);
2deb8336 4591 }
2deb8336 4592 return true;
b411b363
PR
4593}
4594
d8763023 4595static int got_NegDReply(struct drbd_conf *mdev, enum drbd_packet cmd)
b411b363 4596{
257d0af6 4597 struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack;
b411b363
PR
4598 sector_t sector = be64_to_cpu(p->sector);
4599
4600 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
7be8da07 4601
b411b363
PR
4602 dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4603 (unsigned long long)sector, be32_to_cpu(p->blksize));
4604
4605 return validate_req_change_req_state(mdev, p->block_id, sector,
bc9c5c41 4606 &mdev->read_requests, __func__,
8554df1c 4607 NEG_ACKED, false);
b411b363
PR
4608}
4609
d8763023 4610static int got_NegRSDReply(struct drbd_conf *mdev, enum drbd_packet cmd)
b411b363
PR
4611{
4612 sector_t sector;
4613 int size;
257d0af6 4614 struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack;
b411b363
PR
4615
4616 sector = be64_to_cpu(p->sector);
4617 size = be32_to_cpu(p->blksize);
b411b363
PR
4618
4619 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4620
4621 dec_rs_pending(mdev);
4622
4623 if (get_ldev_if_state(mdev, D_FAILED)) {
4624 drbd_rs_complete_io(mdev, sector);
257d0af6 4625 switch (cmd) {
d612d309
PR
4626 case P_NEG_RS_DREPLY:
4627 drbd_rs_failed_io(mdev, sector, size);
4628 case P_RS_CANCEL:
4629 break;
4630 default:
4631 D_ASSERT(0);
4632 put_ldev(mdev);
4633 return false;
4634 }
b411b363
PR
4635 put_ldev(mdev);
4636 }
4637
81e84650 4638 return true;
b411b363
PR
4639}
4640
d8763023 4641static int got_BarrierAck(struct drbd_conf *mdev, enum drbd_packet cmd)
b411b363 4642{
257d0af6 4643 struct p_barrier_ack *p = &mdev->tconn->meta.rbuf.barrier_ack;
b411b363 4644
2f5cdd0b 4645 tl_release(mdev->tconn, p->barrier, be32_to_cpu(p->set_size));
b411b363 4646
c4752ef1
PR
4647 if (mdev->state.conn == C_AHEAD &&
4648 atomic_read(&mdev->ap_in_flight) == 0 &&
370a43e7
PR
4649 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
4650 mdev->start_resync_timer.expires = jiffies + HZ;
4651 add_timer(&mdev->start_resync_timer);
c4752ef1
PR
4652 }
4653
81e84650 4654 return true;
b411b363
PR
4655}
4656
d8763023 4657static int got_OVResult(struct drbd_conf *mdev, enum drbd_packet cmd)
b411b363 4658{
257d0af6 4659 struct p_block_ack *p = &mdev->tconn->meta.rbuf.block_ack;
b411b363
PR
4660 struct drbd_work *w;
4661 sector_t sector;
4662 int size;
4663
4664 sector = be64_to_cpu(p->sector);
4665 size = be32_to_cpu(p->blksize);
4666
4667 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4668
4669 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
8f7bed77 4670 drbd_ov_out_of_sync_found(mdev, sector, size);
b411b363 4671 else
8f7bed77 4672 ov_out_of_sync_print(mdev);
b411b363 4673
1d53f09e 4674 if (!get_ldev(mdev))
81e84650 4675 return true;
1d53f09e 4676
b411b363
PR
4677 drbd_rs_complete_io(mdev, sector);
4678 dec_rs_pending(mdev);
4679
ea5442af
LE
4680 --mdev->ov_left;
4681
4682 /* let's advance progress step marks only for every other megabyte */
4683 if ((mdev->ov_left & 0x200) == 0x200)
4684 drbd_advance_rs_marks(mdev, mdev->ov_left);
4685
4686 if (mdev->ov_left == 0) {
b411b363
PR
4687 w = kmalloc(sizeof(*w), GFP_NOIO);
4688 if (w) {
4689 w->cb = w_ov_finished;
a21e9298 4690 w->mdev = mdev;
e42325a5 4691 drbd_queue_work_front(&mdev->tconn->data.work, w);
b411b363
PR
4692 } else {
4693 dev_err(DEV, "kmalloc(w) failed.");
8f7bed77 4694 ov_out_of_sync_print(mdev);
b411b363
PR
4695 drbd_resync_finished(mdev);
4696 }
4697 }
1d53f09e 4698 put_ldev(mdev);
81e84650 4699 return true;
b411b363
PR
4700}
4701
d8763023 4702static int got_skip(struct drbd_conf *mdev, enum drbd_packet cmd)
0ced55a3 4703{
81e84650 4704 return true;
0ced55a3
PR
4705}
4706
32862ec7
PR
4707static int tconn_process_done_ee(struct drbd_tconn *tconn)
4708{
082a3439
PR
4709 struct drbd_conf *mdev;
4710 int i, not_empty = 0;
32862ec7
PR
4711
4712 do {
4713 clear_bit(SIGNAL_ASENDER, &tconn->flags);
4714 flush_signals(current);
082a3439 4715 idr_for_each_entry(&tconn->volumes, mdev, i) {
e2b3032b 4716 if (drbd_process_done_ee(mdev))
082a3439
PR
4717 return 1; /* error */
4718 }
32862ec7 4719 set_bit(SIGNAL_ASENDER, &tconn->flags);
082a3439
PR
4720
4721 spin_lock_irq(&tconn->req_lock);
4722 idr_for_each_entry(&tconn->volumes, mdev, i) {
4723 not_empty = !list_empty(&mdev->done_ee);
4724 if (not_empty)
4725 break;
4726 }
4727 spin_unlock_irq(&tconn->req_lock);
32862ec7
PR
4728 } while (not_empty);
4729
4730 return 0;
4731}
4732
7201b972
AG
4733struct asender_cmd {
4734 size_t pkt_size;
a4fbda8e
PR
4735 enum mdev_or_conn fa_type; /* first argument's type */
4736 union {
4737 int (*mdev_fn)(struct drbd_conf *mdev, enum drbd_packet cmd);
4738 int (*conn_fn)(struct drbd_tconn *tconn, enum drbd_packet cmd);
4739 };
7201b972
AG
4740};
4741
4742static struct asender_cmd asender_tbl[] = {
f19e4f8b
PR
4743 [P_PING] = { sizeof(struct p_header), CONN, { .conn_fn = got_Ping } },
4744 [P_PING_ACK] = { sizeof(struct p_header), CONN, { .conn_fn = got_PingAck } },
a4fbda8e
PR
4745 [P_RECV_ACK] = { sizeof(struct p_block_ack), MDEV, { got_BlockAck } },
4746 [P_WRITE_ACK] = { sizeof(struct p_block_ack), MDEV, { got_BlockAck } },
4747 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), MDEV, { got_BlockAck } },
4748 [P_DISCARD_WRITE] = { sizeof(struct p_block_ack), MDEV, { got_BlockAck } },
4749 [P_NEG_ACK] = { sizeof(struct p_block_ack), MDEV, { got_NegAck } },
4750 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), MDEV, { got_NegDReply } },
4751 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), MDEV, { got_NegRSDReply } },
4752 [P_OV_RESULT] = { sizeof(struct p_block_ack), MDEV, { got_OVResult } },
4753 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), MDEV, { got_BarrierAck } },
4754 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), MDEV, { got_RqSReply } },
4755 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), MDEV, { got_IsInSync } },
4756 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), MDEV, { got_skip } },
4757 [P_RS_CANCEL] = { sizeof(struct p_block_ack), MDEV, { got_NegRSDReply } },
e4f78ede 4758 [P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), CONN, {.conn_fn = got_conn_RqSReply}},
a4fbda8e 4759 [P_RETRY_WRITE] = { sizeof(struct p_block_ack), MDEV, { got_BlockAck } },
7201b972
AG
4760};
4761
b411b363
PR
4762int drbd_asender(struct drbd_thread *thi)
4763{
392c8801 4764 struct drbd_tconn *tconn = thi->tconn;
32862ec7 4765 struct p_header *h = &tconn->meta.rbuf.header;
b411b363 4766 struct asender_cmd *cmd = NULL;
77351055 4767 struct packet_info pi;
257d0af6 4768 int rv;
b411b363
PR
4769 void *buf = h;
4770 int received = 0;
257d0af6 4771 int expect = sizeof(struct p_header);
f36af18c 4772 int ping_timeout_active = 0;
b411b363 4773
b411b363
PR
4774 current->policy = SCHED_RR; /* Make this a realtime task! */
4775 current->rt_priority = 2; /* more important than all other tasks */
4776
e77a0a5c 4777 while (get_t_state(thi) == RUNNING) {
80822284 4778 drbd_thread_current_set_cpu(thi);
32862ec7 4779 if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
2a67d8b9 4780 if (!drbd_send_ping(tconn)) {
32862ec7 4781 conn_err(tconn, "drbd_send_ping has failed\n");
841ce241
AG
4782 goto reconnect;
4783 }
32862ec7
PR
4784 tconn->meta.socket->sk->sk_rcvtimeo =
4785 tconn->net_conf->ping_timeo*HZ/10;
f36af18c 4786 ping_timeout_active = 1;
b411b363
PR
4787 }
4788
32862ec7
PR
4789 /* TODO: conditionally cork; it may hurt latency if we cork without
4790 much to send */
4791 if (!tconn->net_conf->no_cork)
4792 drbd_tcp_cork(tconn->meta.socket);
082a3439
PR
4793 if (tconn_process_done_ee(tconn)) {
4794 conn_err(tconn, "tconn_process_done_ee() failed\n");
32862ec7 4795 goto reconnect;
082a3439 4796 }
b411b363 4797 /* but unconditionally uncork unless disabled */
32862ec7
PR
4798 if (!tconn->net_conf->no_cork)
4799 drbd_tcp_uncork(tconn->meta.socket);
b411b363
PR
4800
4801 /* short circuit, recv_msg would return EINTR anyways. */
4802 if (signal_pending(current))
4803 continue;
4804
32862ec7
PR
4805 rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
4806 clear_bit(SIGNAL_ASENDER, &tconn->flags);
b411b363
PR
4807
4808 flush_signals(current);
4809
4810 /* Note:
4811 * -EINTR (on meta) we got a signal
4812 * -EAGAIN (on meta) rcvtimeo expired
4813 * -ECONNRESET other side closed the connection
4814 * -ERESTARTSYS (on data) we got a signal
4815 * rv < 0 other than above: unexpected error!
4816 * rv == expected: full header or command
4817 * rv < expected: "woken" by signal during receive
4818 * rv == 0 : "connection shut down by peer"
4819 */
4820 if (likely(rv > 0)) {
4821 received += rv;
4822 buf += rv;
4823 } else if (rv == 0) {
32862ec7 4824 conn_err(tconn, "meta connection shut down by peer.\n");
b411b363
PR
4825 goto reconnect;
4826 } else if (rv == -EAGAIN) {
cb6518cb
LE
4827 /* If the data socket received something meanwhile,
4828 * that is good enough: peer is still alive. */
32862ec7
PR
4829 if (time_after(tconn->last_received,
4830 jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
cb6518cb 4831 continue;
f36af18c 4832 if (ping_timeout_active) {
32862ec7 4833 conn_err(tconn, "PingAck did not arrive in time.\n");
b411b363
PR
4834 goto reconnect;
4835 }
32862ec7 4836 set_bit(SEND_PING, &tconn->flags);
b411b363
PR
4837 continue;
4838 } else if (rv == -EINTR) {
4839 continue;
4840 } else {
32862ec7 4841 conn_err(tconn, "sock_recvmsg returned %d\n", rv);
b411b363
PR
4842 goto reconnect;
4843 }
4844
4845 if (received == expect && cmd == NULL) {
8172f3e9 4846 if (decode_header(tconn, h, &pi))
b411b363 4847 goto reconnect;
7201b972
AG
4848 cmd = &asender_tbl[pi.cmd];
4849 if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd) {
32862ec7 4850 conn_err(tconn, "unknown command %d on meta (l: %d)\n",
77351055 4851 pi.cmd, pi.size);
b411b363
PR
4852 goto disconnect;
4853 }
4854 expect = cmd->pkt_size;
77351055 4855 if (pi.size != expect - sizeof(struct p_header)) {
32862ec7 4856 conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
77351055 4857 pi.cmd, pi.size);
b411b363 4858 goto reconnect;
257d0af6 4859 }
b411b363
PR
4860 }
4861 if (received == expect) {
a4fbda8e
PR
4862 bool rv;
4863
4864 if (cmd->fa_type == CONN) {
4865 rv = cmd->conn_fn(tconn, pi.cmd);
4866 } else {
4867 struct drbd_conf *mdev = vnr_to_mdev(tconn, pi.vnr);
4868 rv = cmd->mdev_fn(mdev, pi.cmd);
4869 }
4870
4871 if (!rv)
b411b363
PR
4872 goto reconnect;
4873
a4fbda8e
PR
4874 tconn->last_received = jiffies;
4875
f36af18c
LE
4876 /* the idle_timeout (ping-int)
4877 * has been restored in got_PingAck() */
7201b972 4878 if (cmd == &asender_tbl[P_PING_ACK])
f36af18c
LE
4879 ping_timeout_active = 0;
4880
b411b363
PR
4881 buf = h;
4882 received = 0;
257d0af6 4883 expect = sizeof(struct p_header);
b411b363
PR
4884 cmd = NULL;
4885 }
4886 }
4887
4888 if (0) {
4889reconnect:
bbeb641c 4890 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
b411b363
PR
4891 }
4892 if (0) {
4893disconnect:
bbeb641c 4894 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363 4895 }
32862ec7 4896 clear_bit(SIGNAL_ASENDER, &tconn->flags);
b411b363 4897
32862ec7 4898 conn_info(tconn, "asender terminated\n");
b411b363
PR
4899
4900 return 0;
4901}