/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

struct packet_info {
	enum drbd_packet cmd;
	unsigned int size;
	unsigned int vnr;
	void *data;
};

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_features(struct drbd_tconn *tconn);
static int drbd_do_auth(struct drbd_tconn *tconn);
static int drbd_disconnected(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

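/*
 * Illustrative sketch (not part of the driver) of how the helpers above are
 * meant to be combined; page_chain_next() and the iteration macros are
 * defined in drbd_int.h.  Take a few pages off the global pool under the
 * pool lock, find the tail outside the lock, splice back under the lock:
 *
 *	struct page *four, *tail;
 *
 *	spin_lock(&drbd_pp_lock);
 *	four = page_chain_del(&drbd_pp_pool, 4);
 *	spin_unlock(&drbd_pp_lock);
 *
 *	if (four) {
 *		tail = page_chain_tail(four, NULL);
 *		spin_lock(&drbd_pp_lock);
 *		page_chain_add(&drbd_pp_pool, four, tail);
 *		spin_unlock(&drbd_pp_lock);
 *	}
 */
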
static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
				       unsigned int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	unsigned int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_alloc_pages will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
					   struct list_head *to_be_freed)
{
	struct drbd_peer_request *peer_req;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first not finished we can
	   stop to examine the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		peer_req = list_entry(le, struct drbd_peer_request, w.list);
		if (drbd_peer_req_has_active_page(peer_req))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_finished_net_peer_reqs(mdev, &reclaimed);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(mdev, peer_req);
}

/**
 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
			      bool retry)
{
	struct page *page = NULL;
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	int mxb;

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;
	rcu_read_unlock();

	if (atomic_read(&mdev->pp_in_use) < mxb)
		page = __drbd_alloc_pages(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mxb) {
			page = __drbd_alloc_pages(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_alloc_pages interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}

/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
 * Is also used from inside another spin_lock_irq(&mdev->tconn->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (page == NULL)
		return;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

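/*
 * Illustrative pairing (not part of the driver) of the two sides above:
 * drbd_alloc_pages() accounts the chain in pp_in_use, drbd_free_pages()
 * undoes that accounting and wakes anyone blocked in the allocator's
 * wait loop.
 *
 *	struct page *page = drbd_alloc_pages(mdev, nr_pages, true);
 *	if (!page)
 *		return NULL;	(with retry=true, only on a pending signal)
 *	...
 *	drbd_free_pages(mdev, page, 0);
 */
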
/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_peer_req()
 drbd_alloc_peer_req()
 drbd_free_peer_reqs()
 drbd_ee_fix_bhs()
 drbd_finish_peer_reqs()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
		    unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_peer_request *peer_req;
	struct page *page = NULL;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!peer_req) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "%s: allocation failed\n", __func__);
		return NULL;
	}

	if (data_size) {
		page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
		if (!page)
			goto fail;
	}

	drbd_clear_interval(&peer_req->i);
	peer_req->i.size = data_size;
	peer_req->i.sector = sector;
	peer_req->i.local = false;
	peer_req->i.waiting = false;

	peer_req->epoch = NULL;
	peer_req->w.mdev = mdev;
	peer_req->pages = page;
	atomic_set(&peer_req->pending_bios, 0);
	peer_req->flags = 0;
	/*
	 * The block_id is opaque to the receiver.  It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	peer_req->block_id = id;

	return peer_req;

 fail:
	mempool_free(peer_req, drbd_ee_mempool);
	return NULL;
}

void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
			  int is_net)
{
	if (peer_req->flags & EE_HAS_DIGEST)
		kfree(peer_req->digest);
	drbd_free_pages(mdev, peer_req->pages, is_net);
	D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
	D_ASSERT(drbd_interval_empty(&peer_req->i));
	mempool_free(peer_req, drbd_ee_mempool);
}

int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_peer_request *peer_req, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		__drbd_free_peer_req(mdev, peer_req, is_net);
		count++;
	}
	return count;
}

/*
 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
 */
static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;
	int err = 0;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_finished_net_peer_reqs(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(mdev, peer_req);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_superseded.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		int err2;

		/* list_del not necessary, next/prev members not touched */
		err2 = peer_req->w.cb(&peer_req->w, !!err);
		if (!err)
			err = err2;
		drbd_free_peer_req(mdev, peer_req);
	}
	wake_up(&mdev->ee_wait);

	return err;
}

static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
				     struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->tconn->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->tconn->req_lock);
	}
}

static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
				    struct list_head *head)
{
	spin_lock_irq(&mdev->tconn->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->tconn->req_lock);
}

static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
{
	int rv;

	rv = drbd_recv_short(tconn->data.socket, buf, size, 0);

	if (rv < 0) {
		if (rv == -ECONNRESET)
			conn_info(tconn, "sock was reset by peer\n");
		else if (rv != -ERESTARTSYS)
			conn_err(tconn, "sock_recvmsg returned %d\n", rv);
	} else if (rv == 0) {
		if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
			long t;
			rcu_read_lock();
			t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
			rcu_read_unlock();

			t = wait_event_timeout(tconn->ping_wait, tconn->cstate < C_WF_REPORT_PARAMS, t);

			if (t)
				goto out;
		}
		conn_info(tconn, "sock was shut down by peer\n");
	}

	if (rv != size)
		conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);

out:
	return rv;
}

static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
{
	int err;

	err = drbd_recv(tconn, buf, size);
	if (err != size) {
		if (err >= 0)
			err = -EIO;
	} else
		err = 0;
	return err;
}

static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
{
	int err;

	err = drbd_recv_all(tconn, buf, size);
	if (err && !signal_pending(current))
		conn_warn(tconn, "short read (expected size %d)\n", (int)size);
	return err;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

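/*
 * Illustrative ordering sketch (not part of the driver): per the tcp(7)
 * quote above, the buffer sizes must be in place before the connection
 * exists, so both connection paths below call drbd_setbufsize() between
 * socket creation and bind()/connect() resp. listen():
 *
 *	sock_create_kern(..., &sock);
 *	drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);
 *	sock->ops->bind(sock, ...);
 *	sock->ops->connect(sock, ...);	(or ->listen(sock, backlog))
 */
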
static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	struct sockaddr_in6 peer_in6;
	struct net_conf *nc;
	int err, peer_addr_len, my_addr_len;
	int sndbuf_size, rcvbuf_size, connect_int;
	int disconnect_on_error = 1;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return NULL;
	}
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	connect_int = nc->connect_int;
	rcu_read_unlock();

	my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6));
	memcpy(&src_in6, &tconn->my_addr, my_addr_len);

	if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6));
	memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = connect_int * HZ;
	drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	what = "bind before connect";
	err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			conn_err(tconn, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
	}

	return sock;
}

struct accept_wait_data {
	struct drbd_tconn *tconn;
	struct socket *s_listen;
	struct completion door_bell;
	void (*original_sk_state_change)(struct sock *sk);
};

static void drbd_incoming_connection(struct sock *sk)
{
	struct accept_wait_data *ad = sk->sk_user_data;
	void (*state_change)(struct sock *sk);

	state_change = ad->original_sk_state_change;
	if (sk->sk_state == TCP_ESTABLISHED)
		complete(&ad->door_bell);
	state_change(sk);
}

static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_data *ad)
{
	int err, sndbuf_size, rcvbuf_size, my_addr_len;
	struct sockaddr_in6 my_addr;
	struct socket *s_listen;
	struct net_conf *nc;
	const char *what;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -EIO;
	}
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	rcu_read_unlock();

	my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6));
	memcpy(&my_addr, &tconn->my_addr, my_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
	drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
	if (err < 0)
		goto out;

	ad->s_listen = s_listen;
	write_lock_bh(&s_listen->sk->sk_callback_lock);
	ad->original_sk_state_change = s_listen->sk->sk_state_change;
	s_listen->sk->sk_state_change = drbd_incoming_connection;
	s_listen->sk->sk_user_data = ad;
	write_unlock_bh(&s_listen->sk->sk_callback_lock);

	what = "listen";
	err = s_listen->ops->listen(s_listen, 5);
	if (err < 0)
		goto out;

	return 0;
out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			conn_err(tconn, "%s failed, err = %d\n", what, err);
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}

	return -EIO;
}

static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_state_change = ad->original_sk_state_change;
	sk->sk_user_data = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

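/*
 * Illustrative summary (not part of the driver) of the callback interception
 * set up in prepare_listen_socket() and undone in unregister_state_change():
 * the original sk_state_change is stashed in struct accept_wait_data, the
 * replacement completes the door_bell on TCP_ESTABLISHED before chaining to
 * the saved callback, and both updates happen under sk_callback_lock:
 *
 *	write_lock_bh(&sk->sk_callback_lock);
 *	ad->original_sk_state_change = sk->sk_state_change;
 *	sk->sk_state_change = drbd_incoming_connection;
 *	sk->sk_user_data = ad;
 *	write_unlock_bh(&sk->sk_callback_lock);
 *
 * drbd_wait_for_connect() below then just sleeps on the completion instead
 * of polling accept().
 */
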
static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct accept_wait_data *ad)
{
	int timeo, connect_int, err = 0;
	struct socket *s_estab = NULL;
	struct net_conf *nc;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return NULL;
	}
	connect_int = nc->connect_int;
	rcu_read_unlock();

	timeo = connect_int * HZ;
	/* 28.5% random jitter */
	timeo += (prandom_u32() & 1) ? timeo / 7 : -timeo / 7;

	err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
	if (err <= 0)
		return NULL;

	err = kernel_accept(ad->s_listen, &s_estab, 0);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			conn_err(tconn, "accept failed, err = %d\n", err);
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}

	if (s_estab)
		unregister_state_change(s_estab->sk, ad);

	return s_estab;
}

static int decode_header(struct drbd_tconn *, void *, struct packet_info *);

static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
			     enum drbd_packet cmd)
{
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, cmd, 0, NULL, 0);
}

static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)
{
	unsigned int header_size = drbd_header_size(tconn);
	struct packet_info pi;
	int err;

	err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
	if (err != header_size) {
		if (err >= 0)
			err = -EIO;
		return err;
	}
	err = decode_header(tconn, tconn->data.rbuf, &pi);
	if (err)
		return err;
	return pi.cmd;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}
/* Gets called if a connection is established, or if a new minor gets created
   in a connection */
int drbd_connected(struct drbd_conf *mdev)
{
	int err;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
		&mdev->tconn->cstate_mutex :
		&mdev->own_state_mutex;

	err = drbd_send_sync_param(mdev);
	if (!err)
		err = drbd_send_sizes(mdev, 0, 0);
	if (!err)
		err = drbd_send_uuids(mdev);
	if (!err)
		err = drbd_send_current_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);
	atomic_set(&mdev->ap_in_flight, 0);
	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
	return err;
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int conn_connect(struct drbd_tconn *tconn)
{
	struct drbd_socket sock, msock;
	struct drbd_conf *mdev;
	struct net_conf *nc;
	int vnr, timeout, h, ok;
	bool discard_my_data;
	enum drbd_state_rv rv;
	struct accept_wait_data ad = {
		.tconn = tconn,
		.door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
	};

	clear_bit(DISCONNECT_SENT, &tconn->flags);
	if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
		return -2;

	mutex_init(&sock.mutex);
	sock.sbuf = tconn->data.sbuf;
	sock.rbuf = tconn->data.rbuf;
	sock.socket = NULL;
	mutex_init(&msock.mutex);
	msock.sbuf = tconn->meta.sbuf;
	msock.rbuf = tconn->meta.rbuf;
	msock.socket = NULL;

	/* Assume that the peer only understands protocol 80 until we know better.  */
	tconn->agreed_pro_version = 80;

	if (prepare_listen_socket(tconn, &ad))
		return 0;

	do {
		struct socket *s;

		s = drbd_try_connect(tconn);
		if (s) {
			if (!sock.socket) {
				sock.socket = s;
				send_first_packet(tconn, &sock, P_INITIAL_DATA);
			} else if (!msock.socket) {
				clear_bit(RESOLVE_CONFLICTS, &tconn->flags);
				msock.socket = s;
				send_first_packet(tconn, &msock, P_INITIAL_META);
			} else {
				conn_err(tconn, "Logic error in conn_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock.socket && msock.socket) {
			rcu_read_lock();
			nc = rcu_dereference(tconn->net_conf);
			timeout = nc->ping_timeo * HZ / 10;
			rcu_read_unlock();
			schedule_timeout_interruptible(timeout);
			ok = drbd_socket_okay(&sock.socket);
			ok = drbd_socket_okay(&msock.socket) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(tconn, &ad);
		if (s) {
			int fp = receive_first_packet(tconn, s);
			drbd_socket_okay(&sock.socket);
			drbd_socket_okay(&msock.socket);
			switch (fp) {
			case P_INITIAL_DATA:
				if (sock.socket) {
					conn_warn(tconn, "initial packet S crossed\n");
					sock_release(sock.socket);
					sock.socket = s;
					goto randomize;
				}
				sock.socket = s;
				break;
			case P_INITIAL_META:
				set_bit(RESOLVE_CONFLICTS, &tconn->flags);
				if (msock.socket) {
					conn_warn(tconn, "initial packet M crossed\n");
					sock_release(msock.socket);
					msock.socket = s;
					goto randomize;
				}
				msock.socket = s;
				break;
			default:
				conn_warn(tconn, "Error receiving initial packet\n");
				sock_release(s);
randomize:
				if (prandom_u32() & 1)
					goto retry;
			}
		}

		if (tconn->cstate <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&tconn->receiver) == EXITING)
				goto out_release_sockets;
		}

		ok = drbd_socket_okay(&sock.socket);
		ok = drbd_socket_okay(&msock.socket) && ok;
	} while (!ok);

	if (ad.s_listen)
		sock_release(ad.s_listen);

	sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
	msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */

	sock.socket->sk->sk_allocation = GFP_NOIO;
	msock.socket->sk->sk_allocation = GFP_NOIO;

	sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
	 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_CONNECTION_FEATURES timeout,
	 * which we set to 4x the configured ping_timeout. */
	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);

	sock.socket->sk->sk_sndtimeo =
	sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;

	msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
	timeout = nc->timeout * HZ / 10;
	discard_my_data = nc->discard_my_data;
	rcu_read_unlock();

	msock.socket->sk->sk_sndtimeo = timeout;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock.socket);
	drbd_tcp_nodelay(msock.socket);

	tconn->data.socket = sock.socket;
	tconn->meta.socket = msock.socket;
	tconn->last_received = jiffies;

	h = drbd_do_features(tconn);
	if (h <= 0)
		return h;

	if (tconn->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(tconn)) {
		case -1:
			conn_err(tconn, "Authentication of peer failed\n");
			return -1;
		case 0:
			conn_err(tconn, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	tconn->data.socket->sk->sk_sndtimeo = timeout;
	tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
		return -1;

	set_bit(STATE_SENT, &tconn->flags);

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		kref_get(&mdev->kref);
		/* Prevent a race between resync-handshake and
		 * being promoted to Primary.
		 *
		 * Grab and release the state mutex, so we know that any current
		 * drbd_set_role() is finished, and any incoming drbd_set_role
		 * will see the STATE_SENT flag, and wait for it to be cleared.
		 */
		mutex_lock(mdev->state_mutex);
		mutex_unlock(mdev->state_mutex);

		rcu_read_unlock();

		if (discard_my_data)
			set_bit(DISCARD_MY_DATA, &mdev->flags);
		else
			clear_bit(DISCARD_MY_DATA, &mdev->flags);

		drbd_connected(mdev);
		kref_put(&mdev->kref, &drbd_minor_destroy);
		rcu_read_lock();
	}
	rcu_read_unlock();

	rv = conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
	if (rv < SS_SUCCESS || tconn->cstate != C_WF_REPORT_PARAMS) {
		clear_bit(STATE_SENT, &tconn->flags);
		return 0;
	}

	drbd_thread_start(&tconn->asender);

	mutex_lock(&tconn->conf_update);
	/* The discard_my_data flag is a single-shot modifier to the next
	 * connection attempt, the handshake of which is now well underway.
	 * No need for rcu style copying of the whole struct
	 * just to clear a single value. */
	tconn->net_conf->discard_my_data = 0;
	mutex_unlock(&tconn->conf_update);

	return h;

out_release_sockets:
	if (ad.s_listen)
		sock_release(ad.s_listen);
	if (sock.socket)
		sock_release(sock.socket);
	if (msock.socket)
		sock_release(msock.socket);
	return -1;
}

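/*
 * Illustrative caller sketch (not part of the driver; the real retry loop
 * lives in the receiver thread, outside this excerpt), mapping the return
 * values documented above to actions:
 *
 *	int h;
 *	do {
 *		h = conn_connect(tconn);	(0: did not work out, try again)
 *	} while (h == 0);
 *	if (h < 0)
 *		stop retrying; for -1 the peer speaks an incompatible
 *		protocol, so the connection should go standalone.
 */
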
static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)
{
	unsigned int header_size = drbd_header_size(tconn);

	if (header_size == sizeof(struct p_header100) &&
	    *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
		struct p_header100 *h = header;
		if (h->pad != 0) {
			conn_err(tconn, "Header padding is not zero\n");
			return -EINVAL;
		}
		pi->vnr = be16_to_cpu(h->volume);
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
	} else if (header_size == sizeof(struct p_header95) &&
		   *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
		struct p_header95 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
		pi->vnr = 0;
	} else if (header_size == sizeof(struct p_header80) &&
		   *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
		struct p_header80 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be16_to_cpu(h->length);
		pi->vnr = 0;
	} else {
		conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
			 be32_to_cpu(*(__be32 *)header),
			 tconn->agreed_pro_version);
		return -EINVAL;
	}
	pi->data = header + header_size;
	return 0;
}

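/*
 * Illustrative overview (not part of the driver) of the three on-wire header
 * flavors decode_header() accepts; the authoritative struct layouts live in
 * drbd_int.h, all fields big-endian:
 *
 *	p_header80:  magic DRBD_MAGIC,     u16 command, u16 length
 *	p_header95:  magic DRBD_MAGIC_BIG, u16 command, u32 length
 *	p_header100: magic DRBD_MAGIC_100, u16 volume, u16 command,
 *	             u32 length, pad (must be zero)
 *
 * Only the protocol-100 header carries a volume number; for the older
 * headers pi->vnr is forced to 0.  pi->data always points directly behind
 * the header in the receive buffer.
 */
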
static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
{
	void *buffer = tconn->data.rbuf;
	int err;

	err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));
	if (err)
		return err;

	err = decode_header(tconn, buffer, pi);
	tconn->last_received = jiffies;

	return err;
}

static void drbd_flush(struct drbd_tconn *tconn)
{
	int rv;
	struct drbd_conf *mdev;
	int vnr;

	if (tconn->write_ordering >= WO_bdev_flush) {
		rcu_read_lock();
		idr_for_each_entry(&tconn->volumes, mdev, vnr) {
			if (!get_ldev(mdev))
				continue;
			kref_get(&mdev->kref);
			rcu_read_unlock();

			rv = blkdev_issue_flush(mdev->ldev->backing_bdev,
						GFP_NOIO, NULL);
			if (rv) {
				dev_info(DEV, "local disk flush failed with status %d\n", rv);
				/* would rather check on EOPNOTSUPP, but that is not reliable.
				 * don't try again for ANY return value != 0
				 * if (rv == -EOPNOTSUPP) */
				drbd_bump_write_ordering(tconn, WO_drain_io);
			}
			put_ldev(mdev);
			kref_put(&mdev->kref, &drbd_minor_destroy);

			rcu_read_lock();
			if (rv)
				break;
		}
		rcu_read_unlock();
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @tconn:	DRBD connection.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&tconn->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do*/
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&tconn->epoch_lock);
				drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size);
				spin_lock(&tconn->epoch_lock);
			}
#if 0
			/* FIXME: dec unacked on connection, once we have
			 * something to count pending connection packets in. */
			if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
				dec_unacked(epoch->tconn);
#endif

			if (tconn->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				tconn->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&tconn->epoch_lock);

	return rv;
}

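/*
 * Illustrative event flow (not part of the driver) for the state machine
 * above: every write received into an epoch bumps epoch_size and active;
 * the events then arrive as
 *
 *	drbd_may_finish_epoch(tconn, epoch, EV_GOT_BARRIER_NR);	(receive_Barrier)
 *	drbd_may_finish_epoch(tconn, epoch, EV_PUT);	(write completion paths)
 *
 * and once an epoch has a barrier number, epoch_size != 0 and active == 0,
 * it is acked with P_BARRIER_ACK and either destroyed (an older epoch) or
 * recycled (the current one).
 */
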
/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @tconn:	DRBD connection.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo)
{
	struct disk_conf *dc;
	struct drbd_conf *mdev;
	enum write_ordering_e pwo;
	int vnr;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = tconn->write_ordering;
	wo = min(pwo, wo);
	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (!get_ldev_if_state(mdev, D_ATTACHING))
			continue;
		dc = rcu_dereference(mdev->ldev->disk_conf);

		if (wo == WO_bdev_flush && !dc->disk_flushes)
			wo = WO_drain_io;
		if (wo == WO_drain_io && !dc->disk_drain)
			wo = WO_none;
		put_ldev(mdev);
	}
	rcu_read_unlock();
	tconn->write_ordering = wo;
	if (pwo != tconn->write_ordering || wo == WO_bdev_flush)
		conn_info(tconn, "Method to ensure write ordering: %s\n", write_ordering_str[tconn->write_ordering]);
}

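/*
 * Illustrative note (not part of the driver): wo = min(pwo, wo) makes the
 * fallback strictly one-way, WO_bdev_flush -> WO_drain_io -> WO_none, and
 * per-volume disk_flushes/disk_drain settings can only lower it further.
 * drbd_flush() above degrades exactly this way after a failed flush:
 *
 *	drbd_bump_write_ordering(tconn, WO_drain_io);
 */
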
/**
 * drbd_submit_peer_request()
 * @mdev:	DRBD device.
 * @peer_req:	peer request
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_conf *mdev,
			     struct drbd_peer_request *peer_req,
			     const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = peer_req->pages;
	sector_t sector = peer_req->i.sector;
	unsigned ds = peer_req->i.size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int err = -ENOMEM;

	/* In most cases, we will only need one bio.  But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio.
	 *
	 * Plain bio_alloc is good enough here, this is no DRBD internally
	 * generated bio, but a bio allocated on behalf of the peer.
	 */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > peer_req->i.sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = peer_req;
	bio->bi_end_io = drbd_peer_request_endio;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyways,
			 * we deal with it, and complain (below). */
			if (bio->bi_vcnt == 0) {
				dev_err(DEV,
					"bio_add_page failed for len=%u, "
					"bi_vcnt=0 (bi_sector=%llu)\n",
					len, (unsigned long long)bio->bi_sector);
				err = -ENOSPC;
				goto fail;
			}
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&peer_req->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);
	} while (bios);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return err;
}

static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
					     struct drbd_peer_request *peer_req)
{
	struct drbd_interval *i = &peer_req->i;

	drbd_remove_interval(&mdev->write_requests, i);
	drbd_clear_interval(i);

	/* Wake up any processes waiting for this peer request to complete.  */
	if (i->waiting)
		wake_up(&mdev->misc_wait);
}

void conn_wait_active_ee_empty(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		kref_get(&mdev->kref);
		rcu_read_unlock();
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		kref_put(&mdev->kref, &drbd_minor_destroy);
		rcu_read_lock();
	}
	rcu_read_unlock();
}

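/*
 * Illustrative sketch (not part of the driver): the function above shows the
 * pattern used throughout this file whenever a sleeping operation must run
 * for every volume of a connection.  idr_for_each_entry() is only safe under
 * rcu_read_lock(), so each device is pinned with a kref while the RCU read
 * lock is dropped around the sleeping call:
 *
 *	rcu_read_lock();
 *	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
 *		kref_get(&mdev->kref);
 *		rcu_read_unlock();
 *		sleeping_operation(mdev);	(hypothetical placeholder,
 *						 e.g. drbd_wait_ee_list_empty)
 *		kref_put(&mdev->kref, &drbd_minor_destroy);
 *		rcu_read_lock();
 *	}
 *	rcu_read_unlock();
 *
 * drbd_flush() and conn_connect() above follow the same shape.
 */
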
static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
{
	int rv;
	struct p_barrier *p = pi->data;
	struct drbd_epoch *epoch;

	/* FIXME these are unacked on connection,
	 * not a specific (peer)device.
	 */
	tconn->current_epoch->barrier_nr = p->barrier;
	tconn->current_epoch->tconn = tconn;
	rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (tconn->write_ordering) {
	case WO_none:
		if (rv == FE_RECYCLED)
			return 0;

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		if (epoch)
			break;
		else
			conn_warn(tconn, "Allocation of an epoch failed, slowing down\n");
			/* Fall through */

	case WO_bdev_flush:
	case WO_drain_io:
		conn_wait_active_ee_empty(tconn);
		drbd_flush(tconn);

		if (atomic_read(&tconn->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			if (epoch)
				break;
		}

		return 0;
	default:
		conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);
		return -EIO;
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&tconn->epoch_lock);
	if (atomic_read(&tconn->current_epoch->epoch_size)) {
		list_add(&epoch->list, &tconn->current_epoch->list);
		tconn->current_epoch = epoch;
		tconn->epochs++;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
	spin_unlock(&tconn->epoch_lock);

	return 0;
}

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_peer_request *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
	      int data_size) __must_hold(local)
{
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_peer_request *peer_req;
	struct page *page;
	int dgs, ds, err;
	void *dig_in = mdev->tconn->int_dig_in;
	void *dig_vv = mdev->tconn->int_dig_vv;
	unsigned long *data;

	dgs = 0;
	if (mdev->tconn->peer_integrity_tfm) {
		dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
		/*
		 * FIXME: Receive the incoming digest into the receive buffer
		 *	  here, together with its struct p_data?
		 */
		err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
		if (err)
			return NULL;
		data_size -= dgs;
	}

	if (!expect(IS_ALIGNED(data_size, 512)))
		return NULL;
	if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
		return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "request from peer beyond end of local disk: "
			"capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);
		return NULL;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);
	if (!peer_req)
		return NULL;

	if (!data_size)
		return peer_req;

	ds = data_size;
	page = peer_req->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		data = kmap(page);
		err = drbd_recv_all_warn(mdev->tconn, data, len);
		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
		}
		kunmap(page);
		if (err) {
			drbd_free_peer_req(mdev, peer_req);
			return NULL;
		}
		ds -= len;
	}

	if (dgs) {
		drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_free_peer_req(mdev, peer_req);
			return NULL;
		}
	}
	mdev->recv_cnt += data_size>>9;
	return peer_req;
}

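/*
 * Illustrative wire layout (not part of the driver) for what read_in_block()
 * above consumes when a peer integrity algorithm is configured.  The digest
 * travels in front of the payload, and the advertised data size covers both:
 *
 *	[ digest (dgs bytes) | payload (data_size - dgs bytes) ]
 *
 * Hence the function first receives dgs digest bytes, shrinks data_size,
 * reads the payload into the page chain, recomputes the digest with
 * drbd_csum_ee(), and drops the peer request on a mismatch.
 */
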
/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
	struct page *page;
	int err = 0;
	void *data;

	if (!data_size)
		return 0;

	page = drbd_alloc_pages(mdev, 1, 1);

	data = kmap(page);
	while (data_size) {
		unsigned int len = min_t(int, data_size, PAGE_SIZE);

		err = drbd_recv_all_warn(mdev->tconn, data, len);
		if (err)
			break;
		data_size -= len;
	}
	kunmap(page);
	drbd_free_pages(mdev, page, 0);
	return err;
}

static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
{
	struct bio_vec *bvec;
	struct bio *bio;
	int dgs, err, i, expect;
	void *dig_in = mdev->tconn->int_dig_in;
	void *dig_vv = mdev->tconn->int_dig_vv;

	dgs = 0;
	if (mdev->tconn->peer_integrity_tfm) {
		dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
		err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
		if (err)
			return err;
		data_size -= dgs;
	}

	/* optimistically update recv_cnt.  if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
		expect = min_t(int, data_size, bvec->bv_len);
		err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
		kunmap(bvec->bv_page);
		if (err)
			return err;
		data_size -= expect;
	}

	if (dgs) {
		drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
			return -EINVAL;
		}
	}

	D_ASSERT(data_size == 0);
	return 0;
}

/*
 * e_end_resync_block() is called in asender context via
 * drbd_finish_peer_reqs().
 */
static int e_end_resync_block(struct drbd_work *w, int unused)
{
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	struct drbd_conf *mdev = w->mdev;
	sector_t sector = peer_req->i.sector;
	int err;

	D_ASSERT(drbd_interval_empty(&peer_req->i));

	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, peer_req->i.size);
		err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, peer_req->i.size);

		err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
	}
	dec_unacked(mdev);

	return err;
}

1670static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1671{
db830c46 1672 struct drbd_peer_request *peer_req;
b411b363 1673
db830c46
AG
1674 peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
1675 if (!peer_req)
45bb912b 1676 goto fail;
b411b363
PR
1677
1678 dec_rs_pending(mdev);
1679
b411b363
PR
1680 inc_unacked(mdev);
1681 /* corresponding dec_unacked() in e_end_resync_block()
1682 * respective _drbd_clear_done_ee */
1683
db830c46 1684 peer_req->w.cb = e_end_resync_block;
45bb912b 1685
87eeee41 1686 spin_lock_irq(&mdev->tconn->req_lock);
db830c46 1687 list_add(&peer_req->w.list, &mdev->sync_ee);
87eeee41 1688 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 1689
0f0601f4 1690 atomic_add(data_size >> 9, &mdev->rs_sect_ev);
fbe29dec 1691 if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
e1c1b0fc 1692 return 0;
b411b363 1693
10f6d992
LE
1694 /* don't care for the reason here */
1695 dev_err(DEV, "submit failed, triggering re-connect\n");
87eeee41 1696 spin_lock_irq(&mdev->tconn->req_lock);
db830c46 1697 list_del(&peer_req->w.list);
87eeee41 1698 spin_unlock_irq(&mdev->tconn->req_lock);
22cc37a9 1699
3967deb1 1700 drbd_free_peer_req(mdev, peer_req);
45bb912b
LE
1701fail:
1702 put_ldev(mdev);
e1c1b0fc 1703 return -EIO;
b411b363
PR
1704}

static struct drbd_request *
find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
	     sector_t sector, bool missing_ok, const char *func)
{
	struct drbd_request *req;

	/* Request object according to our peer */
	req = (struct drbd_request *)(unsigned long)id;
	if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
		return req;
	if (!missing_ok) {
		dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
			(unsigned long)id, (unsigned long long)sector);
	}
	return NULL;
}

static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct drbd_request *req;
	sector_t sector;
	int err;
	struct p_data *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->tconn->req_lock);
	req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
	spin_unlock_irq(&mdev->tconn->req_lock);
	if (unlikely(!req))
		return -EIO;

	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	err = recv_dless_read(mdev, req, sector, pi->size);
	if (!err)
		req_mod(req, DATA_RECEIVED);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

	return err;
}

static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	sector_t sector;
	int err;
	struct p_data *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_peer_request_endio. */
		err = recv_resync_read(mdev, sector, pi->size);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		err = drbd_drain_block(mdev, pi->size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
	}

	atomic_add(pi->size >> 9, &mdev->rs_sect_in);

	return err;
}

static void restart_conflicting_writes(struct drbd_conf *mdev,
				       sector_t sector, int size)
{
	struct drbd_interval *i;
	struct drbd_request *req;

	drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
		if (!i->local)
			continue;
		req = container_of(i, struct drbd_request, i);
		if (req->rq_state & RQ_LOCAL_PENDING ||
		    !(req->rq_state & RQ_POSTPONED))
			continue;
		/* as it is RQ_POSTPONED, this will cause it to
		 * be queued on the retry workqueue. */
		__req_mod(req, CONFLICT_RESOLVED, NULL);
	}
}

/*
 * e_end_block() is called in asender context via drbd_finish_peer_reqs().
 */
static int e_end_block(struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	struct drbd_conf *mdev = w->mdev;
	sector_t sector = peer_req->i.sector;
	int err = 0, pcmd;

	if (peer_req->flags & EE_SEND_WRITE_ACK) {
		if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				peer_req->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			err = drbd_send_ack(mdev, pcmd, peer_req);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, peer_req->i.size);
		} else {
			err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */
		}
		dec_unacked(mdev);
	}
	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (peer_req->flags & EE_IN_INTERVAL_TREE) {
		spin_lock_irq(&mdev->tconn->req_lock);
		D_ASSERT(!drbd_interval_empty(&peer_req->i));
		drbd_remove_epoch_entry_interval(mdev, peer_req);
		if (peer_req->flags & EE_RESTART_REQUESTS)
			restart_conflicting_writes(mdev, sector, peer_req->i.size);
		spin_unlock_irq(&mdev->tconn->req_lock);
	} else
		D_ASSERT(drbd_interval_empty(&peer_req->i));

	drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

	return err;
}

static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
{
	struct drbd_conf *mdev = w->mdev;
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	int err;

	err = drbd_send_ack(mdev, ack, peer_req);
	dec_unacked(mdev);

	return err;
}

static int e_send_superseded(struct drbd_work *w, int unused)
{
	return e_send_ack(w, P_SUPERSEDED);
}

static int e_send_retry_write(struct drbd_work *w, int unused)
{
	struct drbd_tconn *tconn = w->mdev->tconn;

	return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
			     P_RETRY_WRITE : P_SUPERSEDED);
}

static bool seq_greater(u32 a, u32 b)
{
	/*
	 * We assume 32-bit wrap-around here.
	 * For 24-bit wrap-around, we would have to shift:
	 * a <<= 8; b <<= 8;
	 */
	return (s32)a - (s32)b > 0;
}
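
/* Editor's note -- illustrative example, not part of the original source:
 * with 32-bit wrap-around, seq_greater(0x00000001, 0xffffffff) is true,
 * because (s32)0x00000001 - (s32)0xffffffff == 1 - (-1) == 2 > 0; sequence
 * number 1 counts as "newer" than 0xffffffff even though it is numerically
 * smaller. */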

static u32 seq_max(u32 a, u32 b)
{
	return seq_greater(a, b) ? a : b;
}

static bool need_peer_seq(struct drbd_conf *mdev)
{
	struct drbd_tconn *tconn = mdev->tconn;
	int tp;

	/*
	 * We only need to keep track of the last packet_seq number of our peer
	 * if we are in dual-primary mode and we have the resolve-conflicts
	 * flag set; see handle_write_conflicts().
	 */

	rcu_read_lock();
	tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
	rcu_read_unlock();

	return tp && test_bit(RESOLVE_CONFLICTS, &tconn->flags);
}

static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
{
	unsigned int newest_peer_seq;

	if (need_peer_seq(mdev)) {
		spin_lock(&mdev->peer_seq_lock);
		newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
		mdev->peer_seq = newest_peer_seq;
		spin_unlock(&mdev->peer_seq_lock);
		/* wake up only if we actually changed mdev->peer_seq */
		if (peer_seq == newest_peer_seq)
			wake_up(&mdev->seq_wait);
	}
}

static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
{
	return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
}
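
/* Editor's note -- illustrative example, not part of the original source:
 * lengths l1/l2 are in bytes, sectors are 512 bytes.  A 4 KiB write at
 * sector 0 covers sectors [0, 8), so overlaps(0, 4096, 8, 4096) == 0
 * (adjacent ranges, no overlap), while overlaps(0, 4096, 7, 4096) == 1,
 * since sector 7 is covered by both ranges. */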

/* maybe change sync_ee into interval trees as well? */
static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
{
	struct drbd_peer_request *rs_req;
	bool rv = false;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_for_each_entry(rs_req, &mdev->sync_ee, w.list) {
		if (overlaps(peer_req->i.sector, peer_req->i.size,
			     rs_req->i.sector, rs_req->i.size)) {
			rv = true;
			break;
		}
	}
	spin_unlock_irq(&mdev->tconn->req_lock);

	return rv;
}

/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq number, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 bytes per second,
 * about 1<<21 sectors per second.  So in the "worst" case we have
 * 1<<24 / 1<<21 == 1<<3 == 8 seconds until a 24bit counter wraps (historical
 * atomic_t guarantee on some archs), and 1<<32 / 1<<21 == 1<<11 == 2048
 * seconds, aka ages, until the 32bit counter wraps around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
{
	DEFINE_WAIT(wait);
	long timeout;
	int ret;

	if (!need_peer_seq(mdev))
		return 0;

	spin_lock(&mdev->peer_seq_lock);
	for (;;) {
		if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
			mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
			ret = 0;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
		spin_unlock(&mdev->peer_seq_lock);
		rcu_read_lock();
		timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10;
		rcu_read_unlock();
		timeout = schedule_timeout(timeout);
		spin_lock(&mdev->peer_seq_lock);
		if (!timeout) {
			ret = -ETIMEDOUT;
			dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
			break;
		}
	}
	spin_unlock(&mdev->peer_seq_lock);
	finish_wait(&mdev->seq_wait, &wait);
	return ret;
}
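
/* Editor's note -- illustrative example, not part of the original source:
 * if mdev->peer_seq is 41 and a P_DATA packet with seq_num 42 arrives,
 * seq_greater(41, 41) is false and the packet is processed immediately;
 * with seq_num 44, two packets are still in flight on the msock, so we
 * sleep on seq_wait until their acks have been processed (or ping_timeo
 * expires and we disconnect). */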

/* see also bio_flags_to_wire()
 * DRBD_REQ_*, because we need to semantically map the flags to data packet
 * flags and back. We may replicate to other kernel versions. */
static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
{
	return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
		(dpf & DP_FUA ? REQ_FUA : 0) |
		(dpf & DP_FLUSH ? REQ_FLUSH : 0) |
		(dpf & DP_DISCARD ? REQ_DISCARD : 0);
}
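
/* Editor's note -- illustrative example, not part of the original source:
 * a peer that sent dp_flags == (DP_RW_SYNC | DP_FUA) gets its write
 * resubmitted locally with (REQ_SYNC | REQ_FUA), so the write-ordering and
 * cache-flush semantics requested by the peer survive the trip over the
 * wire. */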

static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
				    unsigned int size)
{
	struct drbd_interval *i;

    repeat:
	drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
		struct drbd_request *req;
		struct bio_and_error m;

		if (!i->local)
			continue;
		req = container_of(i, struct drbd_request, i);
		if (!(req->rq_state & RQ_POSTPONED))
			continue;
		req->rq_state &= ~RQ_POSTPONED;
		__req_mod(req, NEG_ACKED, &m);
		spin_unlock_irq(&mdev->tconn->req_lock);
		if (m.bio)
			complete_master_bio(mdev, &m);
		spin_lock_irq(&mdev->tconn->req_lock);
		goto repeat;
	}
}

static int handle_write_conflicts(struct drbd_conf *mdev,
				  struct drbd_peer_request *peer_req)
{
	struct drbd_tconn *tconn = mdev->tconn;
	bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &tconn->flags);
	sector_t sector = peer_req->i.sector;
	const unsigned int size = peer_req->i.size;
	struct drbd_interval *i;
	bool equal;
	int err;

	/*
	 * Inserting the peer request into the write_requests tree will prevent
	 * new conflicting local requests from being added.
	 */
	drbd_insert_interval(&mdev->write_requests, &peer_req->i);

    repeat:
	drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
		if (i == &peer_req->i)
			continue;

		if (!i->local) {
			/*
			 * Our peer has sent a conflicting remote request; this
			 * should not happen in a two-node setup.  Wait for the
			 * earlier peer request to complete.
			 */
			err = drbd_wait_misc(mdev, i);
			if (err)
				goto out;
			goto repeat;
		}

		equal = i->sector == sector && i->size == size;
		if (resolve_conflicts) {
			/*
			 * If the peer request is fully contained within the
			 * overlapping request, it can be considered overwritten
			 * and thus superseded; otherwise, it will be retried
			 * once all overlapping requests have completed.
			 */
			bool superseded = i->sector <= sector && i->sector +
				       (i->size >> 9) >= sector + (size >> 9);

			if (!equal)
				dev_alert(DEV, "Concurrent writes detected: "
					       "local=%llus +%u, remote=%llus +%u, "
					       "assuming %s came first\n",
					  (unsigned long long)i->sector, i->size,
					  (unsigned long long)sector, size,
					  superseded ? "local" : "remote");

			inc_unacked(mdev);
			peer_req->w.cb = superseded ? e_send_superseded :
						   e_send_retry_write;
			list_add_tail(&peer_req->w.list, &mdev->done_ee);
			wake_asender(mdev->tconn);

			err = -ENOENT;
			goto out;
		} else {
			struct drbd_request *req =
				container_of(i, struct drbd_request, i);

			if (!equal)
				dev_alert(DEV, "Concurrent writes detected: "
					       "local=%llus +%u, remote=%llus +%u\n",
					  (unsigned long long)i->sector, i->size,
					  (unsigned long long)sector, size);

			if (req->rq_state & RQ_LOCAL_PENDING ||
			    !(req->rq_state & RQ_POSTPONED)) {
				/*
				 * Wait for the node with the discard flag to
				 * decide if this request has been superseded
				 * or needs to be retried.
				 * Requests that have been superseded will
				 * disappear from the write_requests tree.
				 *
				 * In addition, wait for the conflicting
				 * request to finish locally before submitting
				 * the conflicting peer request.
				 */
				err = drbd_wait_misc(mdev, &req->i);
				if (err) {
					_conn_request_state(mdev->tconn,
							    NS(conn, C_TIMEOUT),
							    CS_HARD);
					fail_postponed_requests(mdev, sector, size);
					goto out;
				}
				goto repeat;
			}
			/*
			 * Remember to restart the conflicting requests after
			 * the new peer request has completed.
			 */
			peer_req->flags |= EE_RESTART_REQUESTS;
		}
	}
	err = 0;

    out:
	if (err)
		drbd_remove_epoch_entry_interval(mdev, peer_req);
	return err;
}
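
/* Editor's note -- illustrative example, not part of the original source:
 * a local request covering sectors [0, 16) overlaps a peer write covering
 * [4, 8).  The peer write is fully contained (0 <= 4 and 0 + 16 >= 4 + 4),
 * so with the resolve-conflicts flag set it is acked with P_SUPERSEDED and
 * never submitted; a merely partial overlap would instead be acked with
 * P_RETRY_WRITE once all overlapping requests have completed. */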

/* mirrored write */
static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	sector_t sector;
	struct drbd_peer_request *peer_req;
	struct p_data *p = pi->data;
	u32 peer_seq = be32_to_cpu(p->seq_num);
	int rw = WRITE;
	u32 dp_flags;
	int err, tp;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	if (!get_ldev(mdev)) {
		int err2;

		err = wait_for_and_update_peer_seq(mdev, peer_seq);
		drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
		atomic_inc(&tconn->current_epoch->epoch_size);
		err2 = drbd_drain_block(mdev, pi->size);
		if (!err)
			err = err2;
		return err;
	}

	/*
	 * Corresponding put_ldev done either below (on various errors), or in
	 * drbd_peer_request_endio, if we successfully submit the data at the
	 * end of this function.
	 */

	sector = be64_to_cpu(p->sector);
	peer_req = read_in_block(mdev, p->block_id, sector, pi->size);
	if (!peer_req) {
		put_ldev(mdev);
		return -EIO;
	}

	peer_req->w.cb = e_end_block;

	dp_flags = be32_to_cpu(p->dp_flags);
	rw |= wire_flags_to_bio(mdev, dp_flags);
	if (peer_req->pages == NULL) {
		D_ASSERT(peer_req->i.size == 0);
		D_ASSERT(dp_flags & DP_FLUSH);
	}

	if (dp_flags & DP_MAY_SET_IN_SYNC)
		peer_req->flags |= EE_MAY_SET_IN_SYNC;

	spin_lock(&tconn->epoch_lock);
	peer_req->epoch = tconn->current_epoch;
	atomic_inc(&peer_req->epoch->epoch_size);
	atomic_inc(&peer_req->epoch->active);
	spin_unlock(&tconn->epoch_lock);

	rcu_read_lock();
	tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
	rcu_read_unlock();
	if (tp) {
		peer_req->flags |= EE_IN_INTERVAL_TREE;
		err = wait_for_and_update_peer_seq(mdev, peer_seq);
		if (err)
			goto out_interrupted;
		spin_lock_irq(&mdev->tconn->req_lock);
		err = handle_write_conflicts(mdev, peer_req);
		if (err) {
			spin_unlock_irq(&mdev->tconn->req_lock);
			if (err == -ENOENT) {
				put_ldev(mdev);
				return 0;
			}
			goto out_interrupted;
		}
	} else
		spin_lock_irq(&mdev->tconn->req_lock);
	list_add(&peer_req->w.list, &mdev->active_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (mdev->state.conn == C_SYNC_TARGET)
		wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, peer_req));

	if (mdev->tconn->agreed_pro_version < 100) {
		rcu_read_lock();
		switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
		case DRBD_PROT_C:
			dp_flags |= DP_SEND_WRITE_ACK;
			break;
		case DRBD_PROT_B:
			dp_flags |= DP_SEND_RECEIVE_ACK;
			break;
		}
		rcu_read_unlock();
	}

	if (dp_flags & DP_SEND_WRITE_ACK) {
		peer_req->flags |= EE_SEND_WRITE_ACK;
		inc_unacked(mdev);
		/* corresponding dec_unacked() in e_end_block()
		 * respective _drbd_clear_done_ee */
	}

	if (dp_flags & DP_SEND_RECEIVE_ACK) {
		/* I really don't like it that the receiver thread
		 * sends on the msock, but anyways */
		drbd_send_ack(mdev, P_RECV_ACK, peer_req);
	}

	if (mdev->state.pdsk < D_INCONSISTENT) {
		/* In case we have the only disk of the cluster, */
		drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
		peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
		peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
		drbd_al_begin_io(mdev, &peer_req->i, true);
	}

	err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
	if (!err)
		return 0;

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&peer_req->w.list);
	drbd_remove_epoch_entry_interval(mdev, peer_req);
	spin_unlock_irq(&mdev->tconn->req_lock);
	if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
		drbd_al_complete_io(mdev, &peer_req->i);

out_interrupted:
	drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP);
	put_ldev(mdev);
	drbd_free_peer_req(mdev, peer_req);
	return err;
}

/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID is_mddev_idle(): if the partition stats reveal a "significant"
 * amount (more than 64 sectors) of activity we cannot account for with our
 * own resync activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 */
int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
{
	struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
	unsigned long db, dt, dbdt;
	struct lc_element *tmp;
	int curr_events;
	int throttle = 0;
	unsigned int c_min_rate;

	rcu_read_lock();
	c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;
	rcu_read_unlock();

	/* feature disabled? */
	if (c_min_rate == 0)
		return 0;

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
	if (tmp) {
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
			spin_unlock_irq(&mdev->al_lock);
			return 0;
		}
		/* Do not slow down if app IO is already waiting for this extent */
	}
	spin_unlock_irq(&mdev->al_lock);

	curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
		      (int)part_stat_read(&disk->part0, sectors[1]) -
			atomic_read(&mdev->rs_sect_ev);

	if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
		unsigned long rs_left;
		int i;

		mdev->rs_last_events = curr_events;

		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
		 * approx. */
		i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;

		if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
			rs_left = mdev->ov_left;
		else
			rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;

		dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
		if (!dt)
			dt++;
		db = mdev->rs_mark_left[i] - rs_left;
		dbdt = Bit2KB(db/dt);

		if (dbdt > c_min_rate)
			throttle = 1;
	}
	return throttle;
}
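
/* Editor's note -- illustrative example, not part of the original source:
 * if the selected sync mark is 3 seconds old and 30000 bitmap bits (4 KiB
 * each) were cleared since then, db/dt == 10000 bits/s and dbdt ==
 * Bit2KB(10000) == 40000 KB/s; the resync is throttled only while that
 * rate exceeds the configured c_min_rate and the backing device shows more
 * than 64 sectors of I/O we cannot attribute to the resync itself. */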


static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	sector_t sector;
	sector_t capacity;
	struct drbd_peer_request *peer_req;
	struct digest_info *di = NULL;
	int size, verb;
	unsigned int fault_type;
	struct p_block_req *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;
	capacity = drbd_get_capacity(mdev->this_bdev);

	sector = be64_to_cpu(p->sector);
	size   = be32_to_cpu(p->blksize);

	if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return -EINVAL;
	}
	if (sector + (size>>9) > capacity) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return -EINVAL;
	}

	if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
		verb = 1;
		switch (pi->cmd) {
		case P_DATA_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
			break;
		case P_RS_DATA_REQUEST:
		case P_CSUM_RS_REQUEST:
		case P_OV_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY, p);
			break;
		case P_OV_REPLY:
			verb = 0;
			dec_rs_pending(mdev);
			drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
			break;
		default:
			BUG();
		}
		if (verb && __ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not satisfy peer's read request, "
			    "no local data.\n");

		/* drain possible payload */
		return drbd_drain_block(mdev, pi->size);
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);
	if (!peer_req) {
		put_ldev(mdev);
		return -ENOMEM;
	}

	switch (pi->cmd) {
	case P_DATA_REQUEST:
		peer_req->w.cb = w_e_end_data_req;
		fault_type = DRBD_FAULT_DT_RD;
		/* application IO, don't drbd_rs_begin_io */
		goto submit;

	case P_RS_DATA_REQUEST:
		peer_req->w.cb = w_e_end_rsdata_req;
		fault_type = DRBD_FAULT_RS_RD;
		/* used in the sector offset progress display */
		mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		break;

	case P_OV_REPLY:
	case P_CSUM_RS_REQUEST:
		fault_type = DRBD_FAULT_RS_RD;
		di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
		if (!di)
			goto out_free_e;

		di->digest_size = pi->size;
		di->digest = (((char *)di)+sizeof(struct digest_info));

		peer_req->digest = di;
		peer_req->flags |= EE_HAS_DIGEST;

		if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
			goto out_free_e;

		if (pi->cmd == P_CSUM_RS_REQUEST) {
			D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
			peer_req->w.cb = w_e_end_csum_rs_req;
			/* used in the sector offset progress display */
			mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		} else if (pi->cmd == P_OV_REPLY) {
			/* track progress, we may need to throttle */
			atomic_add(size >> 9, &mdev->rs_sect_in);
			peer_req->w.cb = w_e_end_ov_reply;
			dec_rs_pending(mdev);
			/* drbd_rs_begin_io done when we sent this request,
			 * but accounting still needs to be done. */
			goto submit_for_resync;
		}
		break;

	case P_OV_REQUEST:
		if (mdev->ov_start_sector == ~(sector_t)0 &&
		    mdev->tconn->agreed_pro_version >= 90) {
			unsigned long now = jiffies;
			int i;
			mdev->ov_start_sector = sector;
			mdev->ov_position = sector;
			mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
			mdev->rs_total = mdev->ov_left;
			for (i = 0; i < DRBD_SYNC_MARKS; i++) {
				mdev->rs_mark_left[i] = mdev->ov_left;
				mdev->rs_mark_time[i] = now;
			}
			dev_info(DEV, "Online Verify start sector: %llu\n",
					(unsigned long long)sector);
		}
		peer_req->w.cb = w_e_end_ov_req;
		fault_type = DRBD_FAULT_RS_RD;
		break;

	default:
		BUG();
	}

	/* Throttle, drbd_rs_begin_io and submit should become asynchronous
	 * wrt the receiver, but it is not as straightforward as it may seem.
	 * Various places in the resync start and stop logic assume resync
	 * requests are processed in order, requeuing this on the worker thread
	 * introduces a bunch of new code for synchronization between threads.
	 *
	 * Unlimited throttling before drbd_rs_begin_io may stall the resync
	 * "forever", throttling after drbd_rs_begin_io will lock that extent
	 * for application writes for the same time.  For now, just throttle
	 * here, where the rest of the code expects the receiver to sleep for
	 * a while, anyways.
	 */

	/* Throttle before drbd_rs_begin_io, as that locks out application IO;
	 * this defers syncer requests for some time, before letting at least
	 * one request through.  The resync controller on the receiving side
	 * will adapt to the incoming rate accordingly.
	 *
	 * We cannot throttle here if remote is Primary/SyncTarget:
	 * we would also throttle its application reads.
	 * In that case, throttling is done on the SyncTarget only. */
	if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
		schedule_timeout_uninterruptible(HZ/10);
	if (drbd_rs_begin_io(mdev, sector))
		goto out_free_e;

submit_for_resync:
	atomic_add(size >> 9, &mdev->rs_sect_ev);

submit:
	inc_unacked(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	list_add_tail(&peer_req->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
		return 0;

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&peer_req->w.list);
	spin_unlock_irq(&mdev->tconn->req_lock);
	/* no drbd_rs_complete_io(), we are dropping the connection anyways */

out_free_e:
	put_ldev(mdev);
	drbd_free_peer_req(mdev, peer_req);
	return -EIO;
}

static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
{
	int self, peer, rv = -100;
	unsigned long ch_self, ch_peer;
	enum drbd_after_sb_p after_sb_0p;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	ch_peer = mdev->p_uuid[UI_SIZE];
	ch_self = mdev->comm_bm_set;

	rcu_read_lock();
	after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p;
	rcu_read_unlock();
	switch (after_sb_0p) {
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
	case ASB_CALL_HELPER:
	case ASB_VIOLENTLY:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_DISCARD_YOUNGER_PRI:
		if (self == 0 && peer == 1) {
			rv = -1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv = 1;
			break;
		}
		/* Else fall through to one of the other strategies... */
	case ASB_DISCARD_OLDER_PRI:
		if (self == 0 && peer == 1) {
			rv = 1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv = -1;
			break;
		}
		/* Else fall through to one of the other strategies... */
		dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
			 "Using discard-least-changes instead\n");
	case ASB_DISCARD_ZERO_CHG:
		if (ch_peer == 0 && ch_self == 0) {
			rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
				? -1 : 1;
			break;
		} else {
			if (ch_peer == 0) { rv = 1; break; }
			if (ch_self == 0) { rv = -1; break; }
		}
		if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
			break;
	case ASB_DISCARD_LEAST_CHG:
		if	(ch_self < ch_peer)
			rv = -1;
		else if (ch_self > ch_peer)
			rv = 1;
		else /* ( ch_self == ch_peer ) */
		     /* Well, then use something else. */
			rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
				? -1 : 1;
		break;
	case ASB_DISCARD_LOCAL:
		rv = -1;
		break;
	case ASB_DISCARD_REMOTE:
		rv = 1;
	}

	return rv;
}
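
/* Editor's note -- illustrative example, not part of the original source:
 * with after-sb-0pri set to discard-least-changes, ch_self == 100 changed
 * blocks against ch_peer == 5000 yields rv == -1: the node with fewer
 * changes (this one) discards its modifications and becomes the sync
 * target. */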

static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
{
	int hg, rv = -100;
	enum drbd_after_sb_p after_sb_1p;

	rcu_read_lock();
	after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p;
	rcu_read_unlock();
	switch (after_sb_1p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
	case ASB_DISCARD_ZERO_CHG:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CONSENSUS:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_SECONDARY)
			rv = hg;
		if (hg == 1  && mdev->state.role == R_PRIMARY)
			rv = hg;
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCARD_SECONDARY:
		return mdev->state.role == R_PRIMARY ? 1 : -1;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_PRIMARY) {
			enum drbd_state_rv rv2;

			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}

static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
{
	int hg, rv = -100;
	enum drbd_after_sb_p after_sb_2p;

	rcu_read_lock();
	after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p;
	rcu_read_unlock();
	switch (after_sb_2p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
	case ASB_DISCARD_ZERO_CHG:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1) {
			enum drbd_state_rv rv2;

			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}

static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
			   u64 bits, u64 flags)
{
	if (!uuid) {
		dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
		return;
	}
	dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
		 text,
		 (unsigned long long)uuid[UI_CURRENT],
		 (unsigned long long)uuid[UI_BITMAP],
		 (unsigned long long)uuid[UI_HISTORY_START],
		 (unsigned long long)uuid[UI_HISTORY_END],
		 (unsigned long long)bits,
		 (unsigned long long)flags);
}

/*
  100	after split brain try auto recover
    2	C_SYNC_SOURCE set BitMap
    1	C_SYNC_SOURCE use BitMap
    0	no Sync
   -1	C_SYNC_TARGET use BitMap
   -2	C_SYNC_TARGET set BitMap
 -100	after split brain, disconnect
-1000	unrelated data
-1091	requires proto 91
-1096	requires proto 96
 */
static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
{
	u64 self, peer;
	int i, j;

	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);

	*rule_nr = 10;
	if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
		return 0;

	*rule_nr = 20;
	if ((self == UUID_JUST_CREATED || self == (u64)0) &&
	     peer != UUID_JUST_CREATED)
		return -2;

	*rule_nr = 30;
	if (self != UUID_JUST_CREATED &&
	    (peer == UUID_JUST_CREATED || peer == (u64)0))
		return 2;

	if (self == peer) {
		int rct, dc; /* roles at crash time */

		if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {

			if (mdev->tconn->agreed_pro_version < 91)
				return -1091;

			if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
				dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
				drbd_uuid_move_history(mdev);
				mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
				mdev->ldev->md.uuid[UI_BITMAP] = 0;

				drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
					       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
				*rule_nr = 34;
			} else {
				dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
				*rule_nr = 36;
			}

			return 1;
		}

		if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {

			if (mdev->tconn->agreed_pro_version < 91)
				return -1091;

			if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
				dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");

				mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
				mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
				mdev->p_uuid[UI_BITMAP] = 0UL;

				drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
				*rule_nr = 35;
			} else {
				dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
				*rule_nr = 37;
			}

			return -1;
		}

		/* Common power [off|failure] */
		rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
			(mdev->p_uuid[UI_FLAGS] & 2);
		/* lowest bit is set when we were primary,
		 * next bit (weight 2) is set when peer was primary */
		*rule_nr = 40;

		switch (rct) {
		case 0: /* !self_pri && !peer_pri */ return 0;
		case 1: /*  self_pri && !peer_pri */ return 1;
		case 2: /* !self_pri &&  peer_pri */ return -1;
		case 3: /*  self_pri &&  peer_pri */
			dc = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
			return dc ? -1 : 1;
		}
	}

	*rule_nr = 50;
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer)
		return -1;

	*rule_nr = 51;
	peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		if (mdev->tconn->agreed_pro_version < 96 ?
		    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
		    (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
		    peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
			/* The last P_SYNC_UUID did not get through. Undo the last start of
			   resync as sync source modifications of the peer's UUIDs. */

			if (mdev->tconn->agreed_pro_version < 91)
				return -1091;

			mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
			mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];

			dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
			drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

			return -1;
		}
	}

	*rule_nr = 60;
	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		peer = mdev->p_uuid[i] & ~((u64)1);
		if (self == peer)
			return -2;
	}

	*rule_nr = 70;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	if (self == peer)
		return 1;

	*rule_nr = 71;
	self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		if (mdev->tconn->agreed_pro_version < 96 ?
		    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
		    (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
		    self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
			/* The last P_SYNC_UUID did not get through. Undo the last start of
			   resync as sync source modifications of our UUIDs. */

			if (mdev->tconn->agreed_pro_version < 91)
				return -1091;

			__drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
			__drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);

			dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
			drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
				       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);

			return 1;
		}
	}


	*rule_nr = 80;
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		if (self == peer)
			return 2;
	}

	*rule_nr = 90;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer && self != ((u64)0))
		return 100;

	*rule_nr = 100;
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
			peer = mdev->p_uuid[j] & ~((u64)1);
			if (self == peer)
				return -100;
		}
	}

	return -1000;
}
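
/* Editor's note -- illustrative reading of one rule, not part of the
 * original source: rule 50 fires when our current UUID equals the peer's
 * bitmap UUID, i.e. the peer already opened a bitmap to track changes
 * against our current data generation; the verdict -1 therefore makes
 * this node the C_SYNC_TARGET, reusing that bitmap. */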

/* drbd_sync_handshake() returns the new conn state on success, or
   CONN_MASK (-1) on failure.
 */
static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
					   enum drbd_disk_state peer_disk) __must_hold(local)
{
	enum drbd_conns rv = C_MASK;
	enum drbd_disk_state mydisk;
	struct net_conf *nc;
	int hg, rule_nr, rr_conflict, tentative;

	mydisk = mdev->state.disk;
	if (mydisk == D_NEGOTIATING)
		mydisk = mdev->new_state_tmp.disk;

	dev_info(DEV, "drbd_sync_handshake:\n");

	spin_lock_irq(&mdev->ldev->md.uuid_lock);
	drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
	drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
		       mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

	hg = drbd_uuid_compare(mdev, &rule_nr);
	spin_unlock_irq(&mdev->ldev->md.uuid_lock);

	dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);

	if (hg == -1000) {
		dev_alert(DEV, "Unrelated data, aborting!\n");
		return C_MASK;
	}
	if (hg < -1000) {
		dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
		return C_MASK;
	}

	if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
	    (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
		int f = (hg == -100) || abs(hg) == 2;
		hg = mydisk > D_INCONSISTENT ? 1 : -1;
		if (f)
			hg = hg*2;
		dev_info(DEV, "Becoming sync %s due to disk states.\n",
			 hg > 0 ? "source" : "target");
	}

	if (abs(hg) == 100)
		drbd_khelper(mdev, "initial-split-brain");

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);

	if (hg == 100 || (hg == -100 && nc->always_asbp)) {
		int pcount = (mdev->state.role == R_PRIMARY)
			   + (peer_role == R_PRIMARY);
		int forced = (hg == -100);

		switch (pcount) {
		case 0:
			hg = drbd_asb_recover_0p(mdev);
			break;
		case 1:
			hg = drbd_asb_recover_1p(mdev);
			break;
		case 2:
			hg = drbd_asb_recover_2p(mdev);
			break;
		}
		if (abs(hg) < 100) {
			dev_warn(DEV, "Split-Brain detected, %d primaries, "
				 "automatically solved. Sync from %s node\n",
				 pcount, (hg < 0) ? "peer" : "this");
			if (forced) {
				dev_warn(DEV, "Doing a full sync, since"
					 " UUIDs were ambiguous.\n");
				hg = hg*2;
			}
		}
	}

	if (hg == -100) {
		if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1))
			hg = -1;
		if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1))
			hg = 1;

		if (abs(hg) < 100)
			dev_warn(DEV, "Split-Brain detected, manually solved. "
				 "Sync from %s node\n",
				 (hg < 0) ? "peer" : "this");
	}
	rr_conflict = nc->rr_conflict;
	tentative = nc->tentative;
	rcu_read_unlock();

	if (hg == -100) {
		/* FIXME this log message is not correct if we end up here
		 * after an attempted attach on a diskless node.
		 * We just refuse to attach -- well, we drop the "connection"
		 * to that disk, in a way... */
		dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
		drbd_khelper(mdev, "split-brain");
		return C_MASK;
	}

	if (hg > 0 && mydisk <= D_INCONSISTENT) {
		dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
		return C_MASK;
	}

	if (hg < 0 && /* by intention we do not use mydisk here. */
	    mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
		switch (rr_conflict) {
		case ASB_CALL_HELPER:
			drbd_khelper(mdev, "pri-lost");
			/* fall through */
		case ASB_DISCONNECT:
			dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
			return C_MASK;
		case ASB_VIOLENTLY:
			dev_warn(DEV, "Becoming SyncTarget, violating the stable-data "
				 "assumption\n");
		}
	}

	if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
		if (hg == 0)
			dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
		else
			dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
				 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
				 abs(hg) >= 2 ? "full" : "bit-map based");
		return C_MASK;
	}

	if (abs(hg) >= 2) {
		dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
					BM_LOCKED_SET_ALLOWED))
			return C_MASK;
	}

	if (hg > 0) { /* become sync source. */
		rv = C_WF_BITMAP_S;
	} else if (hg < 0) { /* become sync target */
		rv = C_WF_BITMAP_T;
	} else {
		rv = C_CONNECTED;
		if (drbd_bm_total_weight(mdev)) {
			dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
				 drbd_bm_total_weight(mdev));
		}
	}

	return rv;
}

static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
{
	/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
	if (peer == ASB_DISCARD_REMOTE)
		return ASB_DISCARD_LOCAL;

	/* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
	if (peer == ASB_DISCARD_LOCAL)
		return ASB_DISCARD_REMOTE;

	/* everything else is valid if they are equal on both sides. */
	return peer;
}
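
/* Editor's note -- illustrative example, not part of the original source:
 * "discard-remote" on the peer names *us*, so it corresponds to
 * "discard-local" here; convert_after_sb() mirrors the peer's setting
 * before receive_protocol() compares it against our own, and any other
 * pair of values must simply be equal on both sides. */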

static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct p_protocol *p = pi->data;
	enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
	int p_proto, p_discard_my_data, p_two_primaries, cf;
	struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
	char integrity_alg[SHARED_SECRET_MAX] = "";
	struct crypto_hash *peer_integrity_tfm = NULL;
	void *int_dig_in = NULL, *int_dig_vv = NULL;

	p_proto		= be32_to_cpu(p->protocol);
	p_after_sb_0p	= be32_to_cpu(p->after_sb_0p);
	p_after_sb_1p	= be32_to_cpu(p->after_sb_1p);
	p_after_sb_2p	= be32_to_cpu(p->after_sb_2p);
	p_two_primaries = be32_to_cpu(p->two_primaries);
	cf		= be32_to_cpu(p->conn_flags);
	p_discard_my_data = cf & CF_DISCARD_MY_DATA;

	if (tconn->agreed_pro_version >= 87) {
		int err;

		if (pi->size > sizeof(integrity_alg))
			return -EIO;
		err = drbd_recv_all(tconn, integrity_alg, pi->size);
		if (err)
			return err;
		integrity_alg[SHARED_SECRET_MAX - 1] = 0;
	}

	if (pi->cmd != P_PROTOCOL_UPDATE) {
		clear_bit(CONN_DRY_RUN, &tconn->flags);

		if (cf & CF_DRY_RUN)
			set_bit(CONN_DRY_RUN, &tconn->flags);

		rcu_read_lock();
		nc = rcu_dereference(tconn->net_conf);

		if (p_proto != nc->wire_protocol) {
			conn_err(tconn, "incompatible %s settings\n", "protocol");
			goto disconnect_rcu_unlock;
		}

		if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
			conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri");
			goto disconnect_rcu_unlock;
		}

		if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
			conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri");
			goto disconnect_rcu_unlock;
		}

		if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
			conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri");
			goto disconnect_rcu_unlock;
		}

		if (p_discard_my_data && nc->discard_my_data) {
			conn_err(tconn, "incompatible %s settings\n", "discard-my-data");
			goto disconnect_rcu_unlock;
		}

		if (p_two_primaries != nc->two_primaries) {
			conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries");
			goto disconnect_rcu_unlock;
		}

		if (strcmp(integrity_alg, nc->integrity_alg)) {
			conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg");
			goto disconnect_rcu_unlock;
		}

		rcu_read_unlock();
	}

	if (integrity_alg[0]) {
		int hash_size;

		/*
		 * We can only change the peer data integrity algorithm
		 * here.  Changing our own data integrity algorithm
		 * requires that we send a P_PROTOCOL_UPDATE packet at
		 * the same time; otherwise, the peer has no way to
		 * tell between which packets the algorithm should
		 * change.
		 */

		peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (!peer_integrity_tfm) {
			conn_err(tconn, "peer data-integrity-alg %s not supported\n",
				 integrity_alg);
			goto disconnect;
		}

		hash_size = crypto_hash_digestsize(peer_integrity_tfm);
		int_dig_in = kmalloc(hash_size, GFP_KERNEL);
		int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
		if (!(int_dig_in && int_dig_vv)) {
			conn_err(tconn, "Allocation of buffers for data integrity checking failed\n");
			goto disconnect;
		}
	}

	new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_net_conf) {
		conn_err(tconn, "Allocation of new net_conf failed\n");
		goto disconnect;
	}

	mutex_lock(&tconn->data.mutex);
	mutex_lock(&tconn->conf_update);
	old_net_conf = tconn->net_conf;
	*new_net_conf = *old_net_conf;

	new_net_conf->wire_protocol = p_proto;
	new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
	new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
	new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
	new_net_conf->two_primaries = p_two_primaries;

	rcu_assign_pointer(tconn->net_conf, new_net_conf);
	mutex_unlock(&tconn->conf_update);
	mutex_unlock(&tconn->data.mutex);

	crypto_free_hash(tconn->peer_integrity_tfm);
	kfree(tconn->int_dig_in);
	kfree(tconn->int_dig_vv);
	tconn->peer_integrity_tfm = peer_integrity_tfm;
	tconn->int_dig_in = int_dig_in;
	tconn->int_dig_vv = int_dig_vv;

	if (strcmp(old_net_conf->integrity_alg, integrity_alg))
		conn_info(tconn, "peer data-integrity-alg: %s\n",
			  integrity_alg[0] ? integrity_alg : "(none)");

	synchronize_rcu();
	kfree(old_net_conf);
	return 0;

disconnect_rcu_unlock:
	rcu_read_unlock();
disconnect:
	crypto_free_hash(peer_integrity_tfm);
	kfree(int_dig_in);
	kfree(int_dig_vv);
	conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
	return -EIO;
}

/* helper function
 * input: alg name, feature name
 * return: NULL (alg name was "")
 *         ERR_PTR(error) if something goes wrong
 *         or the crypto hash ptr, if it worked out ok. */
struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
		const char *alg, const char *name)
{
	struct crypto_hash *tfm;

	if (!alg[0])
		return NULL;

	tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
			alg, name, PTR_ERR(tfm));
		return tfm;
	}
	return tfm;
}

static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
{
	void *buffer = tconn->data.rbuf;
	int size = pi->size;

	while (size) {
		int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
		s = drbd_recv(tconn, buffer, s);
		if (s <= 0) {
			if (s < 0)
				return s;
			break;
		}
		size -= s;
	}
	if (size)
		return -EIO;
	return 0;
}
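
/*
 * Illustrative sketch (not part of the driver): the same bounded-buffer
 * drain loop as above, written against plain POSIX read() so it can be
 * tried in isolation.  Discarding in fixed-size chunks lets one scratch
 * buffer swallow an arbitrarily large payload; EOF with bytes still
 * outstanding is an error, just like the final -EIO case above.
 */
#if 0
#include <unistd.h>

static int drain_bytes(int fd, char *scratch, int scratch_size, int size)
{
	while (size) {
		int chunk = size < scratch_size ? size : scratch_size;
		ssize_t n = read(fd, scratch, chunk);

		if (n < 0)
			return -1;	/* I/O error */
		if (n == 0)
			break;		/* EOF before the payload ended */
		size -= n;
	}
	return size ? -1 : 0;
}
#endif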

/*
 * config_unknown_volume  -  device configuration command for unknown volume
 *
 * When a device is added to an existing connection, the node on which the
 * device is added first will send configuration commands to its peer but the
 * peer will not know about the device yet.  It will warn and ignore these
 * commands.  Once the device is added on the second node, the second node
 * will send the same device configuration commands, but in the other
 * direction.
 *
 * (We can also end up here if drbd is misconfigured.)
 */
static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
{
	conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n",
		  cmdname(pi->cmd), pi->vnr);
	return ignore_remaining_packet(tconn, pi);
}

static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_rs_param_95 *p;
	unsigned int header_size, data_size, exp_max_sz;
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	struct net_conf *old_net_conf, *new_net_conf = NULL;
	struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
	const int apv = tconn->agreed_pro_version;
	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
	int fifo_size = 0;
	int err;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return config_unknown_volume(tconn, pi);

	exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
		    : apv == 88 ? sizeof(struct p_rs_param)
					+ SHARED_SECRET_MAX
		    : apv <= 94 ? sizeof(struct p_rs_param_89)
		    : /* apv >= 95 */ sizeof(struct p_rs_param_95);

	if (pi->size > exp_max_sz) {
		dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
			pi->size, exp_max_sz);
		return -EIO;
	}

	if (apv <= 88) {
		header_size = sizeof(struct p_rs_param);
		data_size = pi->size - header_size;
	} else if (apv <= 94) {
		header_size = sizeof(struct p_rs_param_89);
		data_size = pi->size - header_size;
		D_ASSERT(data_size == 0);
	} else {
		header_size = sizeof(struct p_rs_param_95);
		data_size = pi->size - header_size;
		D_ASSERT(data_size == 0);
	}

	/* initialize verify_alg and csums_alg */
	p = pi->data;
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	err = drbd_recv_all(mdev->tconn, p, header_size);
	if (err)
		return err;

	mutex_lock(&mdev->tconn->conf_update);
	old_net_conf = mdev->tconn->net_conf;
	if (get_ldev(mdev)) {
		new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
		if (!new_disk_conf) {
			put_ldev(mdev);
			mutex_unlock(&mdev->tconn->conf_update);
			dev_err(DEV, "Allocation of new disk_conf failed\n");
			return -ENOMEM;
		}

		old_disk_conf = mdev->ldev->disk_conf;
		*new_disk_conf = *old_disk_conf;

		new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
	}

	if (apv >= 88) {
		if (apv == 88) {
			if (data_size > SHARED_SECRET_MAX || data_size == 0) {
				dev_err(DEV, "verify-alg of wrong size, "
					"peer wants %u, accepting only up to %u byte\n",
					data_size, SHARED_SECRET_MAX);
				err = -EIO;
				goto reconnect;
			}

			err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
			if (err)
				goto reconnect;
			/* we expect NUL terminated string */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[data_size-1] == 0);
			p->verify_alg[data_size-1] = 0;

		} else /* apv >= 89 */ {
			/* we still expect NUL terminated strings */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
			D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
			p->verify_alg[SHARED_SECRET_MAX-1] = 0;
			p->csums_alg[SHARED_SECRET_MAX-1] = 0;
		}

		if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
					old_net_conf->verify_alg, p->verify_alg);
				goto disconnect;
			}
			verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->verify_alg, "verify-alg");
			if (IS_ERR(verify_tfm)) {
				verify_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
					old_net_conf->csums_alg, p->csums_alg);
				goto disconnect;
			}
			csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->csums_alg, "csums-alg");
			if (IS_ERR(csums_tfm)) {
				csums_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv > 94 && new_disk_conf) {
			new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
			new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
			new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
			new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);

			fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
			if (fifo_size != mdev->rs_plan_s->size) {
				new_plan = fifo_alloc(fifo_size);
				if (!new_plan) {
					dev_err(DEV, "kmalloc of fifo_buffer failed");
					put_ldev(mdev);
					goto disconnect;
				}
			}
		}

		if (verify_tfm || csums_tfm) {
			new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
			if (!new_net_conf) {
				dev_err(DEV, "Allocation of new net_conf failed\n");
				goto disconnect;
			}

			*new_net_conf = *old_net_conf;

			if (verify_tfm) {
				strcpy(new_net_conf->verify_alg, p->verify_alg);
				new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
				crypto_free_hash(mdev->tconn->verify_tfm);
				mdev->tconn->verify_tfm = verify_tfm;
				dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
			}
			if (csums_tfm) {
				strcpy(new_net_conf->csums_alg, p->csums_alg);
				new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
				crypto_free_hash(mdev->tconn->csums_tfm);
				mdev->tconn->csums_tfm = csums_tfm;
				dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
			}
			rcu_assign_pointer(tconn->net_conf, new_net_conf);
		}
	}

	if (new_disk_conf) {
		rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
		put_ldev(mdev);
	}

	if (new_plan) {
		old_plan = mdev->rs_plan_s;
		rcu_assign_pointer(mdev->rs_plan_s, new_plan);
	}

	mutex_unlock(&mdev->tconn->conf_update);
	synchronize_rcu();
	if (new_net_conf)
		kfree(old_net_conf);
	kfree(old_disk_conf);
	kfree(old_plan);

	return 0;

reconnect:
	if (new_disk_conf) {
		put_ldev(mdev);
		kfree(new_disk_conf);
	}
	mutex_unlock(&mdev->tconn->conf_update);
	return -EIO;

disconnect:
	kfree(new_plan);
	if (new_disk_conf) {
		put_ldev(mdev);
		kfree(new_disk_conf);
	}
	mutex_unlock(&mdev->tconn->conf_update);
	/* just for completeness: actually not needed,
	 * as this is not reached if csums_tfm was ok. */
	crypto_free_hash(csums_tfm);
	/* but free the verify_tfm again, if csums_tfm did not work out */
	crypto_free_hash(verify_tfm);
	conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
	return -EIO;
}

/* warn if the arguments differ by more than 12.5% */
static void warn_if_differ_considerably(struct drbd_conf *mdev,
	const char *s, sector_t a, sector_t b)
{
	sector_t d;
	if (a == 0 || b == 0)
		return;
	d = (a > b) ? (a - b) : (b - a);
	if (d > (a>>3) || d > (b>>3))
		dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
			 (unsigned long long)a, (unsigned long long)b);
}

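/*
 * Illustrative sketch (not part of the driver): "differ by more than
 * 12.5%" is implemented with shifts, since x>>3 == x/8 == 12.5% of x.
 * E.g. a = 1000, b = 1200: d = 200 > (1000>>3) = 125, so it warns;
 * a = 1000, b = 1100: d = 100 <= 125 and 100 <= 137, so it stays quiet.
 */
#if 0
static int differ_considerably(unsigned long long a, unsigned long long b)
{
	unsigned long long d;

	if (a == 0 || b == 0)
		return 0;
	d = (a > b) ? (a - b) : (b - a);
	return d > (a >> 3) || d > (b >> 3);
}
#endif
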
static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_sizes *p = pi->data;
	enum determine_dev_size dd = unchanged;
	sector_t p_size, p_usize, my_usize;
	int ldsc = 0; /* local disk size changed */
	enum dds_flags ddsf;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return config_unknown_volume(tconn, pi);

	p_size = be64_to_cpu(p->d_size);
	p_usize = be64_to_cpu(p->u_size);

	/* just store the peer's disk size for now.
	 * we still need to figure out whether we accept that. */
	mdev->p_size = p_size;

	if (get_ldev(mdev)) {
		rcu_read_lock();
		my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
		rcu_read_unlock();

		warn_if_differ_considerably(mdev, "lower level device sizes",
			   p_size, drbd_get_max_capacity(mdev->ldev));
		warn_if_differ_considerably(mdev, "user requested size",
					    p_usize, my_usize);

		/* if this is the first connect, or an otherwise expected
		 * param exchange, choose the minimum */
		if (mdev->state.conn == C_WF_REPORT_PARAMS)
			p_usize = min_not_zero(my_usize, p_usize);

		/* Never shrink a device with usable data during connect.
		   But allow online shrinking if we are connected. */
		if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
		    drbd_get_capacity(mdev->this_bdev) &&
		    mdev->state.disk >= D_OUTDATED &&
		    mdev->state.conn < C_CONNECTED) {
			dev_err(DEV, "The peer's disk size is too small!\n");
			conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
			put_ldev(mdev);
			return -EIO;
		}

		if (my_usize != p_usize) {
			struct disk_conf *old_disk_conf, *new_disk_conf = NULL;

			new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
			if (!new_disk_conf) {
				dev_err(DEV, "Allocation of new disk_conf failed\n");
				put_ldev(mdev);
				return -ENOMEM;
			}

			mutex_lock(&mdev->tconn->conf_update);
			old_disk_conf = mdev->ldev->disk_conf;
			*new_disk_conf = *old_disk_conf;
			new_disk_conf->disk_size = p_usize;

			rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
			mutex_unlock(&mdev->tconn->conf_update);
			synchronize_rcu();
			kfree(old_disk_conf);

			dev_info(DEV, "Peer sets u_size to %lu sectors\n",
				 (unsigned long)p_usize);
		}

		put_ldev(mdev);
	}

	ddsf = be16_to_cpu(p->dds_flags);
	if (get_ldev(mdev)) {
		dd = drbd_determine_dev_size(mdev, ddsf);
		put_ldev(mdev);
		if (dd == dev_size_error)
			return -EIO;
		drbd_md_sync(mdev);
	} else {
		/* I am diskless, need to accept the peer's size. */
		drbd_set_my_capacity(mdev, p_size);
	}

	mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
	drbd_reconsider_max_bio_size(mdev);

	if (get_ldev(mdev)) {
		if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
			mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
			ldsc = 1;
		}

		put_ldev(mdev);
	}

	if (mdev->state.conn > C_WF_REPORT_PARAMS) {
		if (be64_to_cpu(p->c_size) !=
		    drbd_get_capacity(mdev->this_bdev) || ldsc) {
			/* we have different sizes, probably peer
			 * needs to know my new size... */
			drbd_send_sizes(mdev, 0, ddsf);
		}
		if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
		    (dd == grew && mdev->state.conn == C_CONNECTED)) {
			if (mdev->state.pdsk >= D_INCONSISTENT &&
			    mdev->state.disk >= D_INCONSISTENT) {
				if (ddsf & DDSF_NO_RESYNC)
					dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
				else
					resync_after_online_grow(mdev);
			} else
				set_bit(RESYNC_AFTER_NEG, &mdev->flags);
		}
	}

	return 0;
}

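/*
 * Illustrative sketch (not part of the driver): during the initial
 * parameter exchange the effective user-requested size is the minimum
 * of both nodes' settings, where 0 means "no limit configured".
 * E.g. my_usize = 0, p_usize = 1000 -> 1000; 800 vs. 1000 -> 800.
 */
#if 0
static unsigned long long negotiated_usize(unsigned long long my_usize,
					   unsigned long long p_usize)
{
	if (my_usize == 0)
		return p_usize;
	if (p_usize == 0)
		return my_usize;
	return my_usize < p_usize ? my_usize : p_usize;	/* min_not_zero() */
}
#endif
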
static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_uuids *p = pi->data;
	u64 *p_uuid;
	int i, updated_uuids = 0;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return config_unknown_volume(tconn, pi);

	p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
	if (!p_uuid) {
		dev_err(DEV, "kmalloc of p_uuid failed\n");
		return -ENOMEM;
	}

	for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
		p_uuid[i] = be64_to_cpu(p->uuid[i]);

	kfree(mdev->p_uuid);
	mdev->p_uuid = p_uuid;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.disk < D_INCONSISTENT &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		return -EIO;
	}

	if (get_ldev(mdev)) {
		int skip_initial_sync =
			mdev->state.conn == C_CONNECTED &&
			mdev->tconn->agreed_pro_version >= 90 &&
			mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
			(p_uuid[UI_FLAGS] & 8);
		if (skip_initial_sync) {
			dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
			drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
					"clear_n_write from receive_uuids",
					BM_LOCKED_TEST_ALLOWED);
			_drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			drbd_md_sync(mdev);
			updated_uuids = 1;
		}
		put_ldev(mdev);
	} else if (mdev->state.disk < D_INCONSISTENT &&
		   mdev->state.role == R_PRIMARY) {
		/* I am a diskless primary, the peer just created a new current UUID
		   for me. */
		updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
	}

	/* Before we test for the disk state, we should wait until a possibly
	   ongoing cluster-wide state change has finished.  That is important
	   if we are primary and are detaching from our disk.  We need to see
	   the new disk state... */
	mutex_lock(mdev->state_mutex);
	mutex_unlock(mdev->state_mutex);
	if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
		updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);

	if (updated_uuids)
		drbd_print_uuids(mdev, "receiver updated UUIDs to");

	return 0;
}

/**
 * convert_state() - Converts the peer's view of the cluster state to our point of view
 * @ps: The state as seen by the peer.
 */
static union drbd_state convert_state(union drbd_state ps)
{
	union drbd_state ms;

	static enum drbd_conns c_tab[] = {
		[C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
		[C_CONNECTED] = C_CONNECTED,

		[C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
		[C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
		[C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
		[C_VERIFY_S] = C_VERIFY_T,
		[C_MASK] = C_MASK,
	};

	ms.i = ps.i;

	ms.conn = c_tab[ps.conn];
	ms.peer = ps.role;
	ms.role = ps.peer;
	ms.pdsk = ps.disk;
	ms.disk = ps.pdsk;
	ms.peer_isp = (ps.aftr_isp | ps.user_isp);

	return ms;
}

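/*
 * Illustrative sketch (not part of the driver): convert_state() simply
 * mirrors every asymmetric field, because "my role" from the peer's
 * point of view is "peer role" from ours, and likewise for the disk
 * states.
 */
#if 0
static void convert_state_example(void)
{
	union drbd_state ps = { .i = 0 }, ms;

	ps.role = R_PRIMARY;	ps.peer = R_SECONDARY;
	ps.disk = D_UP_TO_DATE;	ps.pdsk = D_INCONSISTENT;
	ms = convert_state(ps);
	/* now: ms.role == R_SECONDARY, ms.peer == R_PRIMARY,
	 *      ms.disk == D_INCONSISTENT, ms.pdsk == D_UP_TO_DATE */
}
#endif
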
static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_req_state *p = pi->data;
	union drbd_state mask, val;
	enum drbd_state_rv rv;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	mask.i = be32_to_cpu(p->mask);
	val.i = be32_to_cpu(p->val);

	if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) &&
	    mutex_is_locked(mdev->state_mutex)) {
		drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
		return 0;
	}

	mask = convert_state(mask);
	val = convert_state(val);

	rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
	drbd_send_sr_reply(mdev, rv);

	drbd_md_sync(mdev);

	return 0;
}

static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct p_req_state *p = pi->data;
	union drbd_state mask, val;
	enum drbd_state_rv rv;

	mask.i = be32_to_cpu(p->mask);
	val.i = be32_to_cpu(p->val);

	if (test_bit(RESOLVE_CONFLICTS, &tconn->flags) &&
	    mutex_is_locked(&tconn->cstate_mutex)) {
		conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
		return 0;
	}

	mask = convert_state(mask);
	val = convert_state(val);

	rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
	conn_send_sr_reply(tconn, rv);

	return 0;
}

static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_state *p = pi->data;
	union drbd_state os, ns, peer_state;
	enum drbd_disk_state real_peer_disk;
	enum chg_state_flags cs_flags;
	int rv;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return config_unknown_volume(tconn, pi);

	peer_state.i = be32_to_cpu(p->state);

	real_peer_disk = peer_state.disk;
	if (peer_state.disk == D_NEGOTIATING) {
		real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
		dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
	}

	spin_lock_irq(&mdev->tconn->req_lock);
 retry:
	os = ns = drbd_read_state(mdev);
	spin_unlock_irq(&mdev->tconn->req_lock);

	/* If some other part of the code (asender thread, timeout)
	 * already decided to close the connection again,
	 * we must not "re-establish" it here. */
	if (os.conn <= C_TEAR_DOWN)
		return -ECONNRESET;

	/* If this is the "end of sync" confirmation, usually the peer disk
	 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
	 * set) resync started in PausedSyncT, or if the timing of pause-/
	 * unpause-sync events has been "just right", the peer disk may
	 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
	 */
	if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
	    real_peer_disk == D_UP_TO_DATE &&
	    os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
		/* If we are (becoming) SyncSource, but peer is still in sync
		 * preparation, ignore its uptodate-ness to avoid flapping, it
		 * will change to inconsistent once the peer reaches active
		 * syncing states.
		 * It may have changed syncer-paused flags, however, so we
		 * cannot ignore this completely. */
		if (peer_state.conn > C_CONNECTED &&
		    peer_state.conn < C_SYNC_SOURCE)
			real_peer_disk = D_INCONSISTENT;

		/* if peer_state changes to connected at the same time,
		 * it explicitly notifies us that it finished resync.
		 * Maybe we should finish it up, too? */
		else if (os.conn >= C_SYNC_SOURCE &&
			 peer_state.conn == C_CONNECTED) {
			if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
				drbd_resync_finished(mdev);
			return 0;
		}
	}

	/* explicit verify finished notification, stop sector reached. */
	if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
	    peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
		ov_out_of_sync_print(mdev);
		drbd_resync_finished(mdev);
		return 0;
	}

	/* peer says his disk is inconsistent, while we think it is uptodate,
	 * and this happens while the peer still thinks we have a sync going on,
	 * but we think we are already done with the sync.
	 * We ignore this to avoid flapping pdsk.
	 * This should not happen, if the peer is a recent version of drbd. */
	if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
	    os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
		real_peer_disk = D_UP_TO_DATE;

	if (ns.conn == C_WF_REPORT_PARAMS)
		ns.conn = C_CONNECTED;

	if (peer_state.conn == C_AHEAD)
		ns.conn = C_BEHIND;

	if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
		int cr; /* consider resync */

		/* if we established a new connection */
		cr  = (os.conn < C_CONNECTED);
		/* if we had an established connection
		 * and one of the nodes newly attaches a disk */
		cr |= (os.conn == C_CONNECTED &&
		       (peer_state.disk == D_NEGOTIATING ||
			os.disk == D_NEGOTIATING));
		/* if we have both been inconsistent, and the peer has been
		 * forced to be UpToDate with --overwrite-data */
		cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
		/* if we had been plain connected, and the admin requested to
		 * start a sync by "invalidate" or "invalidate-remote" */
		cr |= (os.conn == C_CONNECTED &&
		       (peer_state.conn >= C_STARTING_SYNC_S &&
			peer_state.conn <= C_WF_BITMAP_T));

		if (cr)
			ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);

		put_ldev(mdev);
		if (ns.conn == C_MASK) {
			ns.conn = C_CONNECTED;
			if (mdev->state.disk == D_NEGOTIATING) {
				drbd_force_state(mdev, NS(disk, D_FAILED));
			} else if (peer_state.disk == D_NEGOTIATING) {
				dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
				peer_state.disk = D_DISKLESS;
				real_peer_disk = D_DISKLESS;
			} else {
				if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
					return -EIO;
				D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
				conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
				return -EIO;
			}
		}
	}

	spin_lock_irq(&mdev->tconn->req_lock);
	if (os.i != drbd_read_state(mdev).i)
		goto retry;
	clear_bit(CONSIDER_RESYNC, &mdev->flags);
	ns.peer = peer_state.role;
	ns.pdsk = real_peer_disk;
	ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
	if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
		ns.disk = mdev->new_state_tmp.disk;
	cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
	if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
	    test_bit(NEW_CUR_UUID, &mdev->flags)) {
		/* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
		   for temporary network outages! */
		spin_unlock_irq(&mdev->tconn->req_lock);
		dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
		tl_clear(mdev->tconn);
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
		conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
		return -EIO;
	}
	rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
	ns = drbd_read_state(mdev);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (rv < SS_SUCCESS) {
		conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		return -EIO;
	}

	if (os.conn > C_WF_REPORT_PARAMS) {
		if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
		    peer_state.disk != D_NEGOTIATING) {
			/* we want resync, peer has not yet decided to sync... */
			/* Nowadays only used when forcing a node into primary role and
			   setting its disk to UpToDate with that */
			drbd_send_uuids(mdev);
			drbd_send_current_state(mdev);
		}
	}

	clear_bit(DISCARD_MY_DATA, &mdev->flags);

	drbd_md_sync(mdev); /* update connected indicator, la_size_sect, ... */

	return 0;
}

static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_rs_uuid *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	wait_event(mdev->misc_wait,
		   mdev->state.conn == C_WF_SYNC_UUID ||
		   mdev->state.conn == C_BEHIND ||
		   mdev->state.conn < C_CONNECTED ||
		   mdev->state.disk < D_NEGOTIATING);

	/* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */

	/* Here the _drbd_uuid_ functions are right, current should
	   _not_ be rotated into the history */
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		_drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
		_drbd_uuid_set(mdev, UI_BITMAP, 0UL);

		drbd_print_uuids(mdev, "updated sync uuid");
		drbd_start_resync(mdev, C_SYNC_TARGET);

		put_ldev(mdev);
	} else
		dev_err(DEV, "Ignoring SyncUUID packet!\n");

	return 0;
}

/**
 * receive_bitmap_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
		     unsigned long *p, struct bm_xfer_ctx *c)
{
	unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
				 drbd_header_size(mdev->tconn);
	unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
				       c->bm_words - c->word_offset);
	unsigned int want = num_words * sizeof(*p);
	int err;

	if (want != size) {
		dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size);
		return -EIO;
	}
	if (want == 0)
		return 0;
	err = drbd_recv_all(mdev->tconn, p, want);
	if (err)
		return err;

	drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);

	c->word_offset += num_words;
	c->bit_offset = c->word_offset * BITS_PER_LONG;
	if (c->bit_offset > c->bm_bits)
		c->bit_offset = c->bm_bits;

	return 1;
}

static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
{
	return (enum drbd_bitmap_code)(p->encoding & 0x0f);
}

static int dcbp_get_start(struct p_compressed_bm *p)
{
	return (p->encoding & 0x80) != 0;
}

static int dcbp_get_pad_bits(struct p_compressed_bm *p)
{
	return (p->encoding >> 4) & 0x7;
}

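/*
 * Illustrative sketch (not part of the driver): the accessors above
 * imply this layout for the encoding byte --
 *
 *   bit 7      start value of the first run (set = first run is "1" bits)
 *   bits 6..4  number of padding bits at the end of the bitstream
 *   bits 3..0  bitmap encoding, e.g. RLE_VLI_Bits
 *
 * so a hypothetical encoder would pack it like this:
 */
#if 0
static u8 dcbp_pack(int start, int pad_bits, enum drbd_bitmap_code code)
{
	return (start ? 0x80 : 0) |
	       ((pad_bits & 0x7) << 4) |
	       ((u8)code & 0x0f);
}
#endif
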
/**
 * recv_bm_rle_bits
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
recv_bm_rle_bits(struct drbd_conf *mdev,
		 struct p_compressed_bm *p,
		 struct bm_xfer_ctx *c,
		 unsigned int len)
{
	struct bitstream bs;
	u64 look_ahead;
	u64 rl;
	u64 tmp;
	unsigned long s = c->bit_offset;
	unsigned long e;
	int toggle = dcbp_get_start(p);
	int have;
	int bits;

	bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));

	bits = bitstream_get_bits(&bs, &look_ahead, 64);
	if (bits < 0)
		return -EIO;

	for (have = bits; have > 0; s += rl, toggle = !toggle) {
		bits = vli_decode_bits(&rl, look_ahead);
		if (bits <= 0)
			return -EIO;

		if (toggle) {
			e = s + rl - 1;
			if (e >= c->bm_bits) {
				dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
				return -EIO;
			}
			_drbd_bm_set_bits(mdev, s, e);
		}

		if (have < bits) {
			dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
				have, bits, look_ahead,
				(unsigned int)(bs.cur.b - p->code),
				(unsigned int)bs.buf_len);
			return -EIO;
		}
		look_ahead >>= bits;
		have -= bits;

		bits = bitstream_get_bits(&bs, &tmp, 64 - have);
		if (bits < 0)
			return -EIO;
		look_ahead |= tmp << have;
		have += bits;
	}

	c->bit_offset = s;
	bm_xfer_ctx_bit_to_word_offset(c);

	return (s != c->bm_bits);
}

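/*
 * Illustrative sketch (not part of the driver): the core of the RLE
 * scheme above, with the variable-length integer decoding stripped
 * away.  Only run lengths are transmitted; a toggle flag alternates
 * between runs of clear and of set bits.  With start = 0 and runs
 * {3, 5, 2}, bits 0..2 stay clear, bits 3..7 are set, and bits 8..9
 * stay clear again.
 */
#if 0
static void rle_expand(const unsigned long *runs, int nruns,
		       int start, unsigned char *out /* one byte per bit */)
{
	unsigned long s = 0;
	int toggle = start;
	int i;

	for (i = 0; i < nruns; i++, toggle = !toggle) {
		if (toggle) {
			unsigned long j;
			for (j = s; j < s + runs[i]; j++)
				out[j] = 1;	/* cf. _drbd_bm_set_bits(s, e) */
		}
		s += runs[i];
	}
}
#endif
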
/**
 * decode_bitmap_c
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
decode_bitmap_c(struct drbd_conf *mdev,
		struct p_compressed_bm *p,
		struct bm_xfer_ctx *c,
		unsigned int len)
{
	if (dcbp_get_code(p) == RLE_VLI_Bits)
		return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));

	/* other variants had been implemented for evaluation,
	 * but have been dropped as this one turned out to be "best"
	 * during all our tests. */

	dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
	conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
	return -EIO;
}

void INFO_bm_xfer_stats(struct drbd_conf *mdev,
		const char *direction, struct bm_xfer_ctx *c)
{
	/* what would it take to transfer it "plaintext" */
	unsigned int header_size = drbd_header_size(mdev->tconn);
	unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
	unsigned int plain =
		header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
		c->bm_words * sizeof(unsigned long);
	unsigned int total = c->bytes[0] + c->bytes[1];
	unsigned int r;

	/* total can not be zero. but just in case: */
	if (total == 0)
		return;

	/* don't report if not compressed */
	if (total >= plain)
		return;

	/* total < plain. check for overflow, still */
	r = (total > UINT_MAX/1000) ? (total / (plain/1000))
				    : (1000 * total / plain);

	if (r > 1000)
		r = 1000;

	r = 1000 - r;
	dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
	     "total %u; compression: %u.%u%%\n",
			direction,
			c->bytes[1], c->packets[1],
			c->bytes[0], c->packets[0],
			total, r/10, r % 10);
}

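/*
 * Illustrative sketch (not part of the driver): the per-mille math
 * above, worked through.  With plain = 100000 bytes and total = 12345
 * bytes actually sent, r = 1000 * 12345 / 100000 = 123, then
 * 1000 - 123 = 877, printed as "compression: 87.7%".  The
 * total > UINT_MAX/1000 branch only exists to keep the 32-bit
 * multiplication from overflowing on very large transfers.
 */
#if 0
static unsigned int compression_permille(unsigned int total, unsigned int plain)
{
	unsigned int r = (total > UINT_MAX/1000) ? (total / (plain/1000))
						 : (1000 * total / plain);
	if (r > 1000)
		r = 1000;
	return 1000 - r;	/* e.g. 877 -> "87.7%" */
}
#endif
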
/* Since we are processing the bitfield from lower addresses to higher,
   it does not matter whether we process it in 32 bit or 64 bit chunks,
   as long as it is little endian.  (Understand it as a byte stream,
   beginning with the lowest byte...)  If we used big endian, we would
   need to process it from the highest address to the lowest in order to
   be agnostic to the 32 vs 64 bit issue.  (A worked example follows
   after this function.)

   returns 0 on failure, 1 if we successfully received it. */
static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct bm_xfer_ctx c;
	int err;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
	/* you are supposed to send additional out-of-sync information
	 * if you actually set bits during this phase */

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),
	};

	for (;;) {
		if (pi->cmd == P_BITMAP)
			err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
		else if (pi->cmd == P_COMPRESSED_BITMAP) {
			/* MAYBE: sanity check that we speak proto >= 90,
			 * and the feature is enabled! */
			struct p_compressed_bm *p = pi->data;

			if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
				dev_err(DEV, "ReportCBitmap packet too large\n");
				err = -EIO;
				goto out;
			}
			if (pi->size <= sizeof(*p)) {
				dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
				err = -EIO;
				goto out;
			}
			err = drbd_recv_all(mdev->tconn, p, pi->size);
			if (err)
				goto out;
			err = decode_bitmap_c(mdev, p, &c, pi->size);
		} else {
			dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
			err = -EIO;
			goto out;
		}

		c.packets[pi->cmd == P_BITMAP]++;
		c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;

		if (err <= 0) {
			if (err < 0)
				goto out;
			break;
		}
		err = drbd_recv_header(mdev->tconn, pi);
		if (err)
			goto out;
	}

	INFO_bm_xfer_stats(mdev, "receive", &c);

	if (mdev->state.conn == C_WF_BITMAP_T) {
		enum drbd_state_rv rv;

		err = drbd_send_bitmap(mdev);
		if (err)
			goto out;
		/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
		rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		D_ASSERT(rv == SS_SUCCESS);
	} else if (mdev->state.conn != C_WF_BITMAP_S) {
		/* admin may have requested C_DISCONNECTING,
		 * other threads may have noticed network errors */
		dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
			 drbd_conn_str(mdev->state.conn));
	}
	err = 0;

 out:
	drbd_bm_unlock(mdev);
	if (!err && mdev->state.conn == C_WF_BITMAP_S)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	return err;
}

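/*
 * Illustrative sketch (not part of the driver): why little endian makes
 * the chunk size irrelevant.  On a little endian machine the in-memory
 * byte order of a multi-word bitfield is the same whether you view it
 * as bytes, u32s or u64s, so bit n always lives at byte n/8, bit n%8.
 * This is a userspace demo with standard headers.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <assert.h>

static void little_endian_chunks_agree(void)
{
	uint64_t as64 = 0x8040201008040201ULL;
	uint32_t as32[2];
	uint8_t bytes[8];

	memcpy(as32, &as64, sizeof(as64));
	memcpy(bytes, &as64, sizeof(as64));

	/* On little endian, the low 32 bits come first in memory... */
	assert(as32[0] == 0x08040201u && as32[1] == 0x80402010u);
	/* ...and byte k holds bits 8k..8k+7 of the bitfield. */
	assert(bytes[0] == 0x01 && bytes[7] == 0x80);
}
#endif
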
static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
{
	conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
		  pi->cmd, pi->size);

	return ignore_remaining_packet(tconn, pi);
}

static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
{
	/* Make sure we've acked all the TCP data associated
	 * with the data requests being unplugged */
	drbd_tcp_quickack(tconn->data.socket);

	return 0;
}

static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_desc *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	switch (mdev->state.conn) {
	case C_WF_SYNC_UUID:
	case C_WF_BITMAP_T:
	case C_BEHIND:
		break;
	default:
		dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
			drbd_conn_str(mdev->state.conn));
	}

	drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));

	return 0;
}

struct data_cmd {
	int expect_payload;
	size_t pkt_size;
	int (*fn)(struct drbd_tconn *, struct packet_info *);
};

static struct data_cmd drbd_cmd_handler[] = {
	[P_DATA]	    = { 1, sizeof(struct p_data), receive_Data },
	[P_DATA_REPLY]	    = { 1, sizeof(struct p_data), receive_DataReply },
	[P_RS_DATA_REPLY]   = { 1, sizeof(struct p_data), receive_RSDataReply },
	[P_BARRIER]	    = { 0, sizeof(struct p_barrier), receive_Barrier },
	[P_BITMAP]	    = { 1, 0, receive_bitmap },
	[P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap },
	[P_UNPLUG_REMOTE]   = { 0, 0, receive_UnplugRemote },
	[P_DATA_REQUEST]    = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_SYNC_PARAM]	    = { 1, 0, receive_SyncParam },
	[P_SYNC_PARAM89]    = { 1, 0, receive_SyncParam },
	[P_PROTOCOL]	    = { 1, sizeof(struct p_protocol), receive_protocol },
	[P_UUIDS]	    = { 0, sizeof(struct p_uuids), receive_uuids },
	[P_SIZES]	    = { 0, sizeof(struct p_sizes), receive_sizes },
	[P_STATE]	    = { 0, sizeof(struct p_state), receive_state },
	[P_STATE_CHG_REQ]   = { 0, sizeof(struct p_req_state), receive_req_state },
	[P_SYNC_UUID]	    = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
	[P_OV_REQUEST]	    = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_OV_REPLY]	    = { 1, sizeof(struct p_block_req), receive_DataRequest },
	[P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
	[P_DELAY_PROBE]	    = { 0, sizeof(struct p_delay_probe93), receive_skip },
	[P_OUT_OF_SYNC]	    = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
	[P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
	[P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
};

static void drbdd(struct drbd_tconn *tconn)
{
	struct packet_info pi;
	size_t shs; /* sub header size */
	int err;

	while (get_t_state(&tconn->receiver) == RUNNING) {
		struct data_cmd *cmd;

		drbd_thread_current_set_cpu(&tconn->receiver);
		if (drbd_recv_header(tconn, &pi))
			goto err_out;

		cmd = &drbd_cmd_handler[pi.cmd];
		if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
			conn_err(tconn, "Unexpected data packet %s (0x%04x)",
				 cmdname(pi.cmd), pi.cmd);
			goto err_out;
		}

		shs = cmd->pkt_size;
		if (pi.size > shs && !cmd->expect_payload) {
			conn_err(tconn, "No payload expected %s l:%d\n",
				 cmdname(pi.cmd), pi.size);
			goto err_out;
		}

		if (shs) {
			err = drbd_recv_all_warn(tconn, pi.data, shs);
			if (err)
				goto err_out;
			pi.size -= shs;
		}

		err = cmd->fn(tconn, &pi);
		if (err) {
			conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
				 cmdname(pi.cmd), err, pi.size);
			goto err_out;
		}
	}
	return;

 err_out:
	conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}

void conn_flush_workqueue(struct drbd_tconn *tconn)
{
	struct drbd_wq_barrier barr;

	barr.w.cb = w_prev_work_done;
	barr.w.tconn = tconn;
	init_completion(&barr.done);
	drbd_queue_work(&tconn->sender_work, &barr.w);
	wait_for_completion(&barr.done);
}

static void conn_disconnect(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	enum drbd_conns oc;
	int vnr;

	if (tconn->cstate == C_STANDALONE)
		return;

	/* We are about to start the cleanup after connection loss.
	 * Make sure drbd_make_request knows about that.
	 * Usually we should be in some network failure state already,
	 * but just in case we are not, we fix it up here.
	 */
	conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);

	/* asender does not clean up anything. it must not interfere, either */
	drbd_thread_stop(&tconn->asender);
	drbd_free_sock(tconn);

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		kref_get(&mdev->kref);
		rcu_read_unlock();
		drbd_disconnected(mdev);
		kref_put(&mdev->kref, &drbd_minor_destroy);
		rcu_read_lock();
	}
	rcu_read_unlock();

	if (!list_empty(&tconn->current_epoch->list))
		conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
	/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
	atomic_set(&tconn->current_epoch->epoch_size, 0);
	tconn->send.seen_any_write_yet = false;

	conn_info(tconn, "Connection closed\n");

	if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
		conn_try_outdate_peer_async(tconn);

	spin_lock_irq(&tconn->req_lock);
	oc = tconn->cstate;
	if (oc >= C_UNCONNECTED)
		_conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);

	spin_unlock_irq(&tconn->req_lock);

	if (oc == C_DISCONNECTING)
		conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
}

static int drbd_disconnected(struct drbd_conf *mdev)
{
	unsigned int i;

	/* wait for current activity to cease. */
	spin_lock_irq(&mdev->tconn->req_lock);
	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	/* We do not have data structures that would allow us to
	 * get the rs_pending_cnt down to 0 again.
	 *  * On C_SYNC_TARGET we do not have any data structures describing
	 *    the pending RSDataRequest's we have sent.
	 *  * On C_SYNC_SOURCE there is no data structure that tracks
	 *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
	 *  And no, it is not the sum of the reference counts in the
	 *  resync_LRU. The resync_LRU tracks the whole operation including
	 *  the disk-IO, while the rs_pending_cnt only tracks the blocks
	 *  on the fly. */
	drbd_rs_cancel_all(mdev);
	mdev->rs_total = 0;
	mdev->rs_failed = 0;
	atomic_set(&mdev->rs_pending_cnt, 0);
	wake_up(&mdev->misc_wait);

	del_timer_sync(&mdev->resync_timer);
	resync_timer_fn((unsigned long)mdev);

	/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
	 * w_make_resync_request etc. which may still be on the worker queue
	 * to be "canceled" */
	drbd_flush_workqueue(mdev);

	drbd_finish_peer_reqs(mdev);

	/* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
	   might have issued a work again. The one before drbd_finish_peer_reqs() is
	   necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
	drbd_flush_workqueue(mdev);

	/* need to do it again, drbd_finish_peer_reqs() may have populated it
	 * again via drbd_try_clear_on_disk_bm(). */
	drbd_rs_cancel_all(mdev);

	kfree(mdev->p_uuid);
	mdev->p_uuid = NULL;

	if (!drbd_suspended(mdev))
		tl_clear(mdev->tconn);

	drbd_md_sync(mdev);

	/* serialize with bitmap writeout triggered by the state change,
	 * if any. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	/* tcp_close and release of sendpage pages can be deferred.  I don't
	 * want to use SO_LINGER, because apparently it can be deferred for
	 * more than 20 seconds (longest time I checked).
	 *
	 * Actually we don't care for exactly when the network stack does its
	 * put_page(), but release our reference on these pages right here.
	 */
	i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
	if (i)
		dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
	i = atomic_read(&mdev->pp_in_use_by_net);
	if (i)
		dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
	i = atomic_read(&mdev->pp_in_use);
	if (i)
		dev_info(DEV, "pp_in_use = %d, expected 0\n", i);

	D_ASSERT(list_empty(&mdev->read_ee));
	D_ASSERT(list_empty(&mdev->active_ee));
	D_ASSERT(list_empty(&mdev->sync_ee));
	D_ASSERT(list_empty(&mdev->done_ee));

	return 0;
}

/*
 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
 * we can agree on is stored in agreed_pro_version.
 *
 * feature flags and the reserved array should be enough room for future
 * enhancements of the handshake protocol, and possible plugins...
 *
 * for now, they are expected to be zero, but ignored.
 */
static int drbd_send_features(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;
	struct p_connection_features *p;

	sock = &tconn->data;
	p = conn_prepare_command(tconn, sock);
	if (!p)
		return -EIO;
	memset(p, 0, sizeof(*p));
	p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
	p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
	return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 */
static int drbd_do_features(struct drbd_tconn *tconn)
{
	/* ASSERT current == tconn->receiver ... */
	struct p_connection_features *p;
	const int expect = sizeof(struct p_connection_features);
	struct packet_info pi;
	int err;

	err = drbd_send_features(tconn);
	if (err)
		return 0;

	err = drbd_recv_header(tconn, &pi);
	if (err)
		return 0;

	if (pi.cmd != P_CONNECTION_FEATURES) {
		conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		return -1;
	}

	if (pi.size != expect) {
		conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
			 expect, pi.size);
		return -1;
	}

	p = pi.data;
	err = drbd_recv_all_warn(tconn, p, expect);
	if (err)
		return 0;

	p->protocol_min = be32_to_cpu(p->protocol_min);
	p->protocol_max = be32_to_cpu(p->protocol_max);
	if (p->protocol_max == 0)
		p->protocol_max = p->protocol_min;

	if (PRO_VERSION_MAX < p->protocol_min ||
	    PRO_VERSION_MIN > p->protocol_max)
		goto incompat;

	tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);

	conn_info(tconn, "Handshake successful: "
		  "Agreed network protocol version %d\n", tconn->agreed_pro_version);

	return 1;

 incompat:
	conn_err(tconn, "incompatible DRBD dialects: "
		 "I support %d-%d, peer supports %d-%d\n",
		 PRO_VERSION_MIN, PRO_VERSION_MAX,
		 p->protocol_min, p->protocol_max);
	return -1;
}

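/*
 * Illustrative sketch (not part of the driver): the handshake above
 * agrees on a protocol version iff the two [min, max] ranges overlap,
 * and then picks the highest version both sides support.  E.g. local
 * 86..101 vs. peer 90..97 -> agreed 97; local 86..88 vs. peer 90..97
 * -> incompatible.
 */
#if 0
static int negotiate_version(int my_min, int my_max,
			     int peer_min, int peer_max)
{
	if (my_max < peer_min || my_min > peer_max)
		return -1;				/* no overlap */
	return my_max < peer_max ? my_max : peer_max;	/* highest common */
}
#endif
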
#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
static int drbd_do_auth(struct drbd_tconn *tconn)
{
	conn_err(tconn, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
	conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
	return -1;
}
#else
#define CHALLENGE_LEN 64

/* Return value:
	1 - auth succeeded,
	0 - failed, try again (network error),
	-1 - auth failed, don't try again.
*/

static int drbd_do_auth(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;
	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
	struct scatterlist sg;
	char *response = NULL;
	char *right_response = NULL;
	char *peers_ch = NULL;
	unsigned int key_len;
	char secret[SHARED_SECRET_MAX]; /* 64 byte */
	unsigned int resp_size;
	struct hash_desc desc;
	struct packet_info pi;
	struct net_conf *nc;
	int err, rv;

	/* FIXME: Put the challenge/response into the preallocated socket buffer. */

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	key_len = strlen(nc->shared_secret);
	memcpy(secret, nc->shared_secret, key_len);
	rcu_read_unlock();

	desc.tfm = tconn->cram_hmac_tfm;
	desc.flags = 0;

	rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len);
	if (rv) {
		conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	get_random_bytes(my_challenge, CHALLENGE_LEN);

	sock = &tconn->data;
	if (!conn_prepare_command(tconn, sock)) {
		rv = 0;
		goto fail;
	}
	rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
				my_challenge, CHALLENGE_LEN);
	if (!rv)
		goto fail;

	err = drbd_recv_header(tconn, &pi);
	if (err) {
		rv = 0;
		goto fail;
	}

	if (pi.cmd != P_AUTH_CHALLENGE) {
		conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		rv = 0;
		goto fail;
	}

	if (pi.size > CHALLENGE_LEN * 2) {
		conn_err(tconn, "AuthChallenge payload too big\n");
		rv = -1;
		goto fail;
	}

	peers_ch = kmalloc(pi.size, GFP_NOIO);
	if (peers_ch == NULL) {
		conn_err(tconn, "kmalloc of peers_ch failed\n");
		rv = -1;
		goto fail;
	}

	err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
	if (err) {
		rv = 0;
		goto fail;
	}

	resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
	response = kmalloc(resp_size, GFP_NOIO);
	if (response == NULL) {
		conn_err(tconn, "kmalloc of response failed\n");
		rv = -1;
		goto fail;
	}

	sg_init_table(&sg, 1);
	sg_set_buf(&sg, peers_ch, pi.size);

	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
	if (rv) {
		conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	if (!conn_prepare_command(tconn, sock)) {
		rv = 0;
		goto fail;
	}
	rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
				response, resp_size);
	if (!rv)
		goto fail;

	err = drbd_recv_header(tconn, &pi);
	if (err) {
		rv = 0;
		goto fail;
	}

	if (pi.cmd != P_AUTH_RESPONSE) {
		conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		rv = 0;
		goto fail;
	}

	if (pi.size != resp_size) {
		conn_err(tconn, "AuthResponse payload of wrong size\n");
		rv = 0;
		goto fail;
	}

	err = drbd_recv_all_warn(tconn, response, resp_size);
	if (err) {
		rv = 0;
		goto fail;
	}

	right_response = kmalloc(resp_size, GFP_NOIO);
	if (right_response == NULL) {
		conn_err(tconn, "kmalloc of right_response failed\n");
		rv = -1;
		goto fail;
	}

	sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);

	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
	if (rv) {
		conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = !memcmp(response, right_response, resp_size);

	if (rv)
		conn_info(tconn, "Peer authenticated using %d bytes HMAC\n",
			  resp_size);
	else
		rv = -1;

 fail:
	kfree(peers_ch);
	kfree(response);
	kfree(right_response);

	return rv;
}
#endif

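/*
 * Illustrative sketch (not part of the driver): the challenge/response
 * exchange above, reduced to its logic.  Both sides share a secret;
 * each proves knowledge of it by returning HMAC(secret, peer's
 * challenge) without ever putting the secret on the wire.  hmac(),
 * send_pkt(), recv_pkt() and DIGEST_LEN are hypothetical helpers and
 * constants invented for this example.
 */
#if 0
static int cram_authenticate(int fd, const u8 *secret, unsigned int key_len)
{
	u8 my_ch[64], peers_ch[64], response[DIGEST_LEN], expected[DIGEST_LEN];

	get_random_bytes(my_ch, sizeof(my_ch));
	send_pkt(fd, AUTH_CHALLENGE, my_ch, sizeof(my_ch));
	recv_pkt(fd, AUTH_CHALLENGE, peers_ch, sizeof(peers_ch));

	/* answer the peer's challenge... */
	hmac(secret, key_len, peers_ch, sizeof(peers_ch), response);
	send_pkt(fd, AUTH_RESPONSE, response, sizeof(response));

	/* ...and verify the peer's answer to ours. */
	recv_pkt(fd, AUTH_RESPONSE, response, sizeof(response));
	hmac(secret, key_len, my_ch, sizeof(my_ch), expected);
	return memcmp(response, expected, sizeof(expected)) == 0 ? 1 : -1;
}
#endif
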
4839int drbdd_init(struct drbd_thread *thi)
4840{
392c8801 4841 struct drbd_tconn *tconn = thi->tconn;
b411b363
PR
4842 int h;
4843
4d641dd7 4844 conn_info(tconn, "receiver (re)started\n");
b411b363
PR
4845
4846 do {
81fa2e67 4847 h = conn_connect(tconn);
b411b363 4848 if (h == 0) {
81fa2e67 4849 conn_disconnect(tconn);
20ee6390 4850 schedule_timeout_interruptible(HZ);
b411b363
PR
4851 }
4852 if (h == -1) {
4d641dd7 4853 conn_warn(tconn, "Discarding network configuration.\n");
bbeb641c 4854 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363
PR
4855 }
4856 } while (h == 0);
4857
91fd4dad
PR
4858 if (h > 0)
4859 drbdd(tconn);
b411b363 4860
81fa2e67 4861 conn_disconnect(tconn);
b411b363 4862
4d641dd7 4863 conn_info(tconn, "receiver terminated\n");
b411b363
PR
4864 return 0;
4865}
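
/*
 * Reading aid for the loop above: conn_connect() returning 0 is treated as
 * a transient failure; we disconnect, sleep one second and retry. -1 means
 * the handshake decided we must give up, so the network configuration is
 * discarded. Any positive value means the connection is up and drbdd()
 * runs the main receive loop until it returns.
 */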

/* ********* acknowledge sender ******** */

static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct p_req_state_reply *p = pi->data;
	int retcode = be32_to_cpu(p->retcode);

	if (retcode >= SS_SUCCESS) {
		set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
	} else {
		set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
		conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
			 drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&tconn->ping_wait);

	return 0;
}

static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_req_state_reply *p = pi->data;
	int retcode = be32_to_cpu(p->retcode);

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) {
		D_ASSERT(tconn->agreed_pro_version < 100);
		return got_conn_RqSReply(tconn, pi);
	}

	if (retcode >= SS_SUCCESS) {
		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
	} else {
		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
			drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&mdev->state_wait);

	return 0;
}
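
/*
 * Compatibility note on the CONN_WD_ST_CHG_REQ test above: peers speaking
 * protocol 100 or newer answer connection-wide state changes with
 * P_CONN_ST_CHG_REPLY, handled by got_conn_RqSReply(). Older peers send a
 * plain P_STATE_CHG_REPLY even for such changes, so if a connection-wide
 * change is pending we reroute the reply; the D_ASSERT documents that this
 * path is only expected for agreed_pro_version < 100.
 */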

static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
{
	return drbd_send_ping_ack(tconn);
}

static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	/* restore idle timeout */
	tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
	if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
		wake_up(&tconn->ping_wait);

	return 0;
}
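
/*
 * Timing sketch for the keepalive machinery (here and in drbd_asender()
 * below), assuming the usual units from the net config: ping_timeo is in
 * tenths of a second, ping_int in seconds. While a ping is outstanding the
 * meta socket uses the short timeout, sk_rcvtimeo = ping_timeo * HZ / 10;
 * on P_PING_ACK it is restored to ping_int * HZ. E.g. with ping_timeo = 5
 * and HZ = 250, the ack must arrive within 125 jiffies, i.e. 0.5 s.
 */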

static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	D_ASSERT(mdev->tconn->agreed_pro_version >= 89);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, sector);
		drbd_set_in_sync(mdev, sector, blksize);
		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
		mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
		put_ldev(mdev);
	}
	dec_rs_pending(mdev);
	atomic_add(blksize >> 9, &mdev->rs_sect_in);

	return 0;
}
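
/*
 * Unit bookkeeping in got_IsInSync(), assuming BM_BLOCK_SHIFT == 12 (4 KiB
 * bitmap blocks) as defined in drbd_int.h: blksize arrives in bytes,
 * rs_same_csum counts bitmap blocks (blksize >> BM_BLOCK_SHIFT) and
 * rs_sect_in counts 512-byte sectors (blksize >> 9). A single 4 KiB reply
 * therefore adds one same-checksum block and eight sectors of resync-in
 * throughput accounting.
 */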

static int
validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
			      struct rb_root *root, const char *func,
			      enum drbd_req_event what, bool missing_ok)
{
	struct drbd_request *req;
	struct bio_and_error m;

	spin_lock_irq(&mdev->tconn->req_lock);
	req = find_request(mdev, root, id, sector, missing_ok, func);
	if (unlikely(!req)) {
		spin_unlock_irq(&mdev->tconn->req_lock);
		return -EIO;
	}
	__req_mod(req, what, &m);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return 0;
}
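
/*
 * Usage sketch: the ack handlers below resolve a request by (block_id,
 * sector) in one of the two request trees and feed one event into the
 * request state machine under req_lock, e.g.
 *
 *	return validate_req_change_req_state(mdev, p->block_id, sector,
 *					     &mdev->write_requests, __func__,
 *					     WRITE_ACKED_BY_PEER, false);
 *
 * missing_ok chooses whether a request that is no longer in the tree is a
 * protocol error (-EIO) or an expected race; got_NegAck() passes true, see
 * the comment there.
 */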

static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);
	enum drbd_req_event what;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (p->block_id == ID_SYNCER) {
		drbd_set_in_sync(mdev, sector, blksize);
		dec_rs_pending(mdev);
		return 0;
	}
	switch (pi->cmd) {
	case P_RS_WRITE_ACK:
		what = WRITE_ACKED_BY_PEER_AND_SIS;
		break;
	case P_WRITE_ACK:
		what = WRITE_ACKED_BY_PEER;
		break;
	case P_RECV_ACK:
		what = RECV_ACKED_BY_PEER;
		break;
	case P_SUPERSEDED:
		what = CONFLICT_RESOLVED;
		break;
	case P_RETRY_WRITE:
		what = POSTPONE_WRITE;
		break;
	default:
		BUG();
	}

	return validate_req_change_req_state(mdev, p->block_id, sector,
					     &mdev->write_requests, __func__,
					     what, false);
}
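
/*
 * For orientation (derived from the switch above): which positive ack a
 * write gets depends on the configured wire protocol. Protocol A sends no
 * per-write acks at all; protocol B sends P_RECV_ACK once the data is
 * received; protocol C sends P_WRITE_ACK once it is written. P_RS_WRITE_ACK,
 * P_SUPERSEDED and P_RETRY_WRITE are the resync and conflict-resolution
 * variants feeding the same request state machine.
 */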

static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);
	int size = be32_to_cpu(p->blksize);
	int err;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (p->block_id == ID_SYNCER) {
		dec_rs_pending(mdev);
		drbd_rs_failed_io(mdev, sector, size);
		return 0;
	}

	err = validate_req_change_req_state(mdev, p->block_id, sector,
					    &mdev->write_requests, __func__,
					    NEG_ACKED, true);
	if (err) {
		/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
		   The master bio might already be completed, therefore the
		   request is no longer in the collision hash. */
		/* In Protocol B we might already have got a P_RECV_ACK
		   but then get a P_NEG_ACK afterwards. */
		drbd_set_out_of_sync(mdev, sector, size);
	}
	return 0;
}

static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n",
		(unsigned long long)sector, be32_to_cpu(p->blksize));

	return validate_req_change_req_state(mdev, p->block_id, sector,
					     &mdev->read_requests, __func__,
					     NEG_ACKED, false);
}

static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	sector_t sector;
	int size;
	struct p_block_ack *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dec_rs_pending(mdev);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, sector);
		switch (pi->cmd) {
		case P_NEG_RS_DREPLY:
			drbd_rs_failed_io(mdev, sector, size);
			/* fall through */
		case P_RS_CANCEL:
			break;
		default:
			BUG();
		}
		put_ldev(mdev);
	}

	return 0;
}
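
/*
 * Both P_NEG_RS_DREPLY and P_RS_CANCEL are routed here (see asender_tbl
 * below). A cancel means the peer merely skipped the resync request, so
 * only the rs_pending accounting and drbd_rs_complete_io() apply; a
 * negative reply additionally records the range as failed resync I/O via
 * drbd_rs_failed_io(), hence the intentional fall-through above.
 */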

static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct p_barrier_ack *p = pi->data;
	struct drbd_conf *mdev;
	int vnr;

	tl_release(tconn, p->barrier, be32_to_cpu(p->set_size));

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_AHEAD &&
		    atomic_read(&mdev->ap_in_flight) == 0 &&
		    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
			mdev->start_resync_timer.expires = jiffies + HZ;
			add_timer(&mdev->start_resync_timer);
		}
	}
	rcu_read_unlock();

	return 0;
}
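
/*
 * Note on the idr loop above (a reading aid, not normative): a volume in
 * C_AHEAD has deliberately stopped replicating under congestion and may
 * only go back to being a sync source once all of its application writes
 * have drained (ap_in_flight == 0). The barrier ack is a natural point to
 * test for that, and arming start_resync_timer one second out defers the
 * actual state transition off the ack-processing path.
 */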

static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	struct drbd_work *w;
	sector_t sector;
	int size;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_out_of_sync_found(mdev, sector, size);
	else
		ov_out_of_sync_print(mdev);

	if (!get_ldev(mdev))
		return 0;

	drbd_rs_complete_io(mdev, sector);
	dec_rs_pending(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		w = kmalloc(sizeof(*w), GFP_NOIO);
		if (w) {
			w->cb = w_ov_finished;
			w->mdev = mdev;
			drbd_queue_work(&mdev->tconn->sender_work, w);
		} else {
			dev_err(DEV, "kmalloc(w) failed.");
			ov_out_of_sync_print(mdev);
			drbd_resync_finished(mdev);
		}
	}
	put_ldev(mdev);
	return 0;
}
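
/*
 * Arithmetic behind the progress-mark test above, assuming ov_left counts
 * 4 KiB bitmap blocks: 0x200 is 512 blocks, i.e. 2 MiB of verify work. As
 * ov_left decrements by one per reply, bit 9 is set during alternating
 * runs of 512 values, so drbd_advance_rs_marks() runs during every other
 * 2 MiB stretch; that is the cheap rate limit the "every other megabyte"
 * comment refers to, avoiding a division on each P_OV_RESULT.
 */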

static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
{
	return 0;
}

static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr, not_empty = 0;

	do {
		clear_bit(SIGNAL_ASENDER, &tconn->flags);
		flush_signals(current);

		rcu_read_lock();
		idr_for_each_entry(&tconn->volumes, mdev, vnr) {
			kref_get(&mdev->kref);
			rcu_read_unlock();
			if (drbd_finish_peer_reqs(mdev)) {
				kref_put(&mdev->kref, &drbd_minor_destroy);
				return 1;
			}
			kref_put(&mdev->kref, &drbd_minor_destroy);
			rcu_read_lock();
		}
		set_bit(SIGNAL_ASENDER, &tconn->flags);

		spin_lock_irq(&tconn->req_lock);
		idr_for_each_entry(&tconn->volumes, mdev, vnr) {
			not_empty = !list_empty(&mdev->done_ee);
			if (not_empty)
				break;
		}
		spin_unlock_irq(&tconn->req_lock);
		rcu_read_unlock();
	} while (not_empty);

	return 0;
}
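
/*
 * The flag dance above, in brief: SIGNAL_ASENDER is cleared (and pending
 * signals flushed) while the done_ee lists are drained, so wakeup signals
 * sent by other threads cannot interrupt drbd_finish_peer_reqs() half way.
 * It is set again before the emptiness recheck under req_lock; an entry
 * queued in the window is therefore either seen by the recheck, forcing
 * another pass, or triggers a fresh signal for the next asender iteration.
 */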

struct asender_cmd {
	size_t pkt_size;
	int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
};

static struct asender_cmd asender_tbl[] = {
	[P_PING]	      = { 0, got_Ping },
	[P_PING_ACK]	      = { 0, got_PingAck },
	[P_RECV_ACK]	      = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	      = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]      = { sizeof(struct p_block_ack), got_BlockAck },
	[P_SUPERSEDED]	      = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	      = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	      = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]     = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]	      = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	      = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY]   = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]     = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]	      = { sizeof(struct p_delay_probe93), got_skip },
	[P_RS_CANCEL]	      = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_CONN_ST_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_conn_RqSReply },
	[P_RETRY_WRITE]	      = { sizeof(struct p_block_ack), got_BlockAck },
};
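
/*
 * Dispatch sketch matching drbd_asender() below: pi.cmd indexes directly
 * into asender_tbl, so every ack-path command owns a slot and pkt_size
 * states the expected payload after the header:
 *
 *	if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !asender_tbl[pi.cmd].fn)
 *		goto disconnect;	(unknown meta packet)
 *	cmd = &asender_tbl[pi.cmd];
 *	expect = header_size + cmd->pkt_size;
 *
 * Entries with pkt_size 0 (P_PING, P_PING_ACK) carry no payload at all.
 */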

int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	struct asender_cmd *cmd = NULL;
	struct packet_info pi;
	int rv;
	void *buf = tconn->meta.rbuf;
	int received = 0;
	unsigned int header_size = drbd_header_size(tconn);
	int expect = header_size;
	bool ping_timeout_active = false;
	struct net_conf *nc;
	int ping_timeo, tcp_cork, ping_int;
	struct sched_param param = { .sched_priority = 2 };

	rv = sched_setscheduler(current, SCHED_RR, &param);
	if (rv < 0)
		conn_err(tconn, "drbd_asender: ERROR set priority, ret=%d\n", rv);

	while (get_t_state(thi) == RUNNING) {
		drbd_thread_current_set_cpu(thi);

		rcu_read_lock();
		nc = rcu_dereference(tconn->net_conf);
		ping_timeo = nc->ping_timeo;
		tcp_cork = nc->tcp_cork;
		ping_int = nc->ping_int;
		rcu_read_unlock();

		if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
			if (drbd_send_ping(tconn)) {
				conn_err(tconn, "drbd_send_ping has failed\n");
				goto reconnect;
			}
			tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
			ping_timeout_active = true;
		}

		/* TODO: conditionally cork; it may hurt latency if we cork without
		   much to send */
		if (tcp_cork)
			drbd_tcp_cork(tconn->meta.socket);
		if (tconn_finish_peer_reqs(tconn)) {
			conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
			goto reconnect;
		}
		/* but unconditionally uncork unless disabled */
		if (tcp_cork)
			drbd_tcp_uncork(tconn->meta.socket);

		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &tconn->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR        (on meta)  we got a signal
		 * -EAGAIN       (on meta)  rcvtimeo expired
		 * -ECONNRESET   other side closed the connection
		 * -ERESTARTSYS  (on data)  we got a signal
		 * rv <  0       other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0      : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf += rv;
		} else if (rv == 0) {
			if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
				long t;
				rcu_read_lock();
				t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
				rcu_read_unlock();

				t = wait_event_timeout(tconn->ping_wait,
						       tconn->cstate < C_WF_REPORT_PARAMS,
						       t);
				if (t)
					break;
			}
			conn_err(tconn, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			/* If the data socket received something meanwhile,
			 * that is good enough: peer is still alive. */
			if (time_after(tconn->last_received,
				jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
				continue;
			if (ping_timeout_active) {
				conn_err(tconn, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &tconn->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			conn_err(tconn, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

		if (received == expect && cmd == NULL) {
			if (decode_header(tconn, tconn->meta.rbuf, &pi))
				goto reconnect;
			/* validate pi.cmd before indexing into the table, so
			 * we never even form an out-of-range pointer */
			if (pi.cmd >= ARRAY_SIZE(asender_tbl) ||
			    !asender_tbl[pi.cmd].fn) {
				conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n",
					 cmdname(pi.cmd), pi.cmd);
				goto disconnect;
			}
			cmd = &asender_tbl[pi.cmd];
			expect = header_size + cmd->pkt_size;
			if (pi.size != expect - header_size) {
				conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
					pi.cmd, pi.size);
				goto reconnect;
			}
		}
		if (received == expect) {
			int err;

			err = cmd->fn(tconn, &pi);
			if (err) {
				conn_err(tconn, "%pf failed\n", cmd->fn);
				goto reconnect;
			}

			tconn->last_received = jiffies;

			if (cmd == &asender_tbl[P_PING_ACK]) {
				/* restore idle timeout */
				tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
				ping_timeout_active = false;
			}

			buf = tconn->meta.rbuf;
			received = 0;
			expect = header_size;
			cmd = NULL;
		}
	}

	if (0) {
reconnect:
		conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
		conn_md_sync(tconn);
	}
	if (0) {
disconnect:
		conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
	}
	clear_bit(SIGNAL_ASENDER, &tconn->flags);

	conn_info(tconn, "asender terminated\n");

	return 0;
}
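
/*
 * On the "if (0) { label: }" idiom at the end of drbd_asender(): both error
 * blocks are unreachable in normal flow and can only be entered via
 * goto reconnect or goto disconnect from inside the receive loop. This
 * keeps the two cleanup policies out of line, yet both fall through to the
 * common SIGNAL_ASENDER cleanup and the termination message.
 */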