git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/block/drbd/drbd_receiver.c
drbd: use the cached meta_dev_idx
b411b363
PR
1/*
2 drbd_receiver.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
13 any later version.
14
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25
b411b363
PR
26#include <linux/module.h>
27
28#include <asm/uaccess.h>
29#include <net/sock.h>
30
b411b363
PR
31#include <linux/drbd.h>
32#include <linux/fs.h>
33#include <linux/file.h>
34#include <linux/in.h>
35#include <linux/mm.h>
36#include <linux/memcontrol.h>
37#include <linux/mm_inline.h>
38#include <linux/slab.h>
b411b363
PR
39#include <linux/pkt_sched.h>
40#define __KERNEL_SYSCALLS__
41#include <linux/unistd.h>
42#include <linux/vmalloc.h>
43#include <linux/random.h>
b411b363
PR
44#include <linux/string.h>
45#include <linux/scatterlist.h>
46#include "drbd_int.h"
b411b363
PR
47#include "drbd_req.h"
48
49#include "drbd_vli.h"
50
77351055
PR
51struct packet_info {
52 enum drbd_packet cmd;
e2857216
AG
53 unsigned int size;
54 unsigned int vnr;
e658983a 55 void *data;
77351055
PR
56};
57
b411b363
PR
58enum finish_epoch {
59 FE_STILL_LIVE,
60 FE_DESTROYED,
61 FE_RECYCLED,
62};
63
6038178e 64static int drbd_do_features(struct drbd_tconn *tconn);
13e6037d 65static int drbd_do_auth(struct drbd_tconn *tconn);
c141ebda 66static int drbd_disconnected(struct drbd_conf *mdev);
b411b363 67
1e9dd291 68static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event);
99920dc5 69static int e_end_block(struct drbd_work *, int);
b411b363 70
b411b363
PR
71
72#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
73
45bb912b
LE
74/*
 75 * some helper functions to deal with singly linked page lists,
76 * page->private being our "next" pointer.
77 */
78
79/* If at least n pages are linked at head, get n pages off.
80 * Otherwise, don't modify head, and return NULL.
81 * Locking is the responsibility of the caller.
82 */
83static struct page *page_chain_del(struct page **head, int n)
84{
85 struct page *page;
86 struct page *tmp;
87
88 BUG_ON(!n);
89 BUG_ON(!head);
90
91 page = *head;
23ce4227
PR
92
93 if (!page)
94 return NULL;
95
45bb912b
LE
96 while (page) {
97 tmp = page_chain_next(page);
98 if (--n == 0)
99 break; /* found sufficient pages */
100 if (tmp == NULL)
101 /* insufficient pages, don't use any of them. */
102 return NULL;
103 page = tmp;
104 }
105
106 /* add end of list marker for the returned list */
107 set_page_private(page, 0);
108 /* actual return value, and adjustment of head */
109 page = *head;
110 *head = tmp;
111 return page;
112}
113
114/* may be used outside of locks to find the tail of a (usually short)
115 * "private" page chain, before adding it back to a global chain head
116 * with page_chain_add() under a spinlock. */
117static struct page *page_chain_tail(struct page *page, int *len)
118{
119 struct page *tmp;
120 int i = 1;
121 while ((tmp = page_chain_next(page)))
122 ++i, page = tmp;
123 if (len)
124 *len = i;
125 return page;
126}
127
128static int page_chain_free(struct page *page)
129{
130 struct page *tmp;
131 int i = 0;
132 page_chain_for_each_safe(page, tmp) {
133 put_page(page);
134 ++i;
135 }
136 return i;
137}
138
139static void page_chain_add(struct page **head,
140 struct page *chain_first, struct page *chain_last)
141{
142#if 1
143 struct page *tmp;
144 tmp = page_chain_tail(chain_first, NULL);
145 BUG_ON(tmp != chain_last);
146#endif
147
148 /* add chain to head */
149 set_page_private(chain_last, (unsigned long)*head);
150 *head = chain_first;
151}
152
18c2d522
AG
153static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
154 unsigned int number)
b411b363
PR
155{
156 struct page *page = NULL;
45bb912b 157 struct page *tmp = NULL;
18c2d522 158 unsigned int i = 0;
b411b363
PR
159
160 /* Yes, testing drbd_pp_vacant outside the lock is racy.
161 * So what. It saves a spin_lock. */
45bb912b 162 if (drbd_pp_vacant >= number) {
b411b363 163 spin_lock(&drbd_pp_lock);
45bb912b
LE
164 page = page_chain_del(&drbd_pp_pool, number);
165 if (page)
166 drbd_pp_vacant -= number;
b411b363 167 spin_unlock(&drbd_pp_lock);
45bb912b
LE
168 if (page)
169 return page;
b411b363 170 }
45bb912b 171
b411b363
PR
172 /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
173 * "criss-cross" setup, that might cause write-out on some other DRBD,
174 * which in turn might block on the other node at this very place. */
45bb912b
LE
175 for (i = 0; i < number; i++) {
176 tmp = alloc_page(GFP_TRY);
177 if (!tmp)
178 break;
179 set_page_private(tmp, (unsigned long)page);
180 page = tmp;
181 }
182
183 if (i == number)
184 return page;
185
186 /* Not enough pages immediately available this time.
c37c8ecf 187 * No need to jump around here, drbd_alloc_pages will retry this
45bb912b
LE
188 * function "soon". */
189 if (page) {
190 tmp = page_chain_tail(page, NULL);
191 spin_lock(&drbd_pp_lock);
192 page_chain_add(&drbd_pp_pool, page, tmp);
193 drbd_pp_vacant += i;
194 spin_unlock(&drbd_pp_lock);
195 }
196 return NULL;
b411b363
PR
197}
198
a990be46
AG
199static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
200 struct list_head *to_be_freed)
b411b363 201{
db830c46 202 struct drbd_peer_request *peer_req;
b411b363
PR
203 struct list_head *le, *tle;
204
205 /* The EEs are always appended to the end of the list. Since
206 they are sent in order over the wire, they have to finish
 207 in order. As soon as we see the first unfinished one, we can
 208 stop examining the list... */
209
210 list_for_each_safe(le, tle, &mdev->net_ee) {
db830c46 211 peer_req = list_entry(le, struct drbd_peer_request, w.list);
045417f7 212 if (drbd_peer_req_has_active_page(peer_req))
b411b363
PR
213 break;
214 list_move(le, to_be_freed);
215 }
216}
217
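/* Collect finished net_ee entries under the req_lock, then release
 * their pages outside the lock. */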
218static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
219{
220 LIST_HEAD(reclaimed);
db830c46 221 struct drbd_peer_request *peer_req, *t;
b411b363 222
87eeee41 223 spin_lock_irq(&mdev->tconn->req_lock);
a990be46 224 reclaim_finished_net_peer_reqs(mdev, &reclaimed);
87eeee41 225 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 226
db830c46 227 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
3967deb1 228 drbd_free_net_peer_req(mdev, peer_req);
b411b363
PR
229}
230
231/**
c37c8ecf 232 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
b411b363 233 * @mdev: DRBD device.
45bb912b
LE
234 * @number: number of pages requested
235 * @retry: whether to retry, if not enough pages are available right now
236 *
 237 * Tries to allocate @number pages, first from our own page pool, then from
238 * the kernel, unless this allocation would exceed the max_buffers setting.
239 * Possibly retry until DRBD frees sufficient pages somewhere else.
b411b363 240 *
45bb912b 241 * Returns a page chain linked via page->private.
b411b363 242 */
c37c8ecf
AG
243struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
244 bool retry)
b411b363
PR
245{
246 struct page *page = NULL;
44ed167d 247 struct net_conf *nc;
b411b363 248 DEFINE_WAIT(wait);
44ed167d 249 int mxb;
b411b363 250
45bb912b
LE
251 /* Yes, we may run up to @number over max_buffers. If we
252 * follow it strictly, the admin will get it wrong anyways. */
44ed167d
PR
253 rcu_read_lock();
254 nc = rcu_dereference(mdev->tconn->net_conf);
255 mxb = nc ? nc->max_buffers : 1000000;
256 rcu_read_unlock();
257
258 if (atomic_read(&mdev->pp_in_use) < mxb)
18c2d522 259 page = __drbd_alloc_pages(mdev, number);
b411b363 260
45bb912b 261 while (page == NULL) {
b411b363
PR
262 prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
263
264 drbd_kick_lo_and_reclaim_net(mdev);
265
44ed167d 266 if (atomic_read(&mdev->pp_in_use) < mxb) {
18c2d522 267 page = __drbd_alloc_pages(mdev, number);
b411b363
PR
268 if (page)
269 break;
270 }
271
272 if (!retry)
273 break;
274
275 if (signal_pending(current)) {
c37c8ecf 276 dev_warn(DEV, "drbd_alloc_pages interrupted!\n");
b411b363
PR
277 break;
278 }
279
280 schedule();
281 }
282 finish_wait(&drbd_pp_wait, &wait);
283
45bb912b
LE
284 if (page)
285 atomic_add(number, &mdev->pp_in_use);
b411b363
PR
286 return page;
287}
288
c37c8ecf 289/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
87eeee41 290 * Is also used from inside another spin_lock_irq(&mdev->tconn->req_lock);
45bb912b
LE
291 * Either links the page chain back to the global pool,
292 * or returns all pages to the system. */
5cc287e0 293static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
b411b363 294{
435f0740 295 atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
b411b363 296 int i;
435f0740 297
a73ff323
LE
298 if (page == NULL)
299 return;
300
81a5d60e 301 if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
45bb912b
LE
302 i = page_chain_free(page);
303 else {
304 struct page *tmp;
305 tmp = page_chain_tail(page, &i);
306 spin_lock(&drbd_pp_lock);
307 page_chain_add(&drbd_pp_pool, page, tmp);
308 drbd_pp_vacant += i;
309 spin_unlock(&drbd_pp_lock);
b411b363 310 }
435f0740 311 i = atomic_sub_return(i, a);
45bb912b 312 if (i < 0)
435f0740
LE
313 dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
314 is_net ? "pp_in_use_by_net" : "pp_in_use", i);
b411b363
PR
315 wake_up(&drbd_pp_wait);
316}
317
318/*
319You need to hold the req_lock:
320 _drbd_wait_ee_list_empty()
321
322You must not have the req_lock:
3967deb1 323 drbd_free_peer_req()
0db55363 324 drbd_alloc_peer_req()
7721f567 325 drbd_free_peer_reqs()
b411b363 326 drbd_ee_fix_bhs()
a990be46 327 drbd_finish_peer_reqs()
b411b363
PR
328 drbd_clear_done_ee()
329 drbd_wait_ee_list_empty()
330*/
331
f6ffca9f 332struct drbd_peer_request *
0db55363
AG
333drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
334 unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
b411b363 335{
db830c46 336 struct drbd_peer_request *peer_req;
a73ff323 337 struct page *page = NULL;
45bb912b 338 unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
b411b363 339
0cf9d27e 340 if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
b411b363
PR
341 return NULL;
342
db830c46
AG
343 peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
344 if (!peer_req) {
b411b363 345 if (!(gfp_mask & __GFP_NOWARN))
0db55363 346 dev_err(DEV, "%s: allocation failed\n", __func__);
b411b363
PR
347 return NULL;
348 }
349
a73ff323 350 if (data_size) {
81a3537a 351 page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
a73ff323
LE
352 if (!page)
353 goto fail;
354 }
b411b363 355
db830c46
AG
356 drbd_clear_interval(&peer_req->i);
357 peer_req->i.size = data_size;
358 peer_req->i.sector = sector;
359 peer_req->i.local = false;
360 peer_req->i.waiting = false;
361
362 peer_req->epoch = NULL;
a21e9298 363 peer_req->w.mdev = mdev;
db830c46
AG
364 peer_req->pages = page;
365 atomic_set(&peer_req->pending_bios, 0);
366 peer_req->flags = 0;
9a8e7753
AG
367 /*
368 * The block_id is opaque to the receiver. It is not endianness
369 * converted, and sent back to the sender unchanged.
370 */
db830c46 371 peer_req->block_id = id;
b411b363 372
db830c46 373 return peer_req;
b411b363 374
45bb912b 375 fail:
db830c46 376 mempool_free(peer_req, drbd_ee_mempool);
b411b363
PR
377 return NULL;
378}
379
3967deb1 380void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
f6ffca9f 381 int is_net)
b411b363 382{
db830c46
AG
383 if (peer_req->flags & EE_HAS_DIGEST)
384 kfree(peer_req->digest);
5cc287e0 385 drbd_free_pages(mdev, peer_req->pages, is_net);
db830c46
AG
386 D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
387 D_ASSERT(drbd_interval_empty(&peer_req->i));
388 mempool_free(peer_req, drbd_ee_mempool);
b411b363
PR
389}
390
7721f567 391int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
b411b363
PR
392{
393 LIST_HEAD(work_list);
db830c46 394 struct drbd_peer_request *peer_req, *t;
b411b363 395 int count = 0;
435f0740 396 int is_net = list == &mdev->net_ee;
b411b363 397
87eeee41 398 spin_lock_irq(&mdev->tconn->req_lock);
b411b363 399 list_splice_init(list, &work_list);
87eeee41 400 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 401
db830c46 402 list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
3967deb1 403 __drbd_free_peer_req(mdev, peer_req, is_net);
b411b363
PR
404 count++;
405 }
406 return count;
407}
408
b411b363 409/*
a990be46 410 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
b411b363 411 */
a990be46 412static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
b411b363
PR
413{
414 LIST_HEAD(work_list);
415 LIST_HEAD(reclaimed);
db830c46 416 struct drbd_peer_request *peer_req, *t;
e2b3032b 417 int err = 0;
b411b363 418
87eeee41 419 spin_lock_irq(&mdev->tconn->req_lock);
a990be46 420 reclaim_finished_net_peer_reqs(mdev, &reclaimed);
b411b363 421 list_splice_init(&mdev->done_ee, &work_list);
87eeee41 422 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 423
db830c46 424 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
3967deb1 425 drbd_free_net_peer_req(mdev, peer_req);
b411b363
PR
426
427 /* possible callbacks here:
d4dabbe2 428 * e_end_block, and e_end_resync_block, e_send_superseded.
b411b363
PR
429 * all ignore the last argument.
430 */
db830c46 431 list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
e2b3032b
AG
432 int err2;
433
b411b363 434 /* list_del not necessary, next/prev members not touched */
e2b3032b
AG
435 err2 = peer_req->w.cb(&peer_req->w, !!err);
436 if (!err)
437 err = err2;
3967deb1 438 drbd_free_peer_req(mdev, peer_req);
b411b363
PR
439 }
440 wake_up(&mdev->ee_wait);
441
e2b3032b 442 return err;
b411b363
PR
443}
444
d4da1537
AG
445static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
446 struct list_head *head)
b411b363
PR
447{
448 DEFINE_WAIT(wait);
449
450 /* avoids spin_lock/unlock
451 * and calling prepare_to_wait in the fast path */
452 while (!list_empty(head)) {
453 prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
87eeee41 454 spin_unlock_irq(&mdev->tconn->req_lock);
7eaceacc 455 io_schedule();
b411b363 456 finish_wait(&mdev->ee_wait, &wait);
87eeee41 457 spin_lock_irq(&mdev->tconn->req_lock);
b411b363
PR
458 }
459}
460
d4da1537
AG
461static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
462 struct list_head *head)
b411b363 463{
87eeee41 464 spin_lock_irq(&mdev->tconn->req_lock);
b411b363 465 _drbd_wait_ee_list_empty(mdev, head);
87eeee41 466 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363
PR
467}
468
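/* Receive up to @size bytes from @sock into a kernel buffer.
 * Without explicit @flags this waits for the full amount
 * (MSG_WAITALL | MSG_NOSIGNAL). Returns the number of bytes
 * received, or a negative error code. */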
dbd9eea0 469static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
b411b363
PR
470{
471 mm_segment_t oldfs;
472 struct kvec iov = {
473 .iov_base = buf,
474 .iov_len = size,
475 };
476 struct msghdr msg = {
477 .msg_iovlen = 1,
478 .msg_iov = (struct iovec *)&iov,
479 .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
480 };
481 int rv;
482
483 oldfs = get_fs();
484 set_fs(KERNEL_DS);
485 rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
486 set_fs(oldfs);
487
488 return rv;
489}
490
de0ff338 491static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
b411b363 492{
b411b363
PR
493 int rv;
494
1393b59f 495 rv = drbd_recv_short(tconn->data.socket, buf, size, 0);
b411b363 496
dbd0820c
PR
497 if (rv < 0) {
498 if (rv == -ECONNRESET)
155522df 499 conn_info(tconn, "sock was reset by peer\n");
dbd0820c 500 else if (rv != -ERESTARTSYS)
155522df 501 conn_err(tconn, "sock_recvmsg returned %d\n", rv);
dbd0820c 502 } else if (rv == 0) {
b66623e3
PR
503 if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
504 long t;
505 rcu_read_lock();
506 t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
507 rcu_read_unlock();
508
509 t = wait_event_timeout(tconn->ping_wait, tconn->cstate < C_WF_REPORT_PARAMS, t);
510
599377ac
PR
511 if (t)
512 goto out;
513 }
b66623e3 514 conn_info(tconn, "sock was shut down by peer\n");
599377ac
PR
515 }
516
b411b363 517 if (rv != size)
bbeb641c 518 conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
b411b363 519
599377ac 520out:
b411b363
PR
521 return rv;
522}
523
c6967746
AG
524static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
525{
526 int err;
527
528 err = drbd_recv(tconn, buf, size);
529 if (err != size) {
530 if (err >= 0)
531 err = -EIO;
532 } else
533 err = 0;
534 return err;
535}
536
a5c31904
AG
537static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
538{
539 int err;
540
541 err = drbd_recv_all(tconn, buf, size);
542 if (err && !signal_pending(current))
543 conn_warn(tconn, "short read (expected size %d)\n", (int)size);
544 return err;
545}
546
5dbf1673
LE
547/* quoting tcp(7):
548 * On individual connections, the socket buffer size must be set prior to the
549 * listen(2) or connect(2) calls in order to have it take effect.
550 * This is our wrapper to do so.
551 */
552static void drbd_setbufsize(struct socket *sock, unsigned int snd,
553 unsigned int rcv)
554{
555 /* open coded SO_SNDBUF, SO_RCVBUF */
556 if (snd) {
557 sock->sk->sk_sndbuf = snd;
558 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
559 }
560 if (rcv) {
561 sock->sk->sk_rcvbuf = rcv;
562 sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
563 }
564}
565
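/* Set up one outgoing TCP connection to the peer: bind to the
 * configured source address (port 0, so the kernel picks a free port),
 * then connect. Returns the connected socket, or NULL on failure. */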
eac3e990 566static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
b411b363
PR
567{
568 const char *what;
569 struct socket *sock;
570 struct sockaddr_in6 src_in6;
44ed167d
PR
571 struct sockaddr_in6 peer_in6;
572 struct net_conf *nc;
573 int err, peer_addr_len, my_addr_len;
69ef82de 574 int sndbuf_size, rcvbuf_size, connect_int;
b411b363
PR
575 int disconnect_on_error = 1;
576
44ed167d
PR
577 rcu_read_lock();
578 nc = rcu_dereference(tconn->net_conf);
579 if (!nc) {
580 rcu_read_unlock();
b411b363 581 return NULL;
44ed167d 582 }
44ed167d
PR
583 sndbuf_size = nc->sndbuf_size;
584 rcvbuf_size = nc->rcvbuf_size;
69ef82de 585 connect_int = nc->connect_int;
089c075d 586 rcu_read_unlock();
44ed167d 587
089c075d
AG
588 my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6));
589 memcpy(&src_in6, &tconn->my_addr, my_addr_len);
44ed167d 590
089c075d 591 if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6)
44ed167d
PR
592 src_in6.sin6_port = 0;
593 else
594 ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
595
089c075d
AG
596 peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6));
597 memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len);
b411b363
PR
598
599 what = "sock_create_kern";
44ed167d
PR
600 err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
601 SOCK_STREAM, IPPROTO_TCP, &sock);
b411b363
PR
602 if (err < 0) {
603 sock = NULL;
604 goto out;
605 }
606
607 sock->sk->sk_rcvtimeo =
69ef82de 608 sock->sk->sk_sndtimeo = connect_int * HZ;
44ed167d 609 drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);
b411b363
PR
610
611 /* explicitly bind to the configured IP as source IP
612 * for the outgoing connections.
613 * This is needed for multihomed hosts and to be
614 * able to use lo: interfaces for drbd.
615 * Make sure to use 0 as port number, so linux selects
616 * a free one dynamically.
617 */
b411b363 618 what = "bind before connect";
44ed167d 619 err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
b411b363
PR
620 if (err < 0)
621 goto out;
622
623 /* connect may fail, peer not yet available.
624 * stay C_WF_CONNECTION, don't go Disconnecting! */
625 disconnect_on_error = 0;
626 what = "connect";
44ed167d 627 err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);
b411b363
PR
628
629out:
630 if (err < 0) {
631 if (sock) {
632 sock_release(sock);
633 sock = NULL;
634 }
635 switch (-err) {
636 /* timeout, busy, signal pending */
637 case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
638 case EINTR: case ERESTARTSYS:
639 /* peer not (yet) available, network problem */
640 case ECONNREFUSED: case ENETUNREACH:
641 case EHOSTDOWN: case EHOSTUNREACH:
642 disconnect_on_error = 0;
643 break;
644 default:
eac3e990 645 conn_err(tconn, "%s failed, err = %d\n", what, err);
b411b363
PR
646 }
647 if (disconnect_on_error)
bbeb641c 648 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363 649 }
44ed167d 650
b411b363
PR
651 return sock;
652}
653
7a426fd8
PR
654struct accept_wait_data {
655 struct drbd_tconn *tconn;
656 struct socket *s_listen;
657 struct completion door_bell;
658 void (*original_sk_state_change)(struct sock *sk);
659
660};
661
715306f6 662static void drbd_incoming_connection(struct sock *sk)
7a426fd8
PR
663{
664 struct accept_wait_data *ad = sk->sk_user_data;
715306f6 665 void (*state_change)(struct sock *sk);
7a426fd8 666
715306f6
AG
667 state_change = ad->original_sk_state_change;
668 if (sk->sk_state == TCP_ESTABLISHED)
669 complete(&ad->door_bell);
670 state_change(sk);
7a426fd8
PR
671}
672
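/* Create, bind and listen on the server-side socket. The socket's
 * sk_state_change callback is redirected to drbd_incoming_connection()
 * so that an incoming connection completes ad->door_bell.
 * Returns 0 on success, -EIO otherwise. */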
673static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_data *ad)
b411b363 674{
1f3e509b 675 int err, sndbuf_size, rcvbuf_size, my_addr_len;
44ed167d 676 struct sockaddr_in6 my_addr;
1f3e509b 677 struct socket *s_listen;
44ed167d 678 struct net_conf *nc;
b411b363
PR
679 const char *what;
680
44ed167d
PR
681 rcu_read_lock();
682 nc = rcu_dereference(tconn->net_conf);
683 if (!nc) {
684 rcu_read_unlock();
7a426fd8 685 return -EIO;
44ed167d 686 }
44ed167d
PR
687 sndbuf_size = nc->sndbuf_size;
688 rcvbuf_size = nc->rcvbuf_size;
44ed167d 689 rcu_read_unlock();
b411b363 690
089c075d
AG
691 my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6));
692 memcpy(&my_addr, &tconn->my_addr, my_addr_len);
b411b363
PR
693
694 what = "sock_create_kern";
44ed167d 695 err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
1f3e509b 696 SOCK_STREAM, IPPROTO_TCP, &s_listen);
b411b363
PR
697 if (err) {
698 s_listen = NULL;
699 goto out;
700 }
701
98683650 702 s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
44ed167d 703 drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);
b411b363
PR
704
705 what = "bind before listen";
44ed167d 706 err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
b411b363
PR
707 if (err < 0)
708 goto out;
709
7a426fd8
PR
710 ad->s_listen = s_listen;
711 write_lock_bh(&s_listen->sk->sk_callback_lock);
712 ad->original_sk_state_change = s_listen->sk->sk_state_change;
715306f6 713 s_listen->sk->sk_state_change = drbd_incoming_connection;
7a426fd8
PR
714 s_listen->sk->sk_user_data = ad;
715 write_unlock_bh(&s_listen->sk->sk_callback_lock);
b411b363 716
2820fd39
PR
717 what = "listen";
718 err = s_listen->ops->listen(s_listen, 5);
719 if (err < 0)
720 goto out;
721
7a426fd8 722 return 0;
b411b363
PR
723out:
724 if (s_listen)
725 sock_release(s_listen);
726 if (err < 0) {
727 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
1f3e509b
PR
728 conn_err(tconn, "%s failed, err = %d\n", what, err);
729 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363
PR
730 }
731 }
b411b363 732
7a426fd8 733 return -EIO;
b411b363
PR
734}
735
715306f6 736static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
b411b363 737{
715306f6
AG
738 write_lock_bh(&sk->sk_callback_lock);
739 sk->sk_state_change = ad->original_sk_state_change;
740 sk->sk_user_data = NULL;
741 write_unlock_bh(&sk->sk_callback_lock);
b411b363
PR
742}
743
7a426fd8 744static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct accept_wait_data *ad)
b411b363 745{
1f3e509b
PR
746 int timeo, connect_int, err = 0;
747 struct socket *s_estab = NULL;
1f3e509b
PR
748 struct net_conf *nc;
749
750 rcu_read_lock();
751 nc = rcu_dereference(tconn->net_conf);
752 if (!nc) {
753 rcu_read_unlock();
754 return NULL;
755 }
756 connect_int = nc->connect_int;
757 rcu_read_unlock();
758
759 timeo = connect_int * HZ;
760 timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
761
7a426fd8
PR
762 err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
763 if (err <= 0)
764 return NULL;
b411b363 765
7a426fd8 766 err = kernel_accept(ad->s_listen, &s_estab, 0);
b411b363
PR
767 if (err < 0) {
768 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
1f3e509b 769 conn_err(tconn, "accept failed, err = %d\n", err);
bbeb641c 770 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363
PR
771 }
772 }
b411b363 773
715306f6
AG
774 if (s_estab)
775 unregister_state_change(s_estab->sk, ad);
b411b363 776
b411b363
PR
777 return s_estab;
778}
b411b363 779
e658983a 780static int decode_header(struct drbd_tconn *, void *, struct packet_info *);
b411b363 781
9f5bdc33
AG
782static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
783 enum drbd_packet cmd)
784{
785 if (!conn_prepare_command(tconn, sock))
786 return -EIO;
e658983a 787 return conn_send_command(tconn, sock, cmd, 0, NULL, 0);
b411b363
PR
788}
789
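/* Read and decode the first packet on a freshly accepted socket.
 * Returns the decoded packet command, or a negative error code. */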
9f5bdc33 790static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)
b411b363 791{
9f5bdc33
AG
792 unsigned int header_size = drbd_header_size(tconn);
793 struct packet_info pi;
794 int err;
b411b363 795
9f5bdc33
AG
796 err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
797 if (err != header_size) {
798 if (err >= 0)
799 err = -EIO;
800 return err;
801 }
802 err = decode_header(tconn, tconn->data.rbuf, &pi);
803 if (err)
804 return err;
805 return pi.cmd;
b411b363
PR
806}
807
808/**
809 * drbd_socket_okay() - Free the socket if its connection is not okay
b411b363
PR
810 * @sock: pointer to the pointer to the socket.
811 */
dbd9eea0 812static int drbd_socket_okay(struct socket **sock)
b411b363
PR
813{
814 int rr;
815 char tb[4];
816
817 if (!*sock)
81e84650 818 return false;
b411b363 819
dbd9eea0 820 rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
b411b363
PR
821
822 if (rr > 0 || rr == -EAGAIN) {
81e84650 823 return true;
b411b363
PR
824 } else {
825 sock_release(*sock);
826 *sock = NULL;
81e84650 827 return false;
b411b363
PR
828 }
829}
2325eb66
PR
830/* Gets called if a connection is established, or if a new minor gets created
831 in a connection */
c141ebda 832int drbd_connected(struct drbd_conf *mdev)
907599e0 833{
0829f5ed 834 int err;
907599e0
PR
835
836 atomic_set(&mdev->packet_seq, 0);
837 mdev->peer_seq = 0;
838
8410da8f
PR
839 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
840 &mdev->tconn->cstate_mutex :
841 &mdev->own_state_mutex;
842
0829f5ed
AG
843 err = drbd_send_sync_param(mdev);
844 if (!err)
845 err = drbd_send_sizes(mdev, 0, 0);
846 if (!err)
847 err = drbd_send_uuids(mdev);
848 if (!err)
43de7c85 849 err = drbd_send_current_state(mdev);
907599e0
PR
850 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
851 clear_bit(RESIZE_PENDING, &mdev->flags);
8b924f1d 852 mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
0829f5ed 853 return err;
907599e0 854}
b411b363
PR
855
856/*
857 * return values:
858 * 1 yes, we have a valid connection
859 * 0 oops, did not work out, please try again
860 * -1 peer talks different language,
861 * no point in trying again, please go standalone.
862 * -2 We do not have a network config...
863 */
81fa2e67 864static int conn_connect(struct drbd_tconn *tconn)
b411b363 865{
7da35862 866 struct drbd_socket sock, msock;
c141ebda 867 struct drbd_conf *mdev;
44ed167d 868 struct net_conf *nc;
92f14951 869 int vnr, timeout, h, ok;
08b165ba 870 bool discard_my_data;
197296ff 871 enum drbd_state_rv rv;
7a426fd8
PR
872 struct accept_wait_data ad = {
873 .tconn = tconn,
874 .door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
875 };
b411b363 876
b66623e3 877 clear_bit(DISCONNECT_SENT, &tconn->flags);
bbeb641c 878 if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
b411b363
PR
879 return -2;
880
7da35862
PR
881 mutex_init(&sock.mutex);
882 sock.sbuf = tconn->data.sbuf;
883 sock.rbuf = tconn->data.rbuf;
884 sock.socket = NULL;
885 mutex_init(&msock.mutex);
886 msock.sbuf = tconn->meta.sbuf;
887 msock.rbuf = tconn->meta.rbuf;
888 msock.socket = NULL;
889
0916e0e3
AG
890 /* Assume that the peer only understands protocol 80 until we know better. */
891 tconn->agreed_pro_version = 80;
b411b363 892
7a426fd8
PR
893 if (prepare_listen_socket(tconn, &ad))
894 return 0;
b411b363
PR
895
896 do {
2bf89621 897 struct socket *s;
b411b363 898
92f14951 899 s = drbd_try_connect(tconn);
b411b363 900 if (s) {
7da35862
PR
901 if (!sock.socket) {
902 sock.socket = s;
903 send_first_packet(tconn, &sock, P_INITIAL_DATA);
904 } else if (!msock.socket) {
427c0434 905 clear_bit(RESOLVE_CONFLICTS, &tconn->flags);
7da35862
PR
906 msock.socket = s;
907 send_first_packet(tconn, &msock, P_INITIAL_META);
b411b363 908 } else {
81fa2e67 909 conn_err(tconn, "Logic error in conn_connect()\n");
b411b363
PR
910 goto out_release_sockets;
911 }
912 }
913
7da35862
PR
914 if (sock.socket && msock.socket) {
915 rcu_read_lock();
916 nc = rcu_dereference(tconn->net_conf);
917 timeout = nc->ping_timeo * HZ / 10;
918 rcu_read_unlock();
919 schedule_timeout_interruptible(timeout);
920 ok = drbd_socket_okay(&sock.socket);
921 ok = drbd_socket_okay(&msock.socket) && ok;
b411b363
PR
922 if (ok)
923 break;
924 }
925
926retry:
7a426fd8 927 s = drbd_wait_for_connect(tconn, &ad);
b411b363 928 if (s) {
92f14951 929 int fp = receive_first_packet(tconn, s);
7da35862
PR
930 drbd_socket_okay(&sock.socket);
931 drbd_socket_okay(&msock.socket);
92f14951 932 switch (fp) {
e5d6f33a 933 case P_INITIAL_DATA:
7da35862 934 if (sock.socket) {
907599e0 935 conn_warn(tconn, "initial packet S crossed\n");
7da35862 936 sock_release(sock.socket);
80c6eed4
PR
937 sock.socket = s;
938 goto randomize;
b411b363 939 }
7da35862 940 sock.socket = s;
b411b363 941 break;
e5d6f33a 942 case P_INITIAL_META:
427c0434 943 set_bit(RESOLVE_CONFLICTS, &tconn->flags);
7da35862 944 if (msock.socket) {
907599e0 945 conn_warn(tconn, "initial packet M crossed\n");
7da35862 946 sock_release(msock.socket);
80c6eed4
PR
947 msock.socket = s;
948 goto randomize;
b411b363 949 }
7da35862 950 msock.socket = s;
b411b363
PR
951 break;
952 default:
907599e0 953 conn_warn(tconn, "Error receiving initial packet\n");
b411b363 954 sock_release(s);
80c6eed4 955randomize:
b411b363
PR
956 if (random32() & 1)
957 goto retry;
958 }
959 }
960
bbeb641c 961 if (tconn->cstate <= C_DISCONNECTING)
b411b363
PR
962 goto out_release_sockets;
963 if (signal_pending(current)) {
964 flush_signals(current);
965 smp_rmb();
907599e0 966 if (get_t_state(&tconn->receiver) == EXITING)
b411b363
PR
967 goto out_release_sockets;
968 }
969
b666dbf8
PR
970 ok = drbd_socket_okay(&sock.socket);
971 ok = drbd_socket_okay(&msock.socket) && ok;
972 } while (!ok);
b411b363 973
7a426fd8
PR
974 if (ad.s_listen)
975 sock_release(ad.s_listen);
b411b363 976
98683650
PR
977 sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
978 msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
b411b363 979
7da35862
PR
980 sock.socket->sk->sk_allocation = GFP_NOIO;
981 msock.socket->sk->sk_allocation = GFP_NOIO;
b411b363 982
7da35862
PR
983 sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
984 msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;
b411b363 985
b411b363 986 /* NOT YET ...
7da35862
PR
987 * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
988 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
6038178e 989 * first set it to the P_CONNECTION_FEATURES timeout,
b411b363 990 * which we set to 4x the configured ping_timeout. */
44ed167d
PR
991 rcu_read_lock();
992 nc = rcu_dereference(tconn->net_conf);
993
7da35862
PR
994 sock.socket->sk->sk_sndtimeo =
995 sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
b411b363 996
7da35862 997 msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
44ed167d 998 timeout = nc->timeout * HZ / 10;
08b165ba 999 discard_my_data = nc->discard_my_data;
44ed167d 1000 rcu_read_unlock();
b411b363 1001
7da35862 1002 msock.socket->sk->sk_sndtimeo = timeout;
b411b363
PR
1003
1004 /* we don't want delays.
25985edc 1005 * we use TCP_CORK where appropriate, though */
7da35862
PR
1006 drbd_tcp_nodelay(sock.socket);
1007 drbd_tcp_nodelay(msock.socket);
b411b363 1008
7da35862
PR
1009 tconn->data.socket = sock.socket;
1010 tconn->meta.socket = msock.socket;
907599e0 1011 tconn->last_received = jiffies;
b411b363 1012
6038178e 1013 h = drbd_do_features(tconn);
b411b363
PR
1014 if (h <= 0)
1015 return h;
1016
907599e0 1017 if (tconn->cram_hmac_tfm) {
b411b363 1018 /* drbd_request_state(mdev, NS(conn, WFAuth)); */
907599e0 1019 switch (drbd_do_auth(tconn)) {
b10d96cb 1020 case -1:
907599e0 1021 conn_err(tconn, "Authentication of peer failed\n");
b411b363 1022 return -1;
b10d96cb 1023 case 0:
907599e0 1024 conn_err(tconn, "Authentication of peer failed, trying again.\n");
b10d96cb 1025 return 0;
b411b363
PR
1026 }
1027 }
1028
7da35862
PR
1029 tconn->data.socket->sk->sk_sndtimeo = timeout;
1030 tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
b411b363 1031
387eb308 1032 if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
7e2455c1 1033 return -1;
b411b363 1034
a1096a6e
PR
1035 set_bit(STATE_SENT, &tconn->flags);
1036
c141ebda
PR
1037 rcu_read_lock();
1038 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1039 kref_get(&mdev->kref);
13c76aba
PR
1040 /* Prevent a race between resync-handshake and
1041 * being promoted to Primary.
1042 *
1043 * Grab and release the state mutex, so we know that any current
1044 * drbd_set_role() is finished, and any incoming drbd_set_role
1045 * will see the STATE_SENT flag, and wait for it to be cleared.
1046 */
1047 mutex_lock(mdev->state_mutex);
1048 mutex_unlock(mdev->state_mutex);
1049
c141ebda 1050 rcu_read_unlock();
08b165ba
PR
1051
1052 if (discard_my_data)
1053 set_bit(DISCARD_MY_DATA, &mdev->flags);
1054 else
1055 clear_bit(DISCARD_MY_DATA, &mdev->flags);
1056
c141ebda
PR
1057 drbd_connected(mdev);
1058 kref_put(&mdev->kref, &drbd_minor_destroy);
1059 rcu_read_lock();
1060 }
1061 rcu_read_unlock();
1062
a1096a6e 1063 rv = conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
ed635cb0 1064 if (rv < SS_SUCCESS || tconn->cstate != C_WF_REPORT_PARAMS) {
a1096a6e 1065 clear_bit(STATE_SENT, &tconn->flags);
1e86ac48 1066 return 0;
a1096a6e 1067 }
1e86ac48 1068
823bd832 1069 drbd_thread_start(&tconn->asender);
b411b363 1070
08b165ba
PR
1071 mutex_lock(&tconn->conf_update);
1072 /* The discard_my_data flag is a single-shot modifier to the next
1073 * connection attempt, the handshake of which is now well underway.
1074 * No need for rcu style copying of the whole struct
1075 * just to clear a single value. */
1076 tconn->net_conf->discard_my_data = 0;
1077 mutex_unlock(&tconn->conf_update);
1078
d3fcb490 1079 return h;
b411b363
PR
1080
1081out_release_sockets:
7a426fd8
PR
1082 if (ad.s_listen)
1083 sock_release(ad.s_listen);
7da35862
PR
1084 if (sock.socket)
1085 sock_release(sock.socket);
1086 if (msock.socket)
1087 sock_release(msock.socket);
b411b363
PR
1088 return -1;
1089}
1090
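/* Decode a protocol header (struct p_header80, p_header95 or p_header100,
 * depending on the agreed protocol version) into @pi.
 * Returns 0, or -EINVAL on an unexpected magic value or non-zero padding. */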
e658983a 1091static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)
b411b363 1092{
e658983a
AG
1093 unsigned int header_size = drbd_header_size(tconn);
1094
0c8e36d9
AG
1095 if (header_size == sizeof(struct p_header100) &&
1096 *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
1097 struct p_header100 *h = header;
1098 if (h->pad != 0) {
1099 conn_err(tconn, "Header padding is not zero\n");
1100 return -EINVAL;
1101 }
1102 pi->vnr = be16_to_cpu(h->volume);
1103 pi->cmd = be16_to_cpu(h->command);
1104 pi->size = be32_to_cpu(h->length);
1105 } else if (header_size == sizeof(struct p_header95) &&
1106 *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
e658983a 1107 struct p_header95 *h = header;
e658983a 1108 pi->cmd = be16_to_cpu(h->command);
b55d84ba
AG
1109 pi->size = be32_to_cpu(h->length);
1110 pi->vnr = 0;
e658983a
AG
1111 } else if (header_size == sizeof(struct p_header80) &&
1112 *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
1113 struct p_header80 *h = header;
1114 pi->cmd = be16_to_cpu(h->command);
1115 pi->size = be16_to_cpu(h->length);
77351055 1116 pi->vnr = 0;
02918be2 1117 } else {
e658983a
AG
1118 conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
1119 be32_to_cpu(*(__be32 *)header),
1120 tconn->agreed_pro_version);
8172f3e9 1121 return -EINVAL;
b411b363 1122 }
e658983a 1123 pi->data = header + header_size;
8172f3e9 1124 return 0;
257d0af6 1125}
b411b363 1126
9ba7aa00 1127static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
257d0af6 1128{
e658983a 1129 void *buffer = tconn->data.rbuf;
69bc7bc3 1130 int err;
257d0af6 1131
e658983a 1132 err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));
a5c31904 1133 if (err)
69bc7bc3 1134 return err;
257d0af6 1135
e658983a 1136 err = decode_header(tconn, buffer, pi);
9ba7aa00 1137 tconn->last_received = jiffies;
b411b363 1138
69bc7bc3 1139 return err;
b411b363
PR
1140}
1141
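/* If the current write ordering requires it, flush the backing devices
 * of all volumes on this connection; on a flush failure, degrade to
 * WO_drain_io via drbd_bump_write_ordering(). */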
4b0007c0 1142static void drbd_flush(struct drbd_tconn *tconn)
b411b363
PR
1143{
1144 int rv;
4b0007c0
PR
1145 struct drbd_conf *mdev;
1146 int vnr;
1147
1148 if (tconn->write_ordering >= WO_bdev_flush) {
615e087f 1149 rcu_read_lock();
4b0007c0 1150 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
615e087f
LE
1151 if (!get_ldev(mdev))
1152 continue;
1153 kref_get(&mdev->kref);
1154 rcu_read_unlock();
1155
1156 rv = blkdev_issue_flush(mdev->ldev->backing_bdev,
1157 GFP_NOIO, NULL);
1158 if (rv) {
1159 dev_info(DEV, "local disk flush failed with status %d\n", rv);
1160 /* would rather check on EOPNOTSUPP, but that is not reliable.
1161 * don't try again for ANY return value != 0
1162 * if (rv == -EOPNOTSUPP) */
1163 drbd_bump_write_ordering(tconn, WO_drain_io);
4b0007c0 1164 }
615e087f
LE
1165 put_ldev(mdev);
1166 kref_put(&mdev->kref, &drbd_minor_destroy);
b411b363 1167
615e087f
LE
1168 rcu_read_lock();
1169 if (rv)
1170 break;
b411b363 1171 }
615e087f 1172 rcu_read_unlock();
b411b363 1173 }
b411b363
PR
1174}
1175
1176/**
 1177 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, possibly finishing it.
1178 * @mdev: DRBD device.
1179 * @epoch: Epoch object.
1180 * @ev: Epoch event.
1181 */
1e9dd291 1182static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
b411b363
PR
1183 struct drbd_epoch *epoch,
1184 enum epoch_event ev)
1185{
2451fc3b 1186 int epoch_size;
b411b363 1187 struct drbd_epoch *next_epoch;
b411b363
PR
1188 enum finish_epoch rv = FE_STILL_LIVE;
1189
12038a3a 1190 spin_lock(&tconn->epoch_lock);
b411b363
PR
1191 do {
1192 next_epoch = NULL;
b411b363
PR
1193
1194 epoch_size = atomic_read(&epoch->epoch_size);
1195
1196 switch (ev & ~EV_CLEANUP) {
1197 case EV_PUT:
1198 atomic_dec(&epoch->active);
1199 break;
1200 case EV_GOT_BARRIER_NR:
1201 set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
b411b363
PR
1202 break;
1203 case EV_BECAME_LAST:
1204 /* nothing to do*/
1205 break;
1206 }
1207
b411b363
PR
1208 if (epoch_size != 0 &&
1209 atomic_read(&epoch->active) == 0 &&
80f9fd55 1210 (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
b411b363 1211 if (!(ev & EV_CLEANUP)) {
12038a3a 1212 spin_unlock(&tconn->epoch_lock);
9ed57dcb 1213 drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size);
12038a3a 1214 spin_lock(&tconn->epoch_lock);
b411b363 1215 }
9ed57dcb
LE
1216#if 0
1217 /* FIXME: dec unacked on connection, once we have
1218 * something to count pending connection packets in. */
80f9fd55 1219 if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
9ed57dcb
LE
1220 dec_unacked(epoch->tconn);
1221#endif
b411b363 1222
12038a3a 1223 if (tconn->current_epoch != epoch) {
b411b363
PR
1224 next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1225 list_del(&epoch->list);
1226 ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
12038a3a 1227 tconn->epochs--;
b411b363
PR
1228 kfree(epoch);
1229
1230 if (rv == FE_STILL_LIVE)
1231 rv = FE_DESTROYED;
1232 } else {
1233 epoch->flags = 0;
1234 atomic_set(&epoch->epoch_size, 0);
698f9315 1235 /* atomic_set(&epoch->active, 0); is already zero */
b411b363
PR
1236 if (rv == FE_STILL_LIVE)
1237 rv = FE_RECYCLED;
1238 }
1239 }
1240
1241 if (!next_epoch)
1242 break;
1243
1244 epoch = next_epoch;
1245 } while (1);
1246
12038a3a 1247 spin_unlock(&tconn->epoch_lock);
b411b363 1248
b411b363
PR
1249 return rv;
1250}
1251
1252/**
 1253 * drbd_bump_write_ordering() - Fall back to another write ordering method
4b0007c0 1254 * @tconn: DRBD connection.
b411b363
PR
1255 * @wo: Write ordering method to try.
1256 */
4b0007c0 1257void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo)
b411b363 1258{
daeda1cc 1259 struct disk_conf *dc;
4b0007c0 1260 struct drbd_conf *mdev;
b411b363 1261 enum write_ordering_e pwo;
4b0007c0 1262 int vnr;
b411b363
PR
1263 static char *write_ordering_str[] = {
1264 [WO_none] = "none",
1265 [WO_drain_io] = "drain",
1266 [WO_bdev_flush] = "flush",
b411b363
PR
1267 };
1268
4b0007c0 1269 pwo = tconn->write_ordering;
b411b363 1270 wo = min(pwo, wo);
daeda1cc 1271 rcu_read_lock();
4b0007c0 1272 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
27eb13e9 1273 if (!get_ldev_if_state(mdev, D_ATTACHING))
4b0007c0
PR
1274 continue;
1275 dc = rcu_dereference(mdev->ldev->disk_conf);
1276
1277 if (wo == WO_bdev_flush && !dc->disk_flushes)
1278 wo = WO_drain_io;
1279 if (wo == WO_drain_io && !dc->disk_drain)
1280 wo = WO_none;
1281 put_ldev(mdev);
1282 }
daeda1cc 1283 rcu_read_unlock();
4b0007c0
PR
1284 tconn->write_ordering = wo;
1285 if (pwo != tconn->write_ordering || wo == WO_bdev_flush)
1286 conn_info(tconn, "Method to ensure write ordering: %s\n", write_ordering_str[tconn->write_ordering]);
b411b363
PR
1287}
1288
45bb912b 1289/**
fbe29dec 1290 * drbd_submit_peer_request() - submit the I/O for a peer request to the local backing device
45bb912b 1291 * @mdev: DRBD device.
db830c46 1292 * @peer_req: peer request
45bb912b 1293 * @rw: flag field, see bio->bi_rw
10f6d992
LE
1294 *
1295 * May spread the pages to multiple bios,
1296 * depending on bio_add_page restrictions.
1297 *
1298 * Returns 0 if all bios have been submitted,
1299 * -ENOMEM if we could not allocate enough bios,
1300 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
1301 * single page to an empty bio (which should never happen and likely indicates
1302 * that the lower level IO stack is in some way broken). This has been observed
1303 * on certain Xen deployments.
45bb912b
LE
1304 */
1305/* TODO allocate from our own bio_set. */
fbe29dec
AG
1306int drbd_submit_peer_request(struct drbd_conf *mdev,
1307 struct drbd_peer_request *peer_req,
1308 const unsigned rw, const int fault_type)
45bb912b
LE
1309{
1310 struct bio *bios = NULL;
1311 struct bio *bio;
db830c46
AG
1312 struct page *page = peer_req->pages;
1313 sector_t sector = peer_req->i.sector;
1314 unsigned ds = peer_req->i.size;
45bb912b
LE
1315 unsigned n_bios = 0;
1316 unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
10f6d992 1317 int err = -ENOMEM;
45bb912b
LE
1318
1319 /* In most cases, we will only need one bio. But in case the lower
1320 * level restrictions happen to be different at this offset on this
1321 * side than those of the sending peer, we may need to submit the
9476f39d
LE
1322 * request in more than one bio.
1323 *
1324 * Plain bio_alloc is good enough here, this is no DRBD internally
1325 * generated bio, but a bio allocated on behalf of the peer.
1326 */
45bb912b
LE
1327next_bio:
1328 bio = bio_alloc(GFP_NOIO, nr_pages);
1329 if (!bio) {
1330 dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
1331 goto fail;
1332 }
db830c46 1333 /* > peer_req->i.sector, unless this is the first bio */
45bb912b
LE
1334 bio->bi_sector = sector;
1335 bio->bi_bdev = mdev->ldev->backing_bdev;
45bb912b 1336 bio->bi_rw = rw;
db830c46 1337 bio->bi_private = peer_req;
fcefa62e 1338 bio->bi_end_io = drbd_peer_request_endio;
45bb912b
LE
1339
1340 bio->bi_next = bios;
1341 bios = bio;
1342 ++n_bios;
1343
1344 page_chain_for_each(page) {
1345 unsigned len = min_t(unsigned, ds, PAGE_SIZE);
1346 if (!bio_add_page(bio, page, len, 0)) {
10f6d992
LE
1347 /* A single page must always be possible!
1348 * But in case it fails anyways,
1349 * we deal with it, and complain (below). */
1350 if (bio->bi_vcnt == 0) {
1351 dev_err(DEV,
1352 "bio_add_page failed for len=%u, "
1353 "bi_vcnt=0 (bi_sector=%llu)\n",
1354 len, (unsigned long long)bio->bi_sector);
1355 err = -ENOSPC;
1356 goto fail;
1357 }
45bb912b
LE
1358 goto next_bio;
1359 }
1360 ds -= len;
1361 sector += len >> 9;
1362 --nr_pages;
1363 }
1364 D_ASSERT(page == NULL);
1365 D_ASSERT(ds == 0);
1366
db830c46 1367 atomic_set(&peer_req->pending_bios, n_bios);
45bb912b
LE
1368 do {
1369 bio = bios;
1370 bios = bios->bi_next;
1371 bio->bi_next = NULL;
1372
45bb912b 1373 drbd_generic_make_request(mdev, fault_type, bio);
45bb912b 1374 } while (bios);
45bb912b
LE
1375 return 0;
1376
1377fail:
1378 while (bios) {
1379 bio = bios;
1380 bios = bios->bi_next;
1381 bio_put(bio);
1382 }
10f6d992 1383 return err;
45bb912b
LE
1384}
1385
53840641 1386static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
db830c46 1387 struct drbd_peer_request *peer_req)
53840641 1388{
db830c46 1389 struct drbd_interval *i = &peer_req->i;
53840641
AG
1390
1391 drbd_remove_interval(&mdev->write_requests, i);
1392 drbd_clear_interval(i);
1393
6c852bec 1394 /* Wake up any processes waiting for this peer request to complete. */
53840641
AG
1395 if (i->waiting)
1396 wake_up(&mdev->misc_wait);
1397}
1398
77fede51
PR
1399void conn_wait_active_ee_empty(struct drbd_tconn *tconn)
1400{
1401 struct drbd_conf *mdev;
1402 int vnr;
1403
1404 rcu_read_lock();
1405 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1406 kref_get(&mdev->kref);
1407 rcu_read_unlock();
1408 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1409 kref_put(&mdev->kref, &drbd_minor_destroy);
1410 rcu_read_lock();
1411 }
1412 rcu_read_unlock();
1413}
1414
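/* Handle an incoming P_BARRIER: close the current write epoch and,
 * depending on the write ordering policy, drain or flush local I/O
 * before installing a new epoch. */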
4a76b161 1415static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 1416{
2451fc3b 1417 int rv;
e658983a 1418 struct p_barrier *p = pi->data;
b411b363
PR
1419 struct drbd_epoch *epoch;
1420
9ed57dcb
LE
1421 /* FIXME these are unacked on connection,
1422 * not a specific (peer)device.
1423 */
12038a3a 1424 tconn->current_epoch->barrier_nr = p->barrier;
9ed57dcb 1425 tconn->current_epoch->tconn = tconn;
1e9dd291 1426 rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR);
b411b363
PR
1427
1428 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1429 * the activity log, which means it would not be resynced in case the
1430 * R_PRIMARY crashes now.
1431 * Therefore we must send the barrier_ack after the barrier request was
1432 * completed. */
4b0007c0 1433 switch (tconn->write_ordering) {
b411b363
PR
1434 case WO_none:
1435 if (rv == FE_RECYCLED)
82bc0194 1436 return 0;
2451fc3b
PR
1437
1438 /* receiver context, in the writeout path of the other node.
1439 * avoid potential distributed deadlock */
1440 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1441 if (epoch)
1442 break;
1443 else
9ed57dcb 1444 conn_warn(tconn, "Allocation of an epoch failed, slowing down\n");
2451fc3b 1445 /* Fall through */
b411b363
PR
1446
1447 case WO_bdev_flush:
1448 case WO_drain_io:
77fede51 1449 conn_wait_active_ee_empty(tconn);
4b0007c0 1450 drbd_flush(tconn);
2451fc3b 1451
12038a3a 1452 if (atomic_read(&tconn->current_epoch->epoch_size)) {
2451fc3b
PR
1453 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1454 if (epoch)
1455 break;
b411b363
PR
1456 }
1457
82bc0194 1458 return 0;
2451fc3b 1459 default:
9ed57dcb 1460 conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);
82bc0194 1461 return -EIO;
b411b363
PR
1462 }
1463
1464 epoch->flags = 0;
1465 atomic_set(&epoch->epoch_size, 0);
1466 atomic_set(&epoch->active, 0);
1467
12038a3a
PR
1468 spin_lock(&tconn->epoch_lock);
1469 if (atomic_read(&tconn->current_epoch->epoch_size)) {
1470 list_add(&epoch->list, &tconn->current_epoch->list);
1471 tconn->current_epoch = epoch;
1472 tconn->epochs++;
b411b363
PR
1473 } else {
1474 /* The current_epoch got recycled while we allocated this one... */
1475 kfree(epoch);
1476 }
12038a3a 1477 spin_unlock(&tconn->epoch_lock);
b411b363 1478
82bc0194 1479 return 0;
b411b363
PR
1480}
1481
1482/* used from receive_RSDataReply (recv_resync_read)
1483 * and from receive_Data */
f6ffca9f
AG
1484static struct drbd_peer_request *
1485read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
1486 int data_size) __must_hold(local)
b411b363 1487{
6666032a 1488 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
db830c46 1489 struct drbd_peer_request *peer_req;
b411b363 1490 struct page *page;
a5c31904 1491 int dgs, ds, err;
a0638456
PR
1492 void *dig_in = mdev->tconn->int_dig_in;
1493 void *dig_vv = mdev->tconn->int_dig_vv;
6b4388ac 1494 unsigned long *data;
b411b363 1495
88104ca4
AG
1496 dgs = 0;
1497 if (mdev->tconn->peer_integrity_tfm) {
1498 dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
9f5bdc33
AG
1499 /*
1500 * FIXME: Receive the incoming digest into the receive buffer
1501 * here, together with its struct p_data?
1502 */
a5c31904
AG
1503 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1504 if (err)
b411b363 1505 return NULL;
88104ca4 1506 data_size -= dgs;
b411b363
PR
1507 }
1508
841ce241
AG
1509 if (!expect(IS_ALIGNED(data_size, 512)))
1510 return NULL;
1511 if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
1512 return NULL;
b411b363 1513
6666032a
LE
 1514 /* even though we trust our peer,
1515 * we sometimes have to double check. */
1516 if (sector + (data_size>>9) > capacity) {
fdda6544
LE
1517 dev_err(DEV, "request from peer beyond end of local disk: "
1518 "capacity: %llus < sector: %llus + size: %u\n",
6666032a
LE
1519 (unsigned long long)capacity,
1520 (unsigned long long)sector, data_size);
1521 return NULL;
1522 }
1523
b411b363
PR
1524 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1525 * "criss-cross" setup, that might cause write-out on some other DRBD,
1526 * which in turn might block on the other node at this very place. */
0db55363 1527 peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);
db830c46 1528 if (!peer_req)
b411b363 1529 return NULL;
45bb912b 1530
a73ff323 1531 if (!data_size)
81a3537a 1532 return peer_req;
a73ff323 1533
b411b363 1534 ds = data_size;
db830c46 1535 page = peer_req->pages;
45bb912b
LE
1536 page_chain_for_each(page) {
1537 unsigned len = min_t(int, ds, PAGE_SIZE);
6b4388ac 1538 data = kmap(page);
a5c31904 1539 err = drbd_recv_all_warn(mdev->tconn, data, len);
0cf9d27e 1540 if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
6b4388ac
PR
1541 dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1542 data[0] = data[0] ^ (unsigned long)-1;
1543 }
b411b363 1544 kunmap(page);
a5c31904 1545 if (err) {
3967deb1 1546 drbd_free_peer_req(mdev, peer_req);
b411b363
PR
1547 return NULL;
1548 }
a5c31904 1549 ds -= len;
b411b363
PR
1550 }
1551
1552 if (dgs) {
5b614abe 1553 drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
b411b363 1554 if (memcmp(dig_in, dig_vv, dgs)) {
470be44a
LE
1555 dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
1556 (unsigned long long)sector, data_size);
3967deb1 1557 drbd_free_peer_req(mdev, peer_req);
b411b363
PR
1558 return NULL;
1559 }
1560 }
1561 mdev->recv_cnt += data_size>>9;
db830c46 1562 return peer_req;
b411b363
PR
1563}
1564
1565/* drbd_drain_block() just takes a data block
1566 * out of the socket input buffer, and discards it.
1567 */
1568static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1569{
1570 struct page *page;
a5c31904 1571 int err = 0;
b411b363
PR
1572 void *data;
1573
c3470cde 1574 if (!data_size)
fc5be839 1575 return 0;
c3470cde 1576
c37c8ecf 1577 page = drbd_alloc_pages(mdev, 1, 1);
b411b363
PR
1578
1579 data = kmap(page);
1580 while (data_size) {
fc5be839
AG
1581 unsigned int len = min_t(int, data_size, PAGE_SIZE);
1582
a5c31904
AG
1583 err = drbd_recv_all_warn(mdev->tconn, data, len);
1584 if (err)
b411b363 1585 break;
a5c31904 1586 data_size -= len;
b411b363
PR
1587 }
1588 kunmap(page);
5cc287e0 1589 drbd_free_pages(mdev, page, 0);
fc5be839 1590 return err;
b411b363
PR
1591}
1592
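/* Receive the payload of a P_DATA_REPLY directly into the bio of the
 * original read request, verifying the data digest if one is configured. */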
1593static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1594 sector_t sector, int data_size)
1595{
1596 struct bio_vec *bvec;
1597 struct bio *bio;
a5c31904 1598 int dgs, err, i, expect;
a0638456
PR
1599 void *dig_in = mdev->tconn->int_dig_in;
1600 void *dig_vv = mdev->tconn->int_dig_vv;
b411b363 1601
88104ca4
AG
1602 dgs = 0;
1603 if (mdev->tconn->peer_integrity_tfm) {
1604 dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
a5c31904
AG
1605 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1606 if (err)
1607 return err;
88104ca4 1608 data_size -= dgs;
b411b363
PR
1609 }
1610
b411b363
PR
1611 /* optimistically update recv_cnt. if receiving fails below,
1612 * we disconnect anyways, and counters will be reset. */
1613 mdev->recv_cnt += data_size>>9;
1614
1615 bio = req->master_bio;
1616 D_ASSERT(sector == bio->bi_sector);
1617
1618 bio_for_each_segment(bvec, bio, i) {
a5c31904 1619 void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
b411b363 1620 expect = min_t(int, data_size, bvec->bv_len);
a5c31904 1621 err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
b411b363 1622 kunmap(bvec->bv_page);
a5c31904
AG
1623 if (err)
1624 return err;
1625 data_size -= expect;
b411b363
PR
1626 }
1627
1628 if (dgs) {
5b614abe 1629 drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
b411b363
PR
1630 if (memcmp(dig_in, dig_vv, dgs)) {
1631 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
28284cef 1632 return -EINVAL;
b411b363
PR
1633 }
1634 }
1635
1636 D_ASSERT(data_size == 0);
28284cef 1637 return 0;
b411b363
PR
1638}
1639
a990be46
AG
1640/*
1641 * e_end_resync_block() is called in asender context via
1642 * drbd_finish_peer_reqs().
1643 */
99920dc5 1644static int e_end_resync_block(struct drbd_work *w, int unused)
b411b363 1645{
8050e6d0
AG
1646 struct drbd_peer_request *peer_req =
1647 container_of(w, struct drbd_peer_request, w);
00d56944 1648 struct drbd_conf *mdev = w->mdev;
db830c46 1649 sector_t sector = peer_req->i.sector;
99920dc5 1650 int err;
b411b363 1651
db830c46 1652 D_ASSERT(drbd_interval_empty(&peer_req->i));
b411b363 1653
db830c46
AG
1654 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1655 drbd_set_in_sync(mdev, sector, peer_req->i.size);
99920dc5 1656 err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
b411b363
PR
1657 } else {
1658 /* Record failure to sync */
db830c46 1659 drbd_rs_failed_io(mdev, sector, peer_req->i.size);
b411b363 1660
99920dc5 1661 err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
b411b363
PR
1662 }
1663 dec_unacked(mdev);
1664
99920dc5 1665 return err;
b411b363
PR
1666}
1667
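/* Read an incoming resync data block into a new peer request and submit
 * it as a write to the local disk; the ack is sent later from
 * e_end_resync_block(). */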
1668static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1669{
db830c46 1670 struct drbd_peer_request *peer_req;
b411b363 1671
db830c46
AG
1672 peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
1673 if (!peer_req)
45bb912b 1674 goto fail;
b411b363
PR
1675
1676 dec_rs_pending(mdev);
1677
b411b363
PR
1678 inc_unacked(mdev);
1679 /* corresponding dec_unacked() in e_end_resync_block()
1680 * respective _drbd_clear_done_ee */
1681
db830c46 1682 peer_req->w.cb = e_end_resync_block;
45bb912b 1683
87eeee41 1684 spin_lock_irq(&mdev->tconn->req_lock);
db830c46 1685 list_add(&peer_req->w.list, &mdev->sync_ee);
87eeee41 1686 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 1687
0f0601f4 1688 atomic_add(data_size >> 9, &mdev->rs_sect_ev);
fbe29dec 1689 if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
e1c1b0fc 1690 return 0;
b411b363 1691
10f6d992
LE
1692 /* don't care for the reason here */
1693 dev_err(DEV, "submit failed, triggering re-connect\n");
87eeee41 1694 spin_lock_irq(&mdev->tconn->req_lock);
db830c46 1695 list_del(&peer_req->w.list);
87eeee41 1696 spin_unlock_irq(&mdev->tconn->req_lock);
22cc37a9 1697
3967deb1 1698 drbd_free_peer_req(mdev, peer_req);
45bb912b
LE
1699fail:
1700 put_ldev(mdev);
e1c1b0fc 1701 return -EIO;
b411b363
PR
1702}
1703
668eebc6 1704static struct drbd_request *
bc9c5c41
AG
1705find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
1706 sector_t sector, bool missing_ok, const char *func)
51624585 1707{
51624585
AG
1708 struct drbd_request *req;
1709
bc9c5c41
AG
1710 /* Request object according to our peer */
1711 req = (struct drbd_request *)(unsigned long)id;
5e472264 1712 if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
668eebc6 1713 return req;
c3afd8f5 1714 if (!missing_ok) {
5af172ed 1715 dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
c3afd8f5
AG
1716 (unsigned long)id, (unsigned long long)sector);
1717 }
51624585 1718 return NULL;
b411b363
PR
1719}
1720
4a76b161 1721static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 1722{
4a76b161 1723 struct drbd_conf *mdev;
b411b363
PR
1724 struct drbd_request *req;
1725 sector_t sector;
82bc0194 1726 int err;
e658983a 1727 struct p_data *p = pi->data;
4a76b161
AG
1728
1729 mdev = vnr_to_mdev(tconn, pi->vnr);
1730 if (!mdev)
1731 return -EIO;
b411b363
PR
1732
1733 sector = be64_to_cpu(p->sector);
1734
87eeee41 1735 spin_lock_irq(&mdev->tconn->req_lock);
bc9c5c41 1736 req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
87eeee41 1737 spin_unlock_irq(&mdev->tconn->req_lock);
c3afd8f5 1738 if (unlikely(!req))
82bc0194 1739 return -EIO;
b411b363 1740
24c4830c 1741 /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
b411b363
PR
1742 * special casing it there for the various failure cases.
1743 * still no race with drbd_fail_pending_reads */
e2857216 1744 err = recv_dless_read(mdev, req, sector, pi->size);
82bc0194 1745 if (!err)
8554df1c 1746 req_mod(req, DATA_RECEIVED);
b411b363
PR
1747 /* else: nothing. handled from drbd_disconnect...
1748 * I don't think we may complete this just yet
1749 * in case we are "on-disconnect: freeze" */
1750
82bc0194 1751 return err;
b411b363
PR
1752}
1753
4a76b161 1754static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 1755{
4a76b161 1756 struct drbd_conf *mdev;
b411b363 1757 sector_t sector;
82bc0194 1758 int err;
e658983a 1759 struct p_data *p = pi->data;
4a76b161
AG
1760
1761 mdev = vnr_to_mdev(tconn, pi->vnr);
1762 if (!mdev)
1763 return -EIO;
b411b363
PR
1764
1765 sector = be64_to_cpu(p->sector);
1766 D_ASSERT(p->block_id == ID_SYNCER);
1767
1768 if (get_ldev(mdev)) {
1769 /* data is submitted to disk within recv_resync_read.
1770 * corresponding put_ldev done below on error,
fcefa62e 1771 * or in drbd_peer_request_endio. */
e2857216 1772 err = recv_resync_read(mdev, sector, pi->size);
b411b363
PR
1773 } else {
1774 if (__ratelimit(&drbd_ratelimit_state))
1775 dev_err(DEV, "Can not write resync data to local disk.\n");
1776
e2857216 1777 err = drbd_drain_block(mdev, pi->size);
b411b363 1778
e2857216 1779 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
b411b363
PR
1780 }
1781
e2857216 1782 atomic_add(pi->size >> 9, &mdev->rs_sect_in);
778f271d 1783
82bc0194 1784 return err;
b411b363
PR
1785}
1786
7be8da07
AG
1787static void restart_conflicting_writes(struct drbd_conf *mdev,
1788 sector_t sector, int size)
b411b363 1789{
7be8da07
AG
1790 struct drbd_interval *i;
1791 struct drbd_request *req;
1792
1793 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
1794 if (!i->local)
1795 continue;
1796 req = container_of(i, struct drbd_request, i);
1797 if (req->rq_state & RQ_LOCAL_PENDING ||
1798 !(req->rq_state & RQ_POSTPONED))
1799 continue;
2312f0b3
LE
1800 /* as it is RQ_POSTPONED, this will cause it to
1801 * be queued on the retry workqueue. */
d4dabbe2 1802 __req_mod(req, CONFLICT_RESOLVED, NULL);
7be8da07
AG
1803 }
1804}
b411b363 1805
a990be46
AG
1806/*
1807 * e_end_block() is called in asender context via drbd_finish_peer_reqs().
b411b363 1808 */
99920dc5 1809static int e_end_block(struct drbd_work *w, int cancel)
b411b363 1810{
8050e6d0
AG
1811 struct drbd_peer_request *peer_req =
1812 container_of(w, struct drbd_peer_request, w);
00d56944 1813 struct drbd_conf *mdev = w->mdev;
db830c46 1814 sector_t sector = peer_req->i.sector;
99920dc5 1815 int err = 0, pcmd;
b411b363 1816
303d1448 1817 if (peer_req->flags & EE_SEND_WRITE_ACK) {
db830c46 1818 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
b411b363
PR
1819 pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1820 mdev->state.conn <= C_PAUSED_SYNC_T &&
db830c46 1821 peer_req->flags & EE_MAY_SET_IN_SYNC) ?
b411b363 1822 P_RS_WRITE_ACK : P_WRITE_ACK;
99920dc5 1823 err = drbd_send_ack(mdev, pcmd, peer_req);
b411b363 1824 if (pcmd == P_RS_WRITE_ACK)
db830c46 1825 drbd_set_in_sync(mdev, sector, peer_req->i.size);
b411b363 1826 } else {
99920dc5 1827 err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
b411b363
PR
1828 /* we expect it to be marked out of sync anyways...
1829 * maybe assert this? */
1830 }
1831 dec_unacked(mdev);
1832 }
1833 /* we delete from the conflict detection hash _after_ we sent out the
1834 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
302bdeae 1835 if (peer_req->flags & EE_IN_INTERVAL_TREE) {
87eeee41 1836 spin_lock_irq(&mdev->tconn->req_lock);
db830c46
AG
1837 D_ASSERT(!drbd_interval_empty(&peer_req->i));
1838 drbd_remove_epoch_entry_interval(mdev, peer_req);
7be8da07
AG
1839 if (peer_req->flags & EE_RESTART_REQUESTS)
1840 restart_conflicting_writes(mdev, sector, peer_req->i.size);
87eeee41 1841 spin_unlock_irq(&mdev->tconn->req_lock);
bb3bfe96 1842 } else
db830c46 1843 D_ASSERT(drbd_interval_empty(&peer_req->i));
b411b363 1844
1e9dd291 1845 drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
b411b363 1846
99920dc5 1847 return err;
b411b363
PR
1848}
1849
7be8da07 1850static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
b411b363 1851{
7be8da07 1852 struct drbd_conf *mdev = w->mdev;
8050e6d0
AG
1853 struct drbd_peer_request *peer_req =
1854 container_of(w, struct drbd_peer_request, w);
99920dc5 1855 int err;
b411b363 1856
99920dc5 1857 err = drbd_send_ack(mdev, ack, peer_req);
b411b363
PR
1858 dec_unacked(mdev);
1859
99920dc5 1860 return err;
b411b363
PR
1861}
1862
d4dabbe2 1863static int e_send_superseded(struct drbd_work *w, int unused)
7be8da07 1864{
d4dabbe2 1865 return e_send_ack(w, P_SUPERSEDED);
7be8da07
AG
1866}
1867
99920dc5 1868static int e_send_retry_write(struct drbd_work *w, int unused)
7be8da07
AG
1869{
1870 struct drbd_tconn *tconn = w->mdev->tconn;
1871
1872 return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
d4dabbe2 1873 P_RETRY_WRITE : P_SUPERSEDED);
7be8da07 1874}
b411b363 1875
3e394da1
AG
1876static bool seq_greater(u32 a, u32 b)
1877{
1878 /*
1879 * We assume 32-bit wrap-around here.
1880 * For 24-bit wrap-around, we would have to shift:
1881 * a <<= 8; b <<= 8;
1882 */
1883 return (s32)a - (s32)b > 0;
1884}
b411b363 1885
3e394da1
AG
1886static u32 seq_max(u32 a, u32 b)
1887{
1888 return seq_greater(a, b) ? a : b;
b411b363
PR
1889}
1890
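Editor's illustration, not part of drbd_receiver.c: the signed-difference comparison in seq_greater() stays correct across the 32-bit wrap. A minimal standalone sketch, with example values chosen purely for demonstration:

#include <assert.h>
#include <stdint.h>

/* Same idea as seq_greater() above, written with the well-defined
 * unsigned subtraction: a is "newer" than b iff the signed view of
 * (a - b) is positive. */
static int example_seq_greater(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	assert(example_seq_greater(10, 5));           /* plain case */
	assert(example_seq_greater(5, 0xfffffffbu));  /* 5 is 10 steps past 0xfffffffb */
	assert(!example_seq_greater(0xfffffffbu, 5)); /* not the other way around */
	return 0;
}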
7be8da07 1891static bool need_peer_seq(struct drbd_conf *mdev)
b411b363 1892{
7be8da07 1893 struct drbd_tconn *tconn = mdev->tconn;
302bdeae 1894 int tp;
b411b363 1895
7be8da07
AG
1896 /*
1897 * We only need to keep track of the last packet_seq number of our peer
427c0434 1898 * if we are in dual-primary mode and we have the resolve-conflicts flag set; see
7be8da07
AG
1899 * handle_write_conflicts().
1900 */
b411b363 1901
302bdeae
PR
1902 rcu_read_lock();
1903 tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
1904 rcu_read_unlock();
b411b363 1905
427c0434 1906 return tp && test_bit(RESOLVE_CONFLICTS, &tconn->flags);
7be8da07 1907}
b411b363 1908
43ae077d 1909static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
3e394da1 1910{
3c13b680 1911 unsigned int newest_peer_seq;
3e394da1 1912
7be8da07
AG
1913 if (need_peer_seq(mdev)) {
1914 spin_lock(&mdev->peer_seq_lock);
3c13b680
LE
1915 newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
1916 mdev->peer_seq = newest_peer_seq;
7be8da07 1917 spin_unlock(&mdev->peer_seq_lock);
3c13b680
LE
1918 /* wake up only if we actually changed mdev->peer_seq */
1919 if (peer_seq == newest_peer_seq)
7be8da07
AG
1920 wake_up(&mdev->seq_wait);
1921 }
b411b363
PR
1922}
1923
d93f6302 1924static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
b6a370ba 1925{
d93f6302
LE
1926 return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
1927}
b6a370ba 1928
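Editor's note, not part of the original file: in overlaps() the sector arguments count 512-byte sectors while the lengths are given in bytes, hence the >>9. A worked example with assumed values:

/* peer request:   sector 1000, 4096 bytes -> covers sectors [1000, 1008)
 * resync request: sector 1007,  512 bytes -> covers sectors [1007, 1008)
 *
 *   1000 + (4096 >> 9) = 1008 <= 1007  -> false
 *   1000 >= 1007 + (512 >> 9) = 1008   -> false
 *
 * Neither "ends before the other starts" condition holds, so the two
 * ranges overlap and overlaps() returns true. Merely adjacent ranges
 * (e.g. [1000, 1008) and [1008, 1016)) do not overlap.
 */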
d93f6302 1929/* maybe change sync_ee into interval trees as well? */
3ea35df8 1930static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
d93f6302
LE
1931{
1932 struct drbd_peer_request *rs_req;
b6a370ba
PR
1933 bool rv = 0;
1934
d93f6302
LE
1935 spin_lock_irq(&mdev->tconn->req_lock);
1936 list_for_each_entry(rs_req, &mdev->sync_ee, w.list) {
1937 if (overlaps(peer_req->i.sector, peer_req->i.size,
1938 rs_req->i.sector, rs_req->i.size)) {
b6a370ba
PR
1939 rv = 1;
1940 break;
1941 }
1942 }
d93f6302 1943 spin_unlock_irq(&mdev->tconn->req_lock);
b6a370ba
PR
1944
1945 return rv;
1946}
1947
b411b363
PR
1948/* Called from receive_Data.
1949 * Synchronize packets on sock with packets on msock.
1950 *
1951 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1952 * packet traveling on msock, they are still processed in the order they have
1953 * been sent.
1954 *
1955 * Note: we don't care for Ack packets overtaking P_DATA packets.
1956 *
1957 * In case packet_seq is larger than mdev->peer_seq number, there are
1958 * outstanding packets on the msock. We wait for them to arrive.
 1959 * In case the received packet is the logically next one, we update mdev->peer_seq
1960 * ourselves. Correctly handles 32bit wrap around.
1961 *
1962 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1963 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1964 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1965 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
1966 *
1967 * returns 0 if we may process the packet,
1968 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
7be8da07 1969static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
b411b363
PR
1970{
1971 DEFINE_WAIT(wait);
b411b363 1972 long timeout;
7be8da07
AG
1973 int ret;
1974
1975 if (!need_peer_seq(mdev))
1976 return 0;
1977
b411b363
PR
1978 spin_lock(&mdev->peer_seq_lock);
1979 for (;;) {
7be8da07
AG
1980 if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
1981 mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
1982 ret = 0;
b411b363 1983 break;
7be8da07 1984 }
b411b363
PR
1985 if (signal_pending(current)) {
1986 ret = -ERESTARTSYS;
1987 break;
1988 }
7be8da07 1989 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
b411b363 1990 spin_unlock(&mdev->peer_seq_lock);
44ed167d
PR
1991 rcu_read_lock();
1992 timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10;
1993 rcu_read_unlock();
71b1c1eb 1994 timeout = schedule_timeout(timeout);
b411b363 1995 spin_lock(&mdev->peer_seq_lock);
7be8da07 1996 if (!timeout) {
b411b363 1997 ret = -ETIMEDOUT;
71b1c1eb 1998 dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
b411b363
PR
1999 break;
2000 }
2001 }
b411b363 2002 spin_unlock(&mdev->peer_seq_lock);
7be8da07 2003 finish_wait(&mdev->seq_wait, &wait);
b411b363
PR
2004 return ret;
2005}
2006
688593c5
LE
2007/* see also bio_flags_to_wire()
2008 * DRBD_REQ_*, because we need to semantically map the flags to data packet
2009 * flags and back. We may replicate to other kernel versions. */
2010static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
76d2e7ec 2011{
688593c5
LE
2012 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
2013 (dpf & DP_FUA ? REQ_FUA : 0) |
2014 (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
2015 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
76d2e7ec
PR
2016}
2017
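For orientation, a sketch of the inverse mapping referred to in the comment above. The real bio_flags_to_wire() lives in drbd_main.c and also takes the agreed protocol version into account; the version below is an editor illustration using only the DP_*/REQ_* names already visible here, not a verbatim copy:

/* Sketch only: translate bio flags into wire (DP_*) flags so that the
 * peer's wire_flags_to_bio() can reconstruct an equivalent request. */
static u32 example_bio_flags_to_wire(unsigned long bi_rw)
{
	return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
	       (bi_rw & REQ_FUA ? DP_FUA : 0) |
	       (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
	       (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
}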
7be8da07
AG
2018static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
2019 unsigned int size)
2020{
2021 struct drbd_interval *i;
2022
2023 repeat:
2024 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
2025 struct drbd_request *req;
2026 struct bio_and_error m;
2027
2028 if (!i->local)
2029 continue;
2030 req = container_of(i, struct drbd_request, i);
2031 if (!(req->rq_state & RQ_POSTPONED))
2032 continue;
2033 req->rq_state &= ~RQ_POSTPONED;
2034 __req_mod(req, NEG_ACKED, &m);
2035 spin_unlock_irq(&mdev->tconn->req_lock);
2036 if (m.bio)
2037 complete_master_bio(mdev, &m);
2038 spin_lock_irq(&mdev->tconn->req_lock);
2039 goto repeat;
2040 }
2041}
2042
2043static int handle_write_conflicts(struct drbd_conf *mdev,
2044 struct drbd_peer_request *peer_req)
2045{
2046 struct drbd_tconn *tconn = mdev->tconn;
427c0434 2047 bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &tconn->flags);
7be8da07
AG
2048 sector_t sector = peer_req->i.sector;
2049 const unsigned int size = peer_req->i.size;
2050 struct drbd_interval *i;
2051 bool equal;
2052 int err;
2053
2054 /*
2055 * Inserting the peer request into the write_requests tree will prevent
2056 * new conflicting local requests from being added.
2057 */
2058 drbd_insert_interval(&mdev->write_requests, &peer_req->i);
2059
2060 repeat:
2061 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
2062 if (i == &peer_req->i)
2063 continue;
2064
2065 if (!i->local) {
2066 /*
2067 * Our peer has sent a conflicting remote request; this
2068 * should not happen in a two-node setup. Wait for the
2069 * earlier peer request to complete.
2070 */
2071 err = drbd_wait_misc(mdev, i);
2072 if (err)
2073 goto out;
2074 goto repeat;
2075 }
2076
2077 equal = i->sector == sector && i->size == size;
2078 if (resolve_conflicts) {
2079 /*
2080 * If the peer request is fully contained within the
d4dabbe2
LE
2081 * overlapping request, it can be considered overwritten
2082 * and thus superseded; otherwise, it will be retried
2083 * once all overlapping requests have completed.
7be8da07 2084 */
d4dabbe2 2085 bool superseded = i->sector <= sector && i->sector +
7be8da07
AG
2086 (i->size >> 9) >= sector + (size >> 9);
2087
2088 if (!equal)
2089 dev_alert(DEV, "Concurrent writes detected: "
2090 "local=%llus +%u, remote=%llus +%u, "
2091 "assuming %s came first\n",
2092 (unsigned long long)i->sector, i->size,
2093 (unsigned long long)sector, size,
d4dabbe2 2094 superseded ? "local" : "remote");
7be8da07
AG
2095
2096 inc_unacked(mdev);
d4dabbe2 2097 peer_req->w.cb = superseded ? e_send_superseded :
7be8da07
AG
2098 e_send_retry_write;
2099 list_add_tail(&peer_req->w.list, &mdev->done_ee);
2100 wake_asender(mdev->tconn);
2101
2102 err = -ENOENT;
2103 goto out;
2104 } else {
2105 struct drbd_request *req =
2106 container_of(i, struct drbd_request, i);
2107
2108 if (!equal)
2109 dev_alert(DEV, "Concurrent writes detected: "
2110 "local=%llus +%u, remote=%llus +%u\n",
2111 (unsigned long long)i->sector, i->size,
2112 (unsigned long long)sector, size);
2113
2114 if (req->rq_state & RQ_LOCAL_PENDING ||
2115 !(req->rq_state & RQ_POSTPONED)) {
2116 /*
2117 * Wait for the node with the discard flag to
d4dabbe2
LE
2118 * decide if this request has been superseded
2119 * or needs to be retried.
2120 * Requests that have been superseded will
7be8da07
AG
2121 * disappear from the write_requests tree.
2122 *
2123 * In addition, wait for the conflicting
2124 * request to finish locally before submitting
2125 * the conflicting peer request.
2126 */
2127 err = drbd_wait_misc(mdev, &req->i);
2128 if (err) {
2129 _conn_request_state(mdev->tconn,
2130 NS(conn, C_TIMEOUT),
2131 CS_HARD);
2132 fail_postponed_requests(mdev, sector, size);
2133 goto out;
2134 }
2135 goto repeat;
2136 }
2137 /*
2138 * Remember to restart the conflicting requests after
2139 * the new peer request has completed.
2140 */
2141 peer_req->flags |= EE_RESTART_REQUESTS;
2142 }
2143 }
2144 err = 0;
2145
2146 out:
2147 if (err)
2148 drbd_remove_epoch_entry_interval(mdev, peer_req);
2149 return err;
2150}
2151
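Worked example for the containment test in handle_write_conflicts() above (editor illustration, all numbers assumed): a peer write is "superseded" when an overlapping local request fully covers it.

/* local request i: sector 2048, 8192 bytes -> sectors [2048, 2064)
 * peer write:      sector 2056, 4096 bytes -> sectors [2056, 2064)
 *
 *   i->sector (2048) <= sector (2056)                                -> true
 *   i->sector + (i->size >> 9) = 2064 >= sector + (size >> 9) = 2064 -> true
 *
 * The peer write lies entirely inside the local one, so it is treated as
 * overwritten: the peer gets P_SUPERSEDED (e_send_superseded) instead of
 * a retry request.
 */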
b411b363 2152/* mirrored write */
4a76b161 2153static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 2154{
4a76b161 2155 struct drbd_conf *mdev;
b411b363 2156 sector_t sector;
db830c46 2157 struct drbd_peer_request *peer_req;
e658983a 2158 struct p_data *p = pi->data;
7be8da07 2159 u32 peer_seq = be32_to_cpu(p->seq_num);
b411b363
PR
2160 int rw = WRITE;
2161 u32 dp_flags;
302bdeae 2162 int err, tp;
b411b363 2163
4a76b161
AG
2164 mdev = vnr_to_mdev(tconn, pi->vnr);
2165 if (!mdev)
2166 return -EIO;
b411b363 2167
7be8da07 2168 if (!get_ldev(mdev)) {
82bc0194
AG
2169 int err2;
2170
7be8da07 2171 err = wait_for_and_update_peer_seq(mdev, peer_seq);
e2857216 2172 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
12038a3a 2173 atomic_inc(&tconn->current_epoch->epoch_size);
e2857216 2174 err2 = drbd_drain_block(mdev, pi->size);
82bc0194
AG
2175 if (!err)
2176 err = err2;
2177 return err;
b411b363
PR
2178 }
2179
fcefa62e
AG
2180 /*
2181 * Corresponding put_ldev done either below (on various errors), or in
2182 * drbd_peer_request_endio, if we successfully submit the data at the
2183 * end of this function.
2184 */
b411b363
PR
2185
2186 sector = be64_to_cpu(p->sector);
e2857216 2187 peer_req = read_in_block(mdev, p->block_id, sector, pi->size);
db830c46 2188 if (!peer_req) {
b411b363 2189 put_ldev(mdev);
82bc0194 2190 return -EIO;
b411b363
PR
2191 }
2192
db830c46 2193 peer_req->w.cb = e_end_block;
b411b363 2194
688593c5
LE
2195 dp_flags = be32_to_cpu(p->dp_flags);
2196 rw |= wire_flags_to_bio(mdev, dp_flags);
81a3537a
LE
2197 if (peer_req->pages == NULL) {
2198 D_ASSERT(peer_req->i.size == 0);
a73ff323
LE
2199 D_ASSERT(dp_flags & DP_FLUSH);
2200 }
688593c5
LE
2201
2202 if (dp_flags & DP_MAY_SET_IN_SYNC)
db830c46 2203 peer_req->flags |= EE_MAY_SET_IN_SYNC;
688593c5 2204
12038a3a
PR
2205 spin_lock(&tconn->epoch_lock);
2206 peer_req->epoch = tconn->current_epoch;
db830c46
AG
2207 atomic_inc(&peer_req->epoch->epoch_size);
2208 atomic_inc(&peer_req->epoch->active);
12038a3a 2209 spin_unlock(&tconn->epoch_lock);
b411b363 2210
302bdeae
PR
2211 rcu_read_lock();
2212 tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
2213 rcu_read_unlock();
2214 if (tp) {
2215 peer_req->flags |= EE_IN_INTERVAL_TREE;
7be8da07
AG
2216 err = wait_for_and_update_peer_seq(mdev, peer_seq);
2217 if (err)
b411b363 2218 goto out_interrupted;
87eeee41 2219 spin_lock_irq(&mdev->tconn->req_lock);
7be8da07
AG
2220 err = handle_write_conflicts(mdev, peer_req);
2221 if (err) {
2222 spin_unlock_irq(&mdev->tconn->req_lock);
2223 if (err == -ENOENT) {
b411b363 2224 put_ldev(mdev);
82bc0194 2225 return 0;
b411b363 2226 }
7be8da07 2227 goto out_interrupted;
b411b363 2228 }
7be8da07
AG
2229 } else
2230 spin_lock_irq(&mdev->tconn->req_lock);
db830c46 2231 list_add(&peer_req->w.list, &mdev->active_ee);
87eeee41 2232 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 2233
d93f6302 2234 if (mdev->state.conn == C_SYNC_TARGET)
3ea35df8 2235 wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, peer_req));
b411b363 2236
303d1448 2237 if (mdev->tconn->agreed_pro_version < 100) {
44ed167d
PR
2238 rcu_read_lock();
2239 switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
303d1448
PR
2240 case DRBD_PROT_C:
2241 dp_flags |= DP_SEND_WRITE_ACK;
2242 break;
2243 case DRBD_PROT_B:
2244 dp_flags |= DP_SEND_RECEIVE_ACK;
2245 break;
b411b363 2246 }
44ed167d 2247 rcu_read_unlock();
b411b363
PR
2248 }
2249
303d1448
PR
2250 if (dp_flags & DP_SEND_WRITE_ACK) {
2251 peer_req->flags |= EE_SEND_WRITE_ACK;
b411b363
PR
2252 inc_unacked(mdev);
2253 /* corresponding dec_unacked() in e_end_block()
 2254 * or in _drbd_clear_done_ee, respectively */
303d1448
PR
2255 }
2256
2257 if (dp_flags & DP_SEND_RECEIVE_ACK) {
b411b363
PR
2258 /* I really don't like it that the receiver thread
2259 * sends on the msock, but anyways */
db830c46 2260 drbd_send_ack(mdev, P_RECV_ACK, peer_req);
b411b363
PR
2261 }
2262
6719fb03 2263 if (mdev->state.pdsk < D_INCONSISTENT) {
b411b363 2264 /* In case we have the only disk of the cluster, */
db830c46
AG
2265 drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
2266 peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
2267 peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
181286ad 2268 drbd_al_begin_io(mdev, &peer_req->i);
b411b363
PR
2269 }
2270
82bc0194
AG
2271 err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
2272 if (!err)
2273 return 0;
b411b363 2274
10f6d992
LE
2275 /* don't care for the reason here */
2276 dev_err(DEV, "submit failed, triggering re-connect\n");
87eeee41 2277 spin_lock_irq(&mdev->tconn->req_lock);
db830c46
AG
2278 list_del(&peer_req->w.list);
2279 drbd_remove_epoch_entry_interval(mdev, peer_req);
87eeee41 2280 spin_unlock_irq(&mdev->tconn->req_lock);
db830c46 2281 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
181286ad 2282 drbd_al_complete_io(mdev, &peer_req->i);
22cc37a9 2283
b411b363 2284out_interrupted:
1e9dd291 2285 drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP);
b411b363 2286 put_ldev(mdev);
3967deb1 2287 drbd_free_peer_req(mdev, peer_req);
82bc0194 2288 return err;
b411b363
PR
2289}
2290
0f0601f4
LE
2291/* We may throttle resync, if the lower device seems to be busy,
2292 * and current sync rate is above c_min_rate.
2293 *
2294 * To decide whether or not the lower device is busy, we use a scheme similar
 2295 * to MD RAID is_mddev_idle(): if the partition stats reveal a "significant"
 2296 * amount (more than 64 sectors) of activity we cannot account for with our own resync
2297 * activity, it obviously is "busy".
2298 *
2299 * The current sync rate used here uses only the most recent two step marks,
2300 * to have a short time average so we can react faster.
2301 */
e3555d85 2302int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
0f0601f4
LE
2303{
2304 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
2305 unsigned long db, dt, dbdt;
e3555d85 2306 struct lc_element *tmp;
0f0601f4
LE
2307 int curr_events;
2308 int throttle = 0;
daeda1cc
PR
2309 unsigned int c_min_rate;
2310
2311 rcu_read_lock();
2312 c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;
2313 rcu_read_unlock();
0f0601f4
LE
2314
2315 /* feature disabled? */
daeda1cc 2316 if (c_min_rate == 0)
0f0601f4
LE
2317 return 0;
2318
e3555d85
PR
2319 spin_lock_irq(&mdev->al_lock);
2320 tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
2321 if (tmp) {
2322 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
2323 if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
2324 spin_unlock_irq(&mdev->al_lock);
2325 return 0;
2326 }
2327 /* Do not slow down if app IO is already waiting for this extent */
2328 }
2329 spin_unlock_irq(&mdev->al_lock);
2330
0f0601f4
LE
2331 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
2332 (int)part_stat_read(&disk->part0, sectors[1]) -
2333 atomic_read(&mdev->rs_sect_ev);
e3555d85 2334
0f0601f4
LE
2335 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
2336 unsigned long rs_left;
2337 int i;
2338
2339 mdev->rs_last_events = curr_events;
2340
2341 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2342 * approx. */
2649f080
LE
2343 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2344
2345 if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
2346 rs_left = mdev->ov_left;
2347 else
2348 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
0f0601f4
LE
2349
2350 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
2351 if (!dt)
2352 dt++;
2353 db = mdev->rs_mark_left[i] - rs_left;
2354 dbdt = Bit2KB(db/dt);
2355
daeda1cc 2356 if (dbdt > c_min_rate)
0f0601f4
LE
2357 throttle = 1;
2358 }
2359 return throttle;
2360}
2361
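Worked example of the throttle decision in drbd_rs_should_slow_down() (editor illustration; the numbers and the 4 KiB bitmap granularity behind Bit2KB() are assumptions):

/* Suppose the two most recent sync marks span dt = 6 seconds and
 * db = rs_mark_left[i] - rs_left = 30720 bits were cleared in that window,
 * i.e. 30720 * 4 KiB = 120 MiB resynced.
 *
 *   dbdt = Bit2KB(db / dt) = Bit2KB(5120) = 20480 KiB/s  (about 20 MiB/s)
 *
 * With c_min_rate = 10240 KiB/s the short-term resync rate exceeds the
 * configured minimum, so if the backing device also shows more than 64
 * sectors of unaccounted activity, the function returns 1 and the caller
 * delays the resync request for a while.
 */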
2362
4a76b161 2363static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 2364{
4a76b161 2365 struct drbd_conf *mdev;
b411b363 2366 sector_t sector;
4a76b161 2367 sector_t capacity;
db830c46 2368 struct drbd_peer_request *peer_req;
b411b363 2369 struct digest_info *di = NULL;
b18b37be 2370 int size, verb;
b411b363 2371 unsigned int fault_type;
e658983a 2372 struct p_block_req *p = pi->data;
4a76b161
AG
2373
2374 mdev = vnr_to_mdev(tconn, pi->vnr);
2375 if (!mdev)
2376 return -EIO;
2377 capacity = drbd_get_capacity(mdev->this_bdev);
b411b363
PR
2378
2379 sector = be64_to_cpu(p->sector);
2380 size = be32_to_cpu(p->blksize);
2381
c670a398 2382 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
b411b363
PR
2383 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2384 (unsigned long long)sector, size);
82bc0194 2385 return -EINVAL;
b411b363
PR
2386 }
2387 if (sector + (size>>9) > capacity) {
2388 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2389 (unsigned long long)sector, size);
82bc0194 2390 return -EINVAL;
b411b363
PR
2391 }
2392
2393 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
b18b37be 2394 verb = 1;
e2857216 2395 switch (pi->cmd) {
b18b37be
PR
2396 case P_DATA_REQUEST:
2397 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
2398 break;
2399 case P_RS_DATA_REQUEST:
2400 case P_CSUM_RS_REQUEST:
2401 case P_OV_REQUEST:
2402 drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
2403 break;
2404 case P_OV_REPLY:
2405 verb = 0;
2406 dec_rs_pending(mdev);
2407 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
2408 break;
2409 default:
49ba9b1b 2410 BUG();
b18b37be
PR
2411 }
2412 if (verb && __ratelimit(&drbd_ratelimit_state))
b411b363
PR
2413 dev_err(DEV, "Can not satisfy peer's read request, "
2414 "no local data.\n");
b18b37be 2415
a821cc4a 2416 /* drain possible payload */
e2857216 2417 return drbd_drain_block(mdev, pi->size);
b411b363
PR
2418 }
2419
2420 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2421 * "criss-cross" setup, that might cause write-out on some other DRBD,
2422 * which in turn might block on the other node at this very place. */
0db55363 2423 peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);
db830c46 2424 if (!peer_req) {
b411b363 2425 put_ldev(mdev);
82bc0194 2426 return -ENOMEM;
b411b363
PR
2427 }
2428
e2857216 2429 switch (pi->cmd) {
b411b363 2430 case P_DATA_REQUEST:
db830c46 2431 peer_req->w.cb = w_e_end_data_req;
b411b363 2432 fault_type = DRBD_FAULT_DT_RD;
80a40e43
LE
2433 /* application IO, don't drbd_rs_begin_io */
2434 goto submit;
2435
b411b363 2436 case P_RS_DATA_REQUEST:
db830c46 2437 peer_req->w.cb = w_e_end_rsdata_req;
b411b363 2438 fault_type = DRBD_FAULT_RS_RD;
5f9915bb
LE
2439 /* used in the sector offset progress display */
2440 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
b411b363
PR
2441 break;
2442
2443 case P_OV_REPLY:
2444 case P_CSUM_RS_REQUEST:
2445 fault_type = DRBD_FAULT_RS_RD;
e2857216 2446 di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
b411b363
PR
2447 if (!di)
2448 goto out_free_e;
2449
e2857216 2450 di->digest_size = pi->size;
b411b363
PR
2451 di->digest = (((char *)di)+sizeof(struct digest_info));
2452
db830c46
AG
2453 peer_req->digest = di;
2454 peer_req->flags |= EE_HAS_DIGEST;
c36c3ced 2455
e2857216 2456 if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
b411b363
PR
2457 goto out_free_e;
2458
e2857216 2459 if (pi->cmd == P_CSUM_RS_REQUEST) {
31890f4a 2460 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
db830c46 2461 peer_req->w.cb = w_e_end_csum_rs_req;
5f9915bb
LE
2462 /* used in the sector offset progress display */
2463 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
e2857216 2464 } else if (pi->cmd == P_OV_REPLY) {
2649f080
LE
2465 /* track progress, we may need to throttle */
2466 atomic_add(size >> 9, &mdev->rs_sect_in);
db830c46 2467 peer_req->w.cb = w_e_end_ov_reply;
b411b363 2468 dec_rs_pending(mdev);
0f0601f4
LE
2469 /* drbd_rs_begin_io done when we sent this request,
2470 * but accounting still needs to be done. */
2471 goto submit_for_resync;
b411b363
PR
2472 }
2473 break;
2474
2475 case P_OV_REQUEST:
b411b363 2476 if (mdev->ov_start_sector == ~(sector_t)0 &&
31890f4a 2477 mdev->tconn->agreed_pro_version >= 90) {
de228bba
LE
2478 unsigned long now = jiffies;
2479 int i;
b411b363
PR
2480 mdev->ov_start_sector = sector;
2481 mdev->ov_position = sector;
30b743a2
LE
2482 mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2483 mdev->rs_total = mdev->ov_left;
de228bba
LE
2484 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2485 mdev->rs_mark_left[i] = mdev->ov_left;
2486 mdev->rs_mark_time[i] = now;
2487 }
b411b363
PR
2488 dev_info(DEV, "Online Verify start sector: %llu\n",
2489 (unsigned long long)sector);
2490 }
db830c46 2491 peer_req->w.cb = w_e_end_ov_req;
b411b363 2492 fault_type = DRBD_FAULT_RS_RD;
b411b363
PR
2493 break;
2494
b411b363 2495 default:
49ba9b1b 2496 BUG();
b411b363
PR
2497 }
2498
0f0601f4
LE
2499 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2500 * wrt the receiver, but it is not as straightforward as it may seem.
2501 * Various places in the resync start and stop logic assume resync
2502 * requests are processed in order, requeuing this on the worker thread
2503 * introduces a bunch of new code for synchronization between threads.
2504 *
2505 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2506 * "forever", throttling after drbd_rs_begin_io will lock that extent
2507 * for application writes for the same time. For now, just throttle
2508 * here, where the rest of the code expects the receiver to sleep for
2509 * a while, anyways.
2510 */
2511
2512 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2513 * this defers syncer requests for some time, before letting at least
 2514 * one request through. The resync controller on the receiving side
2515 * will adapt to the incoming rate accordingly.
2516 *
2517 * We cannot throttle here if remote is Primary/SyncTarget:
2518 * we would also throttle its application reads.
2519 * In that case, throttling is done on the SyncTarget only.
2520 */
e3555d85
PR
2521 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2522 schedule_timeout_uninterruptible(HZ/10);
2523 if (drbd_rs_begin_io(mdev, sector))
80a40e43 2524 goto out_free_e;
b411b363 2525
0f0601f4
LE
2526submit_for_resync:
2527 atomic_add(size >> 9, &mdev->rs_sect_ev);
2528
80a40e43 2529submit:
b411b363 2530 inc_unacked(mdev);
87eeee41 2531 spin_lock_irq(&mdev->tconn->req_lock);
db830c46 2532 list_add_tail(&peer_req->w.list, &mdev->read_ee);
87eeee41 2533 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 2534
fbe29dec 2535 if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
82bc0194 2536 return 0;
b411b363 2537
10f6d992
LE
2538 /* don't care for the reason here */
2539 dev_err(DEV, "submit failed, triggering re-connect\n");
87eeee41 2540 spin_lock_irq(&mdev->tconn->req_lock);
db830c46 2541 list_del(&peer_req->w.list);
87eeee41 2542 spin_unlock_irq(&mdev->tconn->req_lock);
22cc37a9
LE
2543 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2544
b411b363 2545out_free_e:
b411b363 2546 put_ldev(mdev);
3967deb1 2547 drbd_free_peer_req(mdev, peer_req);
82bc0194 2548 return -EIO;
b411b363
PR
2549}
2550
2551static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2552{
2553 int self, peer, rv = -100;
2554 unsigned long ch_self, ch_peer;
44ed167d 2555 enum drbd_after_sb_p after_sb_0p;
b411b363
PR
2556
2557 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2558 peer = mdev->p_uuid[UI_BITMAP] & 1;
2559
2560 ch_peer = mdev->p_uuid[UI_SIZE];
2561 ch_self = mdev->comm_bm_set;
2562
44ed167d
PR
2563 rcu_read_lock();
2564 after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p;
2565 rcu_read_unlock();
2566 switch (after_sb_0p) {
b411b363
PR
2567 case ASB_CONSENSUS:
2568 case ASB_DISCARD_SECONDARY:
2569 case ASB_CALL_HELPER:
44ed167d 2570 case ASB_VIOLENTLY:
b411b363
PR
2571 dev_err(DEV, "Configuration error.\n");
2572 break;
2573 case ASB_DISCONNECT:
2574 break;
2575 case ASB_DISCARD_YOUNGER_PRI:
2576 if (self == 0 && peer == 1) {
2577 rv = -1;
2578 break;
2579 }
2580 if (self == 1 && peer == 0) {
2581 rv = 1;
2582 break;
2583 }
2584 /* Else fall through to one of the other strategies... */
2585 case ASB_DISCARD_OLDER_PRI:
2586 if (self == 0 && peer == 1) {
2587 rv = 1;
2588 break;
2589 }
2590 if (self == 1 && peer == 0) {
2591 rv = -1;
2592 break;
2593 }
2594 /* Else fall through to one of the other strategies... */
ad19bf6e 2595 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
b411b363
PR
2596 "Using discard-least-changes instead\n");
2597 case ASB_DISCARD_ZERO_CHG:
2598 if (ch_peer == 0 && ch_self == 0) {
427c0434 2599 rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
b411b363
PR
2600 ? -1 : 1;
2601 break;
2602 } else {
2603 if (ch_peer == 0) { rv = 1; break; }
2604 if (ch_self == 0) { rv = -1; break; }
2605 }
44ed167d 2606 if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
b411b363
PR
2607 break;
2608 case ASB_DISCARD_LEAST_CHG:
2609 if (ch_self < ch_peer)
2610 rv = -1;
2611 else if (ch_self > ch_peer)
2612 rv = 1;
2613 else /* ( ch_self == ch_peer ) */
2614 /* Well, then use something else. */
427c0434 2615 rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
b411b363
PR
2616 ? -1 : 1;
2617 break;
2618 case ASB_DISCARD_LOCAL:
2619 rv = -1;
2620 break;
2621 case ASB_DISCARD_REMOTE:
2622 rv = 1;
2623 }
2624
2625 return rv;
2626}
2627
2628static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2629{
6184ea21 2630 int hg, rv = -100;
44ed167d 2631 enum drbd_after_sb_p after_sb_1p;
b411b363 2632
44ed167d
PR
2633 rcu_read_lock();
2634 after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p;
2635 rcu_read_unlock();
2636 switch (after_sb_1p) {
b411b363
PR
2637 case ASB_DISCARD_YOUNGER_PRI:
2638 case ASB_DISCARD_OLDER_PRI:
2639 case ASB_DISCARD_LEAST_CHG:
2640 case ASB_DISCARD_LOCAL:
2641 case ASB_DISCARD_REMOTE:
44ed167d 2642 case ASB_DISCARD_ZERO_CHG:
b411b363
PR
2643 dev_err(DEV, "Configuration error.\n");
2644 break;
2645 case ASB_DISCONNECT:
2646 break;
2647 case ASB_CONSENSUS:
2648 hg = drbd_asb_recover_0p(mdev);
2649 if (hg == -1 && mdev->state.role == R_SECONDARY)
2650 rv = hg;
2651 if (hg == 1 && mdev->state.role == R_PRIMARY)
2652 rv = hg;
2653 break;
2654 case ASB_VIOLENTLY:
2655 rv = drbd_asb_recover_0p(mdev);
2656 break;
2657 case ASB_DISCARD_SECONDARY:
2658 return mdev->state.role == R_PRIMARY ? 1 : -1;
2659 case ASB_CALL_HELPER:
2660 hg = drbd_asb_recover_0p(mdev);
2661 if (hg == -1 && mdev->state.role == R_PRIMARY) {
bb437946
AG
2662 enum drbd_state_rv rv2;
2663
2664 drbd_set_role(mdev, R_SECONDARY, 0);
b411b363
PR
2665 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2666 * we might be here in C_WF_REPORT_PARAMS which is transient.
2667 * we do not need to wait for the after state change work either. */
bb437946
AG
2668 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2669 if (rv2 != SS_SUCCESS) {
b411b363
PR
2670 drbd_khelper(mdev, "pri-lost-after-sb");
2671 } else {
2672 dev_warn(DEV, "Successfully gave up primary role.\n");
2673 rv = hg;
2674 }
2675 } else
2676 rv = hg;
2677 }
2678
2679 return rv;
2680}
2681
2682static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2683{
6184ea21 2684 int hg, rv = -100;
44ed167d 2685 enum drbd_after_sb_p after_sb_2p;
b411b363 2686
44ed167d
PR
2687 rcu_read_lock();
2688 after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p;
2689 rcu_read_unlock();
2690 switch (after_sb_2p) {
b411b363
PR
2691 case ASB_DISCARD_YOUNGER_PRI:
2692 case ASB_DISCARD_OLDER_PRI:
2693 case ASB_DISCARD_LEAST_CHG:
2694 case ASB_DISCARD_LOCAL:
2695 case ASB_DISCARD_REMOTE:
2696 case ASB_CONSENSUS:
2697 case ASB_DISCARD_SECONDARY:
44ed167d 2698 case ASB_DISCARD_ZERO_CHG:
b411b363
PR
2699 dev_err(DEV, "Configuration error.\n");
2700 break;
2701 case ASB_VIOLENTLY:
2702 rv = drbd_asb_recover_0p(mdev);
2703 break;
2704 case ASB_DISCONNECT:
2705 break;
2706 case ASB_CALL_HELPER:
2707 hg = drbd_asb_recover_0p(mdev);
2708 if (hg == -1) {
bb437946
AG
2709 enum drbd_state_rv rv2;
2710
b411b363
PR
2711 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2712 * we might be here in C_WF_REPORT_PARAMS which is transient.
2713 * we do not need to wait for the after state change work either. */
bb437946
AG
2714 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2715 if (rv2 != SS_SUCCESS) {
b411b363
PR
2716 drbd_khelper(mdev, "pri-lost-after-sb");
2717 } else {
2718 dev_warn(DEV, "Successfully gave up primary role.\n");
2719 rv = hg;
2720 }
2721 } else
2722 rv = hg;
2723 }
2724
2725 return rv;
2726}
2727
2728static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2729 u64 bits, u64 flags)
2730{
2731 if (!uuid) {
2732 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2733 return;
2734 }
2735 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2736 text,
2737 (unsigned long long)uuid[UI_CURRENT],
2738 (unsigned long long)uuid[UI_BITMAP],
2739 (unsigned long long)uuid[UI_HISTORY_START],
2740 (unsigned long long)uuid[UI_HISTORY_END],
2741 (unsigned long long)bits,
2742 (unsigned long long)flags);
2743}
2744
2745/*
2746 100 after split brain try auto recover
2747 2 C_SYNC_SOURCE set BitMap
2748 1 C_SYNC_SOURCE use BitMap
2749 0 no Sync
2750 -1 C_SYNC_TARGET use BitMap
2751 -2 C_SYNC_TARGET set BitMap
2752 -100 after split brain, disconnect
2753-1000 unrelated data
4a23f264
PR
2754-1091 requires proto 91
2755-1096 requires proto 96
b411b363
PR
2756 */
2757static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2758{
2759 u64 self, peer;
2760 int i, j;
2761
2762 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2763 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2764
2765 *rule_nr = 10;
2766 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2767 return 0;
2768
2769 *rule_nr = 20;
2770 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2771 peer != UUID_JUST_CREATED)
2772 return -2;
2773
2774 *rule_nr = 30;
2775 if (self != UUID_JUST_CREATED &&
2776 (peer == UUID_JUST_CREATED || peer == (u64)0))
2777 return 2;
2778
2779 if (self == peer) {
2780 int rct, dc; /* roles at crash time */
2781
2782 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2783
31890f4a 2784 if (mdev->tconn->agreed_pro_version < 91)
4a23f264 2785 return -1091;
b411b363
PR
2786
2787 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2788 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2789 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
9f2247bb
PR
2790 drbd_uuid_move_history(mdev);
2791 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
2792 mdev->ldev->md.uuid[UI_BITMAP] = 0;
b411b363
PR
2793
2794 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2795 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2796 *rule_nr = 34;
2797 } else {
2798 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2799 *rule_nr = 36;
2800 }
2801
2802 return 1;
2803 }
2804
2805 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2806
31890f4a 2807 if (mdev->tconn->agreed_pro_version < 91)
4a23f264 2808 return -1091;
b411b363
PR
2809
2810 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2811 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2812 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2813
2814 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2815 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2816 mdev->p_uuid[UI_BITMAP] = 0UL;
2817
2818 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2819 *rule_nr = 35;
2820 } else {
2821 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2822 *rule_nr = 37;
2823 }
2824
2825 return -1;
2826 }
2827
2828 /* Common power [off|failure] */
2829 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2830 (mdev->p_uuid[UI_FLAGS] & 2);
2831 /* lowest bit is set when we were primary,
2832 * next bit (weight 2) is set when peer was primary */
2833 *rule_nr = 40;
2834
2835 switch (rct) {
2836 case 0: /* !self_pri && !peer_pri */ return 0;
2837 case 1: /* self_pri && !peer_pri */ return 1;
2838 case 2: /* !self_pri && peer_pri */ return -1;
2839 case 3: /* self_pri && peer_pri */
427c0434 2840 dc = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
b411b363
PR
2841 return dc ? -1 : 1;
2842 }
2843 }
2844
2845 *rule_nr = 50;
2846 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2847 if (self == peer)
2848 return -1;
2849
2850 *rule_nr = 51;
2851 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2852 if (self == peer) {
31890f4a 2853 if (mdev->tconn->agreed_pro_version < 96 ?
4a23f264
PR
2854 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2855 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2856 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
b411b363
PR
2857 /* The last P_SYNC_UUID did not get though. Undo the last start of
2858 resync as sync source modifications of the peer's UUIDs. */
2859
31890f4a 2860 if (mdev->tconn->agreed_pro_version < 91)
4a23f264 2861 return -1091;
b411b363
PR
2862
2863 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2864 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
4a23f264 2865
92b4ca29 2866 dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
4a23f264
PR
2867 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2868
b411b363
PR
2869 return -1;
2870 }
2871 }
2872
2873 *rule_nr = 60;
2874 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2875 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2876 peer = mdev->p_uuid[i] & ~((u64)1);
2877 if (self == peer)
2878 return -2;
2879 }
2880
2881 *rule_nr = 70;
2882 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2883 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2884 if (self == peer)
2885 return 1;
2886
2887 *rule_nr = 71;
2888 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2889 if (self == peer) {
31890f4a 2890 if (mdev->tconn->agreed_pro_version < 96 ?
4a23f264
PR
2891 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2892 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2893 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
b411b363
PR
 2894 /* The last P_SYNC_UUID did not get through. Undo the modifications we
 2895 made to our UUIDs when we last started a resync as sync source. */
2896
31890f4a 2897 if (mdev->tconn->agreed_pro_version < 91)
4a23f264 2898 return -1091;
b411b363 2899
9f2247bb
PR
2900 __drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2901 __drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
b411b363 2902
4a23f264 2903 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
b411b363
PR
2904 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2905 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2906
2907 return 1;
2908 }
2909 }
2910
2911
2912 *rule_nr = 80;
d8c2a36b 2913 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
b411b363
PR
2914 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2915 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2916 if (self == peer)
2917 return 2;
2918 }
2919
2920 *rule_nr = 90;
2921 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2922 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2923 if (self == peer && self != ((u64)0))
2924 return 100;
2925
2926 *rule_nr = 100;
2927 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2928 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2929 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2930 peer = mdev->p_uuid[j] & ~((u64)1);
2931 if (self == peer)
2932 return -100;
2933 }
2934 }
2935
2936 return -1000;
2937}
2938
2939/* drbd_sync_handshake() returns the new conn state on success, or
2940 CONN_MASK (-1) on failure.
2941 */
2942static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2943 enum drbd_disk_state peer_disk) __must_hold(local)
2944{
b411b363
PR
2945 enum drbd_conns rv = C_MASK;
2946 enum drbd_disk_state mydisk;
44ed167d 2947 struct net_conf *nc;
6dff2902 2948 int hg, rule_nr, rr_conflict, tentative;
b411b363
PR
2949
2950 mydisk = mdev->state.disk;
2951 if (mydisk == D_NEGOTIATING)
2952 mydisk = mdev->new_state_tmp.disk;
2953
2954 dev_info(DEV, "drbd_sync_handshake:\n");
9f2247bb
PR
2955
2956 spin_lock_irq(&mdev->ldev->md.uuid_lock);
b411b363
PR
2957 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2958 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2959 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2960
2961 hg = drbd_uuid_compare(mdev, &rule_nr);
9f2247bb 2962 spin_unlock_irq(&mdev->ldev->md.uuid_lock);
b411b363
PR
2963
2964 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2965
2966 if (hg == -1000) {
2967 dev_alert(DEV, "Unrelated data, aborting!\n");
2968 return C_MASK;
2969 }
4a23f264
PR
2970 if (hg < -1000) {
2971 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
b411b363
PR
2972 return C_MASK;
2973 }
2974
2975 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2976 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2977 int f = (hg == -100) || abs(hg) == 2;
2978 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2979 if (f)
2980 hg = hg*2;
2981 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2982 hg > 0 ? "source" : "target");
2983 }
2984
3a11a487
AG
2985 if (abs(hg) == 100)
2986 drbd_khelper(mdev, "initial-split-brain");
2987
44ed167d
PR
2988 rcu_read_lock();
2989 nc = rcu_dereference(mdev->tconn->net_conf);
2990
2991 if (hg == 100 || (hg == -100 && nc->always_asbp)) {
b411b363
PR
2992 int pcount = (mdev->state.role == R_PRIMARY)
2993 + (peer_role == R_PRIMARY);
2994 int forced = (hg == -100);
2995
2996 switch (pcount) {
2997 case 0:
2998 hg = drbd_asb_recover_0p(mdev);
2999 break;
3000 case 1:
3001 hg = drbd_asb_recover_1p(mdev);
3002 break;
3003 case 2:
3004 hg = drbd_asb_recover_2p(mdev);
3005 break;
3006 }
3007 if (abs(hg) < 100) {
3008 dev_warn(DEV, "Split-Brain detected, %d primaries, "
3009 "automatically solved. Sync from %s node\n",
3010 pcount, (hg < 0) ? "peer" : "this");
3011 if (forced) {
3012 dev_warn(DEV, "Doing a full sync, since"
3013 " UUIDs where ambiguous.\n");
3014 hg = hg*2;
3015 }
3016 }
3017 }
3018
3019 if (hg == -100) {
08b165ba 3020 if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1))
b411b363 3021 hg = -1;
08b165ba 3022 if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1))
b411b363
PR
3023 hg = 1;
3024
3025 if (abs(hg) < 100)
3026 dev_warn(DEV, "Split-Brain detected, manually solved. "
3027 "Sync from %s node\n",
3028 (hg < 0) ? "peer" : "this");
3029 }
44ed167d 3030 rr_conflict = nc->rr_conflict;
6dff2902 3031 tentative = nc->tentative;
44ed167d 3032 rcu_read_unlock();
b411b363
PR
3033
3034 if (hg == -100) {
580b9767
LE
3035 /* FIXME this log message is not correct if we end up here
3036 * after an attempted attach on a diskless node.
3037 * We just refuse to attach -- well, we drop the "connection"
3038 * to that disk, in a way... */
3a11a487 3039 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
b411b363
PR
3040 drbd_khelper(mdev, "split-brain");
3041 return C_MASK;
3042 }
3043
3044 if (hg > 0 && mydisk <= D_INCONSISTENT) {
3045 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
3046 return C_MASK;
3047 }
3048
3049 if (hg < 0 && /* by intention we do not use mydisk here. */
3050 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
44ed167d 3051 switch (rr_conflict) {
b411b363
PR
3052 case ASB_CALL_HELPER:
3053 drbd_khelper(mdev, "pri-lost");
3054 /* fall through */
3055 case ASB_DISCONNECT:
3056 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
3057 return C_MASK;
3058 case ASB_VIOLENTLY:
3059 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
3060 "assumption\n");
3061 }
3062 }
3063
6dff2902 3064 if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
cf14c2e9
PR
3065 if (hg == 0)
3066 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
3067 else
3068 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
3069 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
3070 abs(hg) >= 2 ? "full" : "bit-map based");
3071 return C_MASK;
3072 }
3073
b411b363
PR
3074 if (abs(hg) >= 2) {
3075 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
20ceb2b2
LE
3076 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
3077 BM_LOCKED_SET_ALLOWED))
b411b363
PR
3078 return C_MASK;
3079 }
3080
3081 if (hg > 0) { /* become sync source. */
3082 rv = C_WF_BITMAP_S;
3083 } else if (hg < 0) { /* become sync target */
3084 rv = C_WF_BITMAP_T;
3085 } else {
3086 rv = C_CONNECTED;
3087 if (drbd_bm_total_weight(mdev)) {
3088 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
3089 drbd_bm_total_weight(mdev));
3090 }
3091 }
3092
3093 return rv;
3094}
3095
f179d76d 3096static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
b411b363
PR
3097{
3098 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
f179d76d
PR
3099 if (peer == ASB_DISCARD_REMOTE)
3100 return ASB_DISCARD_LOCAL;
b411b363
PR
3101
3102 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
f179d76d
PR
3103 if (peer == ASB_DISCARD_LOCAL)
3104 return ASB_DISCARD_REMOTE;
b411b363
PR
3105
3106 /* everything else is valid if they are equal on both sides. */
f179d76d 3107 return peer;
b411b363
PR
3108}
3109
e2857216 3110static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3111{
e658983a 3112 struct p_protocol *p = pi->data;
036b17ea
PR
3113 enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
3114 int p_proto, p_discard_my_data, p_two_primaries, cf;
3115 struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
3116 char integrity_alg[SHARED_SECRET_MAX] = "";
accdbcc5 3117 struct crypto_hash *peer_integrity_tfm = NULL;
7aca6c75 3118 void *int_dig_in = NULL, *int_dig_vv = NULL;
b411b363 3119
b411b363
PR
3120 p_proto = be32_to_cpu(p->protocol);
3121 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
3122 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
3123 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
b411b363 3124 p_two_primaries = be32_to_cpu(p->two_primaries);
cf14c2e9 3125 cf = be32_to_cpu(p->conn_flags);
6139f60d 3126 p_discard_my_data = cf & CF_DISCARD_MY_DATA;
cf14c2e9 3127
86db0618
AG
3128 if (tconn->agreed_pro_version >= 87) {
3129 int err;
cf14c2e9 3130
88104ca4 3131 if (pi->size > sizeof(integrity_alg))
86db0618 3132 return -EIO;
88104ca4 3133 err = drbd_recv_all(tconn, integrity_alg, pi->size);
86db0618
AG
3134 if (err)
3135 return err;
036b17ea 3136 integrity_alg[SHARED_SECRET_MAX - 1] = 0;
b411b363
PR
3137 }
3138
7d4c782c 3139 if (pi->cmd != P_PROTOCOL_UPDATE) {
fbc12f45 3140 clear_bit(CONN_DRY_RUN, &tconn->flags);
b411b363 3141
fbc12f45
AG
3142 if (cf & CF_DRY_RUN)
3143 set_bit(CONN_DRY_RUN, &tconn->flags);
b411b363 3144
fbc12f45
AG
3145 rcu_read_lock();
3146 nc = rcu_dereference(tconn->net_conf);
b411b363 3147
fbc12f45 3148 if (p_proto != nc->wire_protocol) {
d505d9be 3149 conn_err(tconn, "incompatible %s settings\n", "protocol");
fbc12f45
AG
3150 goto disconnect_rcu_unlock;
3151 }
b411b363 3152
fbc12f45 3153 if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
d505d9be 3154 conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri");
fbc12f45
AG
3155 goto disconnect_rcu_unlock;
3156 }
b411b363 3157
fbc12f45 3158 if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
d505d9be 3159 conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri");
fbc12f45
AG
3160 goto disconnect_rcu_unlock;
3161 }
b411b363 3162
fbc12f45 3163 if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
d505d9be 3164 conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri");
fbc12f45
AG
3165 goto disconnect_rcu_unlock;
3166 }
b411b363 3167
fbc12f45 3168 if (p_discard_my_data && nc->discard_my_data) {
d505d9be 3169 conn_err(tconn, "incompatible %s settings\n", "discard-my-data");
fbc12f45
AG
3170 goto disconnect_rcu_unlock;
3171 }
b411b363 3172
fbc12f45 3173 if (p_two_primaries != nc->two_primaries) {
d505d9be 3174 conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries");
fbc12f45
AG
3175 goto disconnect_rcu_unlock;
3176 }
b411b363 3177
fbc12f45 3178 if (strcmp(integrity_alg, nc->integrity_alg)) {
d505d9be 3179 conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg");
fbc12f45
AG
3180 goto disconnect_rcu_unlock;
3181 }
b411b363 3182
fbc12f45 3183 rcu_read_unlock();
b411b363
PR
3184 }
3185
7d4c782c
AG
3186 if (integrity_alg[0]) {
3187 int hash_size;
3188
3189 /*
3190 * We can only change the peer data integrity algorithm
3191 * here. Changing our own data integrity algorithm
3192 * requires that we send a P_PROTOCOL_UPDATE packet at
3193 * the same time; otherwise, the peer has no way to
3194 * tell between which packets the algorithm should
3195 * change.
3196 */
b411b363 3197
7d4c782c
AG
3198 peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
3199 if (!peer_integrity_tfm) {
3200 conn_err(tconn, "peer data-integrity-alg %s not supported\n",
3201 integrity_alg);
3202 goto disconnect;
3203 }
b411b363 3204
7d4c782c
AG
3205 hash_size = crypto_hash_digestsize(peer_integrity_tfm);
3206 int_dig_in = kmalloc(hash_size, GFP_KERNEL);
3207 int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
3208 if (!(int_dig_in && int_dig_vv)) {
3209 conn_err(tconn, "Allocation of buffers for data integrity checking failed\n");
b411b363
PR
3210 goto disconnect;
3211 }
b411b363
PR
3212 }
3213
7d4c782c
AG
3214 new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
3215 if (!new_net_conf) {
3216 conn_err(tconn, "Allocation of new net_conf failed\n");
3217 goto disconnect;
3218 }
3219
3220 mutex_lock(&tconn->data.mutex);
3221 mutex_lock(&tconn->conf_update);
3222 old_net_conf = tconn->net_conf;
3223 *new_net_conf = *old_net_conf;
3224
3225 new_net_conf->wire_protocol = p_proto;
3226 new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
3227 new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
3228 new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
3229 new_net_conf->two_primaries = p_two_primaries;
3230
3231 rcu_assign_pointer(tconn->net_conf, new_net_conf);
3232 mutex_unlock(&tconn->conf_update);
3233 mutex_unlock(&tconn->data.mutex);
3234
3235 crypto_free_hash(tconn->peer_integrity_tfm);
3236 kfree(tconn->int_dig_in);
3237 kfree(tconn->int_dig_vv);
3238 tconn->peer_integrity_tfm = peer_integrity_tfm;
3239 tconn->int_dig_in = int_dig_in;
3240 tconn->int_dig_vv = int_dig_vv;
3241
3242 if (strcmp(old_net_conf->integrity_alg, integrity_alg))
3243 conn_info(tconn, "peer data-integrity-alg: %s\n",
3244 integrity_alg[0] ? integrity_alg : "(none)");
3245
3246 synchronize_rcu();
3247 kfree(old_net_conf);
82bc0194 3248 return 0;
b411b363 3249
44ed167d
PR
3250disconnect_rcu_unlock:
3251 rcu_read_unlock();
b411b363 3252disconnect:
b792c35c 3253 crypto_free_hash(peer_integrity_tfm);
036b17ea
PR
3254 kfree(int_dig_in);
3255 kfree(int_dig_vv);
7204624c 3256 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3257 return -EIO;
b411b363
PR
3258}
3259
3260/* helper function
3261 * input: alg name, feature name
3262 * return: NULL (alg name was "")
3263 * ERR_PTR(error) if something goes wrong
3264 * or the crypto hash ptr, if it worked out ok. */
3265struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
3266 const char *alg, const char *name)
3267{
3268 struct crypto_hash *tfm;
3269
3270 if (!alg[0])
3271 return NULL;
3272
3273 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
3274 if (IS_ERR(tfm)) {
3275 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
3276 alg, name, PTR_ERR(tfm));
3277 return tfm;
3278 }
b411b363
PR
3279 return tfm;
3280}
3281
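Sketch of the calling convention documented above (editor illustration; the "csums-alg" string, nc->csums_alg and use_checksum_based_resync() are assumptions for the example, not quotes of the real caller):

/* NULL means "no algorithm configured", IS_ERR() encodes an allocation
 * failure (already logged by the helper), anything else is a usable tfm
 * that the caller must eventually crypto_free_hash(). */
struct crypto_hash *tfm;

tfm = drbd_crypto_alloc_digest_safe(mdev, nc->csums_alg, "csums-alg");
if (IS_ERR(tfm))
	goto disconnect;                      /* hard failure */
if (tfm)
	use_checksum_based_resync(mdev, tfm); /* hypothetical helper */
/* else: empty algorithm string, feature disabled */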
4a76b161
AG
3282static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
3283{
3284 void *buffer = tconn->data.rbuf;
3285 int size = pi->size;
3286
3287 while (size) {
3288 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
3289 s = drbd_recv(tconn, buffer, s);
3290 if (s <= 0) {
3291 if (s < 0)
3292 return s;
3293 break;
3294 }
3295 size -= s;
3296 }
3297 if (size)
3298 return -EIO;
3299 return 0;
3300}
3301
3302/*
3303 * config_unknown_volume - device configuration command for unknown volume
3304 *
3305 * When a device is added to an existing connection, the node on which the
3306 * device is added first will send configuration commands to its peer but the
3307 * peer will not know about the device yet. It will warn and ignore these
3308 * commands. Once the device is added on the second node, the second node will
3309 * send the same device configuration commands, but in the other direction.
3310 *
3311 * (We can also end up here if drbd is misconfigured.)
3312 */
3313static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
3314{
2fcb8f30
AG
3315 conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n",
3316 cmdname(pi->cmd), pi->vnr);
4a76b161
AG
3317 return ignore_remaining_packet(tconn, pi);
3318}
3319
3320static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3321{
4a76b161 3322 struct drbd_conf *mdev;
e658983a 3323 struct p_rs_param_95 *p;
b411b363
PR
3324 unsigned int header_size, data_size, exp_max_sz;
3325 struct crypto_hash *verify_tfm = NULL;
3326 struct crypto_hash *csums_tfm = NULL;
2ec91e0e 3327 struct net_conf *old_net_conf, *new_net_conf = NULL;
813472ce 3328 struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
4a76b161 3329 const int apv = tconn->agreed_pro_version;
813472ce 3330 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
778f271d 3331 int fifo_size = 0;
82bc0194 3332 int err;
b411b363 3333
4a76b161
AG
3334 mdev = vnr_to_mdev(tconn, pi->vnr);
3335 if (!mdev)
3336 return config_unknown_volume(tconn, pi);
b411b363
PR
3337
3338 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
3339 : apv == 88 ? sizeof(struct p_rs_param)
3340 + SHARED_SECRET_MAX
8e26f9cc
PR
3341 : apv <= 94 ? sizeof(struct p_rs_param_89)
3342 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
b411b363 3343
e2857216 3344 if (pi->size > exp_max_sz) {
b411b363 3345 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
e2857216 3346 pi->size, exp_max_sz);
82bc0194 3347 return -EIO;
b411b363
PR
3348 }
3349
3350 if (apv <= 88) {
e658983a 3351 header_size = sizeof(struct p_rs_param);
e2857216 3352 data_size = pi->size - header_size;
8e26f9cc 3353 } else if (apv <= 94) {
e658983a 3354 header_size = sizeof(struct p_rs_param_89);
e2857216 3355 data_size = pi->size - header_size;
b411b363 3356 D_ASSERT(data_size == 0);
8e26f9cc 3357 } else {
e658983a 3358 header_size = sizeof(struct p_rs_param_95);
e2857216 3359 data_size = pi->size - header_size;
b411b363
PR
3360 D_ASSERT(data_size == 0);
3361 }
3362
3363 /* initialize verify_alg and csums_alg */
e658983a 3364 p = pi->data;
b411b363
PR
3365 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3366
e658983a 3367 err = drbd_recv_all(mdev->tconn, p, header_size);
82bc0194
AG
3368 if (err)
3369 return err;
b411b363 3370
daeda1cc
PR
3371 mutex_lock(&mdev->tconn->conf_update);
3372 old_net_conf = mdev->tconn->net_conf;
813472ce
PR
3373 if (get_ldev(mdev)) {
3374 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3375 if (!new_disk_conf) {
3376 put_ldev(mdev);
3377 mutex_unlock(&mdev->tconn->conf_update);
3378 dev_err(DEV, "Allocation of new disk_conf failed\n");
3379 return -ENOMEM;
3380 }
daeda1cc 3381
813472ce
PR
3382 old_disk_conf = mdev->ldev->disk_conf;
3383 *new_disk_conf = *old_disk_conf;
b411b363 3384
6394b935 3385 new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
813472ce 3386 }
b411b363
PR
3387
3388 if (apv >= 88) {
3389 if (apv == 88) {
5de73827
PR
3390 if (data_size > SHARED_SECRET_MAX || data_size == 0) {
3391 dev_err(DEV, "verify-alg of wrong size, "
3392 "peer wants %u, accepting only up to %u byte\n",
3393 data_size, SHARED_SECRET_MAX);
813472ce
PR
3394 err = -EIO;
3395 goto reconnect;
b411b363
PR
3396 }
3397
82bc0194 3398 err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
813472ce
PR
3399 if (err)
3400 goto reconnect;
b411b363
PR
3401 /* we expect NUL terminated string */
3402 /* but just in case someone tries to be evil */
3403 D_ASSERT(p->verify_alg[data_size-1] == 0);
3404 p->verify_alg[data_size-1] = 0;
3405
3406 } else /* apv >= 89 */ {
3407 /* we still expect NUL terminated strings */
3408 /* but just in case someone tries to be evil */
3409 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3410 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
3411 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3412 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3413 }
3414
2ec91e0e 3415 if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
b411b363
PR
3416 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3417 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2ec91e0e 3418 old_net_conf->verify_alg, p->verify_alg);
b411b363
PR
3419 goto disconnect;
3420 }
3421 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
3422 p->verify_alg, "verify-alg");
3423 if (IS_ERR(verify_tfm)) {
3424 verify_tfm = NULL;
3425 goto disconnect;
3426 }
3427 }
3428
2ec91e0e 3429 if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
b411b363
PR
3430 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3431 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2ec91e0e 3432 old_net_conf->csums_alg, p->csums_alg);
b411b363
PR
3433 goto disconnect;
3434 }
3435 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
3436 p->csums_alg, "csums-alg");
3437 if (IS_ERR(csums_tfm)) {
3438 csums_tfm = NULL;
3439 goto disconnect;
3440 }
3441 }
3442
813472ce 3443 if (apv > 94 && new_disk_conf) {
daeda1cc
PR
3444 new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3445 new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
3446 new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
3447 new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
778f271d 3448
daeda1cc 3449 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
9958c857 3450 if (fifo_size != mdev->rs_plan_s->size) {
813472ce
PR
3451 new_plan = fifo_alloc(fifo_size);
3452 if (!new_plan) {
778f271d 3453 dev_err(DEV, "kmalloc of fifo_buffer failed\n");
f399002e 3454 put_ldev(mdev);
778f271d
PR
3455 goto disconnect;
3456 }
3457 }
8e26f9cc 3458 }
b411b363 3459
91fd4dad 3460 if (verify_tfm || csums_tfm) {
2ec91e0e
PR
3461 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
3462 if (!new_net_conf) {
91fd4dad
PR
3463 dev_err(DEV, "Allocation of new net_conf failed\n");
3464 goto disconnect;
3465 }
3466
2ec91e0e 3467 *new_net_conf = *old_net_conf;
91fd4dad
PR
3468
3469 if (verify_tfm) {
2ec91e0e
PR
3470 strcpy(new_net_conf->verify_alg, p->verify_alg);
3471 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
91fd4dad
PR
3472 crypto_free_hash(mdev->tconn->verify_tfm);
3473 mdev->tconn->verify_tfm = verify_tfm;
3474 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
3475 }
3476 if (csums_tfm) {
2ec91e0e
PR
3477 strcpy(new_net_conf->csums_alg, p->csums_alg);
3478 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
91fd4dad
PR
3479 crypto_free_hash(mdev->tconn->csums_tfm);
3480 mdev->tconn->csums_tfm = csums_tfm;
3481 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
3482 }
2ec91e0e 3483 rcu_assign_pointer(tconn->net_conf, new_net_conf);
778f271d 3484 }
b411b363
PR
3485 }
3486
813472ce
PR
3487 if (new_disk_conf) {
3488 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3489 put_ldev(mdev);
3490 }
3491
3492 if (new_plan) {
3493 old_plan = mdev->rs_plan_s;
3494 rcu_assign_pointer(mdev->rs_plan_s, new_plan);
b411b363 3495 }
daeda1cc
PR
3496
3497 mutex_unlock(&mdev->tconn->conf_update);
3498 synchronize_rcu();
3499 if (new_net_conf)
3500 kfree(old_net_conf);
3501 kfree(old_disk_conf);
813472ce 3502 kfree(old_plan);
daeda1cc 3503
82bc0194 3504 return 0;
b411b363 3505
813472ce
PR
3506reconnect:
3507 if (new_disk_conf) {
3508 put_ldev(mdev);
3509 kfree(new_disk_conf);
3510 }
3511 mutex_unlock(&mdev->tconn->conf_update);
3512 return -EIO;
3513
b411b363 3514disconnect:
813472ce
PR
3515 kfree(new_plan);
3516 if (new_disk_conf) {
3517 put_ldev(mdev);
3518 kfree(new_disk_conf);
3519 }
a0095508 3520 mutex_unlock(&mdev->tconn->conf_update);
b411b363
PR
3521 /* just for completeness: actually not needed,
3522 * as this is not reached if csums_tfm was ok. */
3523 crypto_free_hash(csums_tfm);
3524 /* but free the verify_tfm again, if csums_tfm did not work out */
3525 crypto_free_hash(verify_tfm);
38fa9988 3526 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3527 return -EIO;
b411b363
PR
3528}
3529
b411b363
PR
3530/* warn if the arguments differ by more than 12.5% */
3531static void warn_if_differ_considerably(struct drbd_conf *mdev,
3532 const char *s, sector_t a, sector_t b)
3533{
3534 sector_t d;
3535 if (a == 0 || b == 0)
3536 return;
3537 d = (a > b) ? (a - b) : (b - a);
3538 if (d > (a>>3) || d > (b>>3))
3539 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3540 (unsigned long long)a, (unsigned long long)b);
3541}
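/* Worked example, not part of the driver source: "d > (a>>3) || d > (b>>3)"
 * flags a difference of more than one eighth (12.5%) of either argument.
 * E.g. for a = 1000 and b = 1120 sectors, d = 120 and a>>3 = 125, so no
 * warning is printed; for b = 1130, d = 130 exceeds 125 and the warning
 * fires. */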
3542
4a76b161 3543static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3544{
4a76b161 3545 struct drbd_conf *mdev;
e658983a 3546 struct p_sizes *p = pi->data;
b411b363 3547 enum determine_dev_size dd = unchanged;
b411b363
PR
3548 sector_t p_size, p_usize, my_usize;
3549 int ldsc = 0; /* local disk size changed */
e89b591c 3550 enum dds_flags ddsf;
b411b363 3551
4a76b161
AG
3552 mdev = vnr_to_mdev(tconn, pi->vnr);
3553 if (!mdev)
3554 return config_unknown_volume(tconn, pi);
3555
b411b363
PR
3556 p_size = be64_to_cpu(p->d_size);
3557 p_usize = be64_to_cpu(p->u_size);
3558
b411b363
PR
3559 /* just store the peer's disk size for now.
3560 * we still need to figure out whether we accept that. */
3561 mdev->p_size = p_size;
3562
b411b363 3563 if (get_ldev(mdev)) {
daeda1cc
PR
3564 rcu_read_lock();
3565 my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
3566 rcu_read_unlock();
3567
b411b363
PR
3568 warn_if_differ_considerably(mdev, "lower level device sizes",
3569 p_size, drbd_get_max_capacity(mdev->ldev));
3570 warn_if_differ_considerably(mdev, "user requested size",
daeda1cc 3571 p_usize, my_usize);
b411b363
PR
3572
3573 /* if this is the first connect, or an otherwise expected
3574 * param exchange, choose the minimum */
3575 if (mdev->state.conn == C_WF_REPORT_PARAMS)
daeda1cc 3576 p_usize = min_not_zero(my_usize, p_usize);
b411b363
PR
3577
3578 /* Never shrink a device with usable data during connect.
3579 But allow online shrinking if we are connected. */
ef5e44a6 3580 if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
daeda1cc
PR
3581 drbd_get_capacity(mdev->this_bdev) &&
3582 mdev->state.disk >= D_OUTDATED &&
3583 mdev->state.conn < C_CONNECTED) {
b411b363 3584 dev_err(DEV, "The peer's disk size is too small!\n");
38fa9988 3585 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363 3586 put_ldev(mdev);
82bc0194 3587 return -EIO;
b411b363 3588 }
daeda1cc
PR
3589
3590 if (my_usize != p_usize) {
3591 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
3592
3593 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3594 if (!new_disk_conf) {
3595 dev_err(DEV, "Allocation of new disk_conf failed\n");
3596 put_ldev(mdev);
3597 return -ENOMEM;
3598 }
3599
3600 mutex_lock(&mdev->tconn->conf_update);
3601 old_disk_conf = mdev->ldev->disk_conf;
3602 *new_disk_conf = *old_disk_conf;
3603 new_disk_conf->disk_size = p_usize;
3604
3605 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3606 mutex_unlock(&mdev->tconn->conf_update);
3607 synchronize_rcu();
3608 kfree(old_disk_conf);
3609
3610 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3611 (unsigned long)p_usize);
b411b363 3612 }
daeda1cc 3613
b411b363
PR
3614 put_ldev(mdev);
3615 }
b411b363 3616
e89b591c 3617 ddsf = be16_to_cpu(p->dds_flags);
b411b363 3618 if (get_ldev(mdev)) {
24c4830c 3619 dd = drbd_determine_dev_size(mdev, ddsf);
b411b363
PR
3620 put_ldev(mdev);
3621 if (dd == dev_size_error)
82bc0194 3622 return -EIO;
b411b363
PR
3623 drbd_md_sync(mdev);
3624 } else {
3625 /* I am diskless, need to accept the peer's size. */
3626 drbd_set_my_capacity(mdev, p_size);
3627 }
3628
99432fcc
PR
3629 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3630 drbd_reconsider_max_bio_size(mdev);
3631
b411b363
PR
3632 if (get_ldev(mdev)) {
3633 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3634 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3635 ldsc = 1;
3636 }
3637
b411b363
PR
3638 put_ldev(mdev);
3639 }
3640
3641 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3642 if (be64_to_cpu(p->c_size) !=
3643 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3644 /* we have different sizes, probably peer
3645 * needs to know my new size... */
e89b591c 3646 drbd_send_sizes(mdev, 0, ddsf);
b411b363
PR
3647 }
3648 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3649 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3650 if (mdev->state.pdsk >= D_INCONSISTENT &&
e89b591c
PR
3651 mdev->state.disk >= D_INCONSISTENT) {
3652 if (ddsf & DDSF_NO_RESYNC)
3653 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3654 else
3655 resync_after_online_grow(mdev);
3656 } else
b411b363
PR
3657 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3658 }
3659 }
3660
82bc0194 3661 return 0;
b411b363
PR
3662}
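/* Worked example with made-up sizes, not part of the driver source: on the
 * initial parameter exchange (C_WF_REPORT_PARAMS), receive_sizes() above
 * uses min_not_zero(my_usize, p_usize) as the effective user-requested
 * size.  With my_usize = 0 (no local limit) and p_usize = 20971520 sectors,
 * the peer's limit wins; if both sides set a limit, the smaller of the two
 * is used. */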
3663
4a76b161 3664static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3665{
4a76b161 3666 struct drbd_conf *mdev;
e658983a 3667 struct p_uuids *p = pi->data;
b411b363 3668 u64 *p_uuid;
62b0da3a 3669 int i, updated_uuids = 0;
b411b363 3670
4a76b161
AG
3671 mdev = vnr_to_mdev(tconn, pi->vnr);
3672 if (!mdev)
3673 return config_unknown_volume(tconn, pi);
3674
b411b363 3675 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
063eacf8
JW
3676 if (!p_uuid) {
3677 dev_err(DEV, "kmalloc of p_uuid failed\n");
3678 return false;
3679 }
b411b363
PR
3680
3681 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3682 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3683
3684 kfree(mdev->p_uuid);
3685 mdev->p_uuid = p_uuid;
3686
3687 if (mdev->state.conn < C_CONNECTED &&
3688 mdev->state.disk < D_INCONSISTENT &&
3689 mdev->state.role == R_PRIMARY &&
3690 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3691 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3692 (unsigned long long)mdev->ed_uuid);
38fa9988 3693 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3694 return -EIO;
b411b363
PR
3695 }
3696
3697 if (get_ldev(mdev)) {
3698 int skip_initial_sync =
3699 mdev->state.conn == C_CONNECTED &&
31890f4a 3700 mdev->tconn->agreed_pro_version >= 90 &&
b411b363
PR
3701 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3702 (p_uuid[UI_FLAGS] & 8);
3703 if (skip_initial_sync) {
3704 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3705 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
20ceb2b2
LE
3706 "clear_n_write from receive_uuids",
3707 BM_LOCKED_TEST_ALLOWED);
b411b363
PR
3708 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3709 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3710 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3711 CS_VERBOSE, NULL);
3712 drbd_md_sync(mdev);
62b0da3a 3713 updated_uuids = 1;
b411b363
PR
3714 }
3715 put_ldev(mdev);
18a50fa2
PR
3716 } else if (mdev->state.disk < D_INCONSISTENT &&
3717 mdev->state.role == R_PRIMARY) {
3718 /* I am a diskless primary, the peer just created a new current UUID
3719 for me. */
62b0da3a 3720 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
b411b363
PR
3721 }
3722
3723 /* Before we test for the disk state, we should wait until a possibly
3724 ongoing cluster-wide state change has finished. That is important if
3725 we are primary and are detaching from our disk. We need to see the
3726 new disk state... */
8410da8f
PR
3727 mutex_lock(mdev->state_mutex);
3728 mutex_unlock(mdev->state_mutex);
b411b363 3729 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
62b0da3a
LE
3730 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3731
3732 if (updated_uuids)
3733 drbd_print_uuids(mdev, "receiver updated UUIDs to");
b411b363 3734
82bc0194 3735 return 0;
b411b363
PR
3736}
3737
3738/**
3739 * convert_state() - Converts the peer's view of the cluster state to our point of view
3740 * @ps: The state as seen by the peer.
3741 */
3742static union drbd_state convert_state(union drbd_state ps)
3743{
3744 union drbd_state ms;
3745
3746 static enum drbd_conns c_tab[] = {
369bea63 3747 [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
b411b363
PR
3748 [C_CONNECTED] = C_CONNECTED,
3749
3750 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3751 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3752 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3753 [C_VERIFY_S] = C_VERIFY_T,
3754 [C_MASK] = C_MASK,
3755 };
3756
3757 ms.i = ps.i;
3758
3759 ms.conn = c_tab[ps.conn];
3760 ms.peer = ps.role;
3761 ms.role = ps.peer;
3762 ms.pdsk = ps.disk;
3763 ms.disk = ps.pdsk;
3764 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3765
3766 return ms;
3767}
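/* Worked example, not part of the driver source: convert_state() mirrors
 * the peer's view into ours.  If the peer reports
 * { role = Primary, peer = Secondary, disk = UpToDate, pdsk = Inconsistent,
 *   conn = StartingSyncS }, the converted state reads
 * { role = Secondary, peer = Primary, disk = Inconsistent, pdsk = UpToDate,
 *   conn = StartingSyncT }: role/peer and disk/pdsk are swapped, and the
 * connection state is mapped to its counterpart via c_tab[]. */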
3768
4a76b161 3769static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3770{
4a76b161 3771 struct drbd_conf *mdev;
e658983a 3772 struct p_req_state *p = pi->data;
b411b363 3773 union drbd_state mask, val;
bf885f8a 3774 enum drbd_state_rv rv;
b411b363 3775
4a76b161
AG
3776 mdev = vnr_to_mdev(tconn, pi->vnr);
3777 if (!mdev)
3778 return -EIO;
3779
b411b363
PR
3780 mask.i = be32_to_cpu(p->mask);
3781 val.i = be32_to_cpu(p->val);
3782
427c0434 3783 if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) &&
8410da8f 3784 mutex_is_locked(mdev->state_mutex)) {
b411b363 3785 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
82bc0194 3786 return 0;
b411b363
PR
3787 }
3788
3789 mask = convert_state(mask);
3790 val = convert_state(val);
3791
dfafcc8a
PR
3792 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3793 drbd_send_sr_reply(mdev, rv);
b411b363 3794
b411b363
PR
3795 drbd_md_sync(mdev);
3796
82bc0194 3797 return 0;
b411b363
PR
3798}
3799
e2857216 3800static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3801{
e658983a 3802 struct p_req_state *p = pi->data;
b411b363 3803 union drbd_state mask, val;
bf885f8a 3804 enum drbd_state_rv rv;
b411b363 3805
b411b363
PR
3806 mask.i = be32_to_cpu(p->mask);
3807 val.i = be32_to_cpu(p->val);
3808
427c0434 3809 if (test_bit(RESOLVE_CONFLICTS, &tconn->flags) &&
dfafcc8a
PR
3810 mutex_is_locked(&tconn->cstate_mutex)) {
3811 conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
82bc0194 3812 return 0;
b411b363
PR
3813 }
3814
3815 mask = convert_state(mask);
3816 val = convert_state(val);
3817
778bcf2e 3818 rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
dfafcc8a 3819 conn_send_sr_reply(tconn, rv);
b411b363 3820
82bc0194 3821 return 0;
b411b363
PR
3822}
3823
4a76b161 3824static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3825{
4a76b161 3826 struct drbd_conf *mdev;
e658983a 3827 struct p_state *p = pi->data;
4ac4aada 3828 union drbd_state os, ns, peer_state;
b411b363 3829 enum drbd_disk_state real_peer_disk;
65d922c3 3830 enum chg_state_flags cs_flags;
b411b363
PR
3831 int rv;
3832
4a76b161
AG
3833 mdev = vnr_to_mdev(tconn, pi->vnr);
3834 if (!mdev)
3835 return config_unknown_volume(tconn, pi);
3836
b411b363
PR
3837 peer_state.i = be32_to_cpu(p->state);
3838
3839 real_peer_disk = peer_state.disk;
3840 if (peer_state.disk == D_NEGOTIATING) {
3841 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3842 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3843 }
3844
87eeee41 3845 spin_lock_irq(&mdev->tconn->req_lock);
b411b363 3846 retry:
78bae59b 3847 os = ns = drbd_read_state(mdev);
87eeee41 3848 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 3849
545752d5
LE
3850 /* If some other part of the code (asender thread, timeout)
3851 * already decided to close the connection again,
3852 * we must not "re-establish" it here. */
3853 if (os.conn <= C_TEAR_DOWN)
58ffa580 3854 return -ECONNRESET;
545752d5 3855
40424e4a
LE
3856 /* If this is the "end of sync" confirmation, usually the peer disk
3857 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
3858 * set) resync started in PausedSyncT, or if the timing of pause-/
3859 * unpause-sync events has been "just right", the peer disk may
3860 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
3861 */
3862 if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
3863 real_peer_disk == D_UP_TO_DATE &&
e9ef7bb6
LE
3864 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3865 /* If we are (becoming) SyncSource, but peer is still in sync
3866 * preparation, ignore its uptodate-ness to avoid flapping, it
3867 * will change to inconsistent once the peer reaches active
3868 * syncing states.
3869 * It may have changed syncer-paused flags, however, so we
3870 * cannot ignore this completely. */
3871 if (peer_state.conn > C_CONNECTED &&
3872 peer_state.conn < C_SYNC_SOURCE)
3873 real_peer_disk = D_INCONSISTENT;
3874
3875 /* if peer_state changes to connected at the same time,
3876 * it explicitly notifies us that it finished resync.
3877 * Maybe we should finish it up, too? */
3878 else if (os.conn >= C_SYNC_SOURCE &&
3879 peer_state.conn == C_CONNECTED) {
3880 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3881 drbd_resync_finished(mdev);
82bc0194 3882 return 0;
e9ef7bb6
LE
3883 }
3884 }
3885
02b91b55
LE
3886 /* explicit verify finished notification, stop sector reached. */
3887 if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
3888 peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
58ffa580 3889 ov_out_of_sync_print(mdev);
02b91b55 3890 drbd_resync_finished(mdev);
58ffa580 3891 return 0;
02b91b55
LE
3892 }
3893
e9ef7bb6
LE
3894 /* peer says his disk is inconsistent, while we think it is uptodate,
3895 * and this happens while the peer still thinks we have a sync going on,
3896 * but we think we are already done with the sync.
3897 * We ignore this to avoid flapping pdsk.
3898 * This should not happen, if the peer is a recent version of drbd. */
3899 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3900 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3901 real_peer_disk = D_UP_TO_DATE;
3902
4ac4aada
LE
3903 if (ns.conn == C_WF_REPORT_PARAMS)
3904 ns.conn = C_CONNECTED;
b411b363 3905
67531718
PR
3906 if (peer_state.conn == C_AHEAD)
3907 ns.conn = C_BEHIND;
3908
b411b363
PR
3909 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3910 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3911 int cr; /* consider resync */
3912
3913 /* if we established a new connection */
4ac4aada 3914 cr = (os.conn < C_CONNECTED);
b411b363
PR
3915 /* if we had an established connection
3916 * and one of the nodes newly attaches a disk */
4ac4aada 3917 cr |= (os.conn == C_CONNECTED &&
b411b363 3918 (peer_state.disk == D_NEGOTIATING ||
4ac4aada 3919 os.disk == D_NEGOTIATING));
b411b363
PR
3920 /* if we have both been inconsistent, and the peer has been
3921 * forced to be UpToDate with --overwrite-data */
3922 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3923 /* if we had been plain connected, and the admin requested to
3924 * start a sync by "invalidate" or "invalidate-remote" */
4ac4aada 3925 cr |= (os.conn == C_CONNECTED &&
b411b363
PR
3926 (peer_state.conn >= C_STARTING_SYNC_S &&
3927 peer_state.conn <= C_WF_BITMAP_T));
3928
3929 if (cr)
4ac4aada 3930 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
b411b363
PR
3931
3932 put_ldev(mdev);
4ac4aada
LE
3933 if (ns.conn == C_MASK) {
3934 ns.conn = C_CONNECTED;
b411b363 3935 if (mdev->state.disk == D_NEGOTIATING) {
82f59cc6 3936 drbd_force_state(mdev, NS(disk, D_FAILED));
b411b363
PR
3937 } else if (peer_state.disk == D_NEGOTIATING) {
3938 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3939 peer_state.disk = D_DISKLESS;
580b9767 3940 real_peer_disk = D_DISKLESS;
b411b363 3941 } else {
8169e41b 3942 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
82bc0194 3943 return -EIO;
4ac4aada 3944 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
38fa9988 3945 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3946 return -EIO;
b411b363
PR
3947 }
3948 }
3949 }
3950
87eeee41 3951 spin_lock_irq(&mdev->tconn->req_lock);
78bae59b 3952 if (os.i != drbd_read_state(mdev).i)
b411b363
PR
3953 goto retry;
3954 clear_bit(CONSIDER_RESYNC, &mdev->flags);
b411b363
PR
3955 ns.peer = peer_state.role;
3956 ns.pdsk = real_peer_disk;
3957 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
4ac4aada 3958 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
b411b363 3959 ns.disk = mdev->new_state_tmp.disk;
4ac4aada 3960 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
2aebfabb 3961 if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
481c6f50 3962 test_bit(NEW_CUR_UUID, &mdev->flags)) {
8554df1c 3963 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
481c6f50 3964 for temporary network outages! */
87eeee41 3965 spin_unlock_irq(&mdev->tconn->req_lock);
481c6f50 3966 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
2f5cdd0b 3967 tl_clear(mdev->tconn);
481c6f50
PR
3968 drbd_uuid_new_current(mdev);
3969 clear_bit(NEW_CUR_UUID, &mdev->flags);
38fa9988 3970 conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
82bc0194 3971 return -EIO;
481c6f50 3972 }
65d922c3 3973 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
78bae59b 3974 ns = drbd_read_state(mdev);
87eeee41 3975 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363
PR
3976
3977 if (rv < SS_SUCCESS) {
38fa9988 3978 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3979 return -EIO;
b411b363
PR
3980 }
3981
4ac4aada
LE
3982 if (os.conn > C_WF_REPORT_PARAMS) {
3983 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
b411b363
PR
3984 peer_state.disk != D_NEGOTIATING ) {
3985 /* we want resync, peer has not yet decided to sync... */
3986 /* Nowadays only used when forcing a node into primary role and
3987 setting its disk to UpToDate with that */
3988 drbd_send_uuids(mdev);
f479ea06 3989 drbd_send_current_state(mdev);
b411b363
PR
3990 }
3991 }
3992
08b165ba 3993 clear_bit(DISCARD_MY_DATA, &mdev->flags);
b411b363
PR
3994
3995 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3996
82bc0194 3997 return 0;
b411b363
PR
3998}
3999
4a76b161 4000static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4001{
4a76b161 4002 struct drbd_conf *mdev;
e658983a 4003 struct p_rs_uuid *p = pi->data;
4a76b161
AG
4004
4005 mdev = vnr_to_mdev(tconn, pi->vnr);
4006 if (!mdev)
4007 return -EIO;
b411b363
PR
4008
4009 wait_event(mdev->misc_wait,
4010 mdev->state.conn == C_WF_SYNC_UUID ||
c4752ef1 4011 mdev->state.conn == C_BEHIND ||
b411b363
PR
4012 mdev->state.conn < C_CONNECTED ||
4013 mdev->state.disk < D_NEGOTIATING);
4014
4015 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
4016
b411b363
PR
4017 /* Here the _drbd_uuid_ functions are right, current should
4018 _not_ be rotated into the history */
4019 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
4020 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
4021 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
4022
62b0da3a 4023 drbd_print_uuids(mdev, "updated sync uuid");
b411b363
PR
4024 drbd_start_resync(mdev, C_SYNC_TARGET);
4025
4026 put_ldev(mdev);
4027 } else
4028 dev_err(DEV, "Ignoring SyncUUID packet!\n");
4029
82bc0194 4030 return 0;
b411b363
PR
4031}
4032
2c46407d
AG
4033/**
4034 * receive_bitmap_plain
4035 *
4036 * Return 0 when done, 1 when another iteration is needed, and a negative error
4037 * code upon failure.
4038 */
4039static int
50d0b1ad 4040receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
e658983a 4041 unsigned long *p, struct bm_xfer_ctx *c)
b411b363 4042{
50d0b1ad
AG
4043 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
4044 drbd_header_size(mdev->tconn);
e658983a 4045 unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
50d0b1ad 4046 c->bm_words - c->word_offset);
e658983a 4047 unsigned int want = num_words * sizeof(*p);
2c46407d 4048 int err;
b411b363 4049
50d0b1ad
AG
4050 if (want != size) {
4051 dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size);
2c46407d 4052 return -EIO;
b411b363
PR
4053 }
4054 if (want == 0)
2c46407d 4055 return 0;
e658983a 4056 err = drbd_recv_all(mdev->tconn, p, want);
82bc0194 4057 if (err)
2c46407d 4058 return err;
b411b363 4059
e658983a 4060 drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);
b411b363
PR
4061
4062 c->word_offset += num_words;
4063 c->bit_offset = c->word_offset * BITS_PER_LONG;
4064 if (c->bit_offset > c->bm_bits)
4065 c->bit_offset = c->bm_bits;
4066
2c46407d 4067 return 1;
b411b363
PR
4068}
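/* Worked example, not part of the driver source; buffer and header sizes
 * are example values: with a 4096-byte socket buffer, an 8-byte header and
 * 8-byte longs, each P_BITMAP packet can carry up to (4096 - 8) / 8 = 511
 * bitmap words.  A bitmap of 1000 words thus arrives in two packets of 511
 * and 489 words, and "want" must match the received payload size exactly,
 * otherwise receive_bitmap_plain() returns -EIO. */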
4069
a02d1240
AG
4070static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
4071{
4072 return (enum drbd_bitmap_code)(p->encoding & 0x0f);
4073}
4074
4075static int dcbp_get_start(struct p_compressed_bm *p)
4076{
4077 return (p->encoding & 0x80) != 0;
4078}
4079
4080static int dcbp_get_pad_bits(struct p_compressed_bm *p)
4081{
4082 return (p->encoding >> 4) & 0x7;
4083}
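/* Illustrative note, not part of the driver source: the three helpers
 * above unpack the single "encoding" byte of a compressed bitmap packet:
 *	bit  7    - value of the first run      (dcbp_get_start)
 *	bits 6..4 - padding bits at the end     (dcbp_get_pad_bits)
 *	bits 3..0 - bitmap encoding, e.g. RLE   (dcbp_get_code)
 * For example, encoding = 0x93 (1001 0011b) means: the first run describes
 * set bits, one pad bit, encoding code 3. */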
4084
2c46407d
AG
4085/**
4086 * recv_bm_rle_bits
4087 *
4088 * Return 0 when done, 1 when another iteration is needed, and a negative error
4089 * code upon failure.
4090 */
4091static int
b411b363
PR
4092recv_bm_rle_bits(struct drbd_conf *mdev,
4093 struct p_compressed_bm *p,
c6d25cfe
PR
4094 struct bm_xfer_ctx *c,
4095 unsigned int len)
b411b363
PR
4096{
4097 struct bitstream bs;
4098 u64 look_ahead;
4099 u64 rl;
4100 u64 tmp;
4101 unsigned long s = c->bit_offset;
4102 unsigned long e;
a02d1240 4103 int toggle = dcbp_get_start(p);
b411b363
PR
4104 int have;
4105 int bits;
4106
a02d1240 4107 bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
b411b363
PR
4108
4109 bits = bitstream_get_bits(&bs, &look_ahead, 64);
4110 if (bits < 0)
2c46407d 4111 return -EIO;
b411b363
PR
4112
4113 for (have = bits; have > 0; s += rl, toggle = !toggle) {
4114 bits = vli_decode_bits(&rl, look_ahead);
4115 if (bits <= 0)
2c46407d 4116 return -EIO;
b411b363
PR
4117
4118 if (toggle) {
4119 e = s + rl - 1;
4120 if (e >= c->bm_bits) {
4121 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
2c46407d 4122 return -EIO;
b411b363
PR
4123 }
4124 _drbd_bm_set_bits(mdev, s, e);
4125 }
4126
4127 if (have < bits) {
4128 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
4129 have, bits, look_ahead,
4130 (unsigned int)(bs.cur.b - p->code),
4131 (unsigned int)bs.buf_len);
2c46407d 4132 return -EIO;
b411b363
PR
4133 }
4134 look_ahead >>= bits;
4135 have -= bits;
4136
4137 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
4138 if (bits < 0)
2c46407d 4139 return -EIO;
b411b363
PR
4140 look_ahead |= tmp << have;
4141 have += bits;
4142 }
4143
4144 c->bit_offset = s;
4145 bm_xfer_ctx_bit_to_word_offset(c);
4146
2c46407d 4147 return (s != c->bm_bits);
b411b363
PR
4148}
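/* Worked example, not part of the driver source: the run lengths decoded
 * above strictly alternate between "clear" and "set" runs, starting with
 * the value given by dcbp_get_start().  With a start value of 0 and
 * decoded run lengths 5, 3 and 7, the receiver skips bits 0..4, sets
 * bits 5..7 via _drbd_bm_set_bits(), and skips bits 8..14. */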
4149
2c46407d
AG
4150/**
4151 * decode_bitmap_c
4152 *
4153 * Return 0 when done, 1 when another iteration is needed, and a negative error
4154 * code upon failure.
4155 */
4156static int
b411b363
PR
4157decode_bitmap_c(struct drbd_conf *mdev,
4158 struct p_compressed_bm *p,
c6d25cfe
PR
4159 struct bm_xfer_ctx *c,
4160 unsigned int len)
b411b363 4161{
a02d1240 4162 if (dcbp_get_code(p) == RLE_VLI_Bits)
e658983a 4163 return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));
b411b363
PR
4164
4165 /* other variants had been implemented for evaluation,
4166 * but have been dropped as this one turned out to be "best"
4167 * during all our tests. */
4168
4169 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
38fa9988 4170 conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
2c46407d 4171 return -EIO;
b411b363
PR
4172}
4173
4174void INFO_bm_xfer_stats(struct drbd_conf *mdev,
4175 const char *direction, struct bm_xfer_ctx *c)
4176{
4177 /* what would it take to transfer it "plaintext" */
50d0b1ad
AG
4178 unsigned int header_size = drbd_header_size(mdev->tconn);
4179 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
4180 unsigned int plain =
4181 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
4182 c->bm_words * sizeof(unsigned long);
4183 unsigned int total = c->bytes[0] + c->bytes[1];
4184 unsigned int r;
b411b363
PR
4185
4186 /* total cannot be zero, but just in case: */
4187 if (total == 0)
4188 return;
4189
4190 /* don't report if not compressed */
4191 if (total >= plain)
4192 return;
4193
4194 /* total < plain. check for overflow, still */
4195 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
4196 : (1000 * total / plain);
4197
4198 if (r > 1000)
4199 r = 1000;
4200
4201 r = 1000 - r;
4202 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
4203 "total %u; compression: %u.%u%%\n",
4204 direction,
4205 c->bytes[1], c->packets[1],
4206 c->bytes[0], c->packets[0],
4207 total, r/10, r % 10);
4208}
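/* Worked example, not part of the driver source: the compression ratio
 * above is computed in per mille.  With plain = 131072 bytes and
 * total = 4096 bytes actually transferred,
 * r = 1000 - (1000 * 4096) / 131072 = 1000 - 31 = 969, reported as
 * "compression: 96.9%". */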
4209
4210/* Since we are processing the bitfield from lower addresses to higher,
4211 it does not matter whether we process it in 32 bit or 64 bit
4212 chunks, as long as it is little endian. (Understand it as a byte stream,
4213 beginning with the lowest byte...) If we used big endian,
4214 we would need to process it from the highest address to the lowest,
4215 in order to be agnostic to the 32 vs 64 bit issue.
4216
4217 Returns 0 on success, a negative error code otherwise. */
4a76b161 4218static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4219{
4a76b161 4220 struct drbd_conf *mdev;
b411b363 4221 struct bm_xfer_ctx c;
2c46407d 4222 int err;
4a76b161
AG
4223
4224 mdev = vnr_to_mdev(tconn, pi->vnr);
4225 if (!mdev)
4226 return -EIO;
b411b363 4227
20ceb2b2
LE
4228 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
4229 /* you are supposed to send additional out-of-sync information
4230 * if you actually set bits during this phase */
b411b363 4231
b411b363
PR
4232 c = (struct bm_xfer_ctx) {
4233 .bm_bits = drbd_bm_bits(mdev),
4234 .bm_words = drbd_bm_words(mdev),
4235 };
4236
2c46407d 4237 for(;;) {
e658983a
AG
4238 if (pi->cmd == P_BITMAP)
4239 err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
4240 else if (pi->cmd == P_COMPRESSED_BITMAP) {
b411b363
PR
4241 /* MAYBE: sanity check that we speak proto >= 90,
4242 * and the feature is enabled! */
e658983a 4243 struct p_compressed_bm *p = pi->data;
b411b363 4244
50d0b1ad 4245 if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
b411b363 4246 dev_err(DEV, "ReportCBitmap packet too large\n");
82bc0194 4247 err = -EIO;
b411b363
PR
4248 goto out;
4249 }
e658983a 4250 if (pi->size <= sizeof(*p)) {
e2857216 4251 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
82bc0194 4252 err = -EIO;
78fcbdae 4253 goto out;
b411b363 4254 }
e658983a
AG
4255 err = drbd_recv_all(mdev->tconn, p, pi->size);
4256 if (err)
4257 goto out;
e2857216 4258 err = decode_bitmap_c(mdev, p, &c, pi->size);
b411b363 4259 } else {
e2857216 4260 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
82bc0194 4261 err = -EIO;
b411b363
PR
4262 goto out;
4263 }
4264
e2857216 4265 c.packets[pi->cmd == P_BITMAP]++;
50d0b1ad 4266 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;
b411b363 4267
2c46407d
AG
4268 if (err <= 0) {
4269 if (err < 0)
4270 goto out;
b411b363 4271 break;
2c46407d 4272 }
e2857216 4273 err = drbd_recv_header(mdev->tconn, pi);
82bc0194 4274 if (err)
b411b363 4275 goto out;
2c46407d 4276 }
b411b363
PR
4277
4278 INFO_bm_xfer_stats(mdev, "receive", &c);
4279
4280 if (mdev->state.conn == C_WF_BITMAP_T) {
de1f8e4a
AG
4281 enum drbd_state_rv rv;
4282
82bc0194
AG
4283 err = drbd_send_bitmap(mdev);
4284 if (err)
b411b363
PR
4285 goto out;
4286 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
de1f8e4a
AG
4287 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
4288 D_ASSERT(rv == SS_SUCCESS);
b411b363
PR
4289 } else if (mdev->state.conn != C_WF_BITMAP_S) {
4290 /* admin may have requested C_DISCONNECTING,
4291 * other threads may have noticed network errors */
4292 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
4293 drbd_conn_str(mdev->state.conn));
4294 }
82bc0194 4295 err = 0;
b411b363 4296
b411b363 4297 out:
20ceb2b2 4298 drbd_bm_unlock(mdev);
82bc0194 4299 if (!err && mdev->state.conn == C_WF_BITMAP_S)
b411b363 4300 drbd_start_resync(mdev, C_SYNC_SOURCE);
82bc0194 4301 return err;
b411b363
PR
4302}
4303
4a76b161 4304static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4305{
4a76b161 4306 conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
e2857216 4307 pi->cmd, pi->size);
b411b363 4308
4a76b161 4309 return ignore_remaining_packet(tconn, pi);
b411b363
PR
4310}
4311
4a76b161 4312static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
0ced55a3 4313{
e7f52dfb
LE
4314 /* Make sure we've acked all the TCP data associated
4315 * with the data requests being unplugged */
4a76b161 4316 drbd_tcp_quickack(tconn->data.socket);
0ced55a3 4317
82bc0194 4318 return 0;
0ced55a3
PR
4319}
4320
4a76b161 4321static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
73a01a18 4322{
4a76b161 4323 struct drbd_conf *mdev;
e658983a 4324 struct p_block_desc *p = pi->data;
4a76b161
AG
4325
4326 mdev = vnr_to_mdev(tconn, pi->vnr);
4327 if (!mdev)
4328 return -EIO;
73a01a18 4329
f735e363
LE
4330 switch (mdev->state.conn) {
4331 case C_WF_SYNC_UUID:
4332 case C_WF_BITMAP_T:
4333 case C_BEHIND:
4334 break;
4335 default:
4336 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
4337 drbd_conn_str(mdev->state.conn));
4338 }
4339
73a01a18
PR
4340 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
4341
82bc0194 4342 return 0;
73a01a18
PR
4343}
4344
02918be2
PR
4345struct data_cmd {
4346 int expect_payload;
4347 size_t pkt_size;
4a76b161 4348 int (*fn)(struct drbd_tconn *, struct packet_info *);
02918be2
PR
4349};
4350
4351static struct data_cmd drbd_cmd_handler[] = {
4352 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
4353 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
4354 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
4355 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
e658983a
AG
4356 [P_BITMAP] = { 1, 0, receive_bitmap } ,
4357 [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
4358 [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote },
02918be2
PR
4359 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4360 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
e658983a
AG
4361 [P_SYNC_PARAM] = { 1, 0, receive_SyncParam },
4362 [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam },
02918be2
PR
4363 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
4364 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
4365 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
4366 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
4367 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
4368 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
4369 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4370 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4371 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4372 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
73a01a18 4373 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
4a76b161 4374 [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
036b17ea 4375 [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
b411b363
PR
4376};
4377
eefc2f7d 4378static void drbdd(struct drbd_tconn *tconn)
b411b363 4379{
77351055 4380 struct packet_info pi;
02918be2 4381 size_t shs; /* sub header size */
82bc0194 4382 int err;
b411b363 4383
eefc2f7d 4384 while (get_t_state(&tconn->receiver) == RUNNING) {
deebe195 4385 struct data_cmd *cmd;
b411b363 4386
eefc2f7d 4387 drbd_thread_current_set_cpu(&tconn->receiver);
69bc7bc3 4388 if (drbd_recv_header(tconn, &pi))
02918be2 4389 goto err_out;
b411b363 4390
deebe195 4391 cmd = &drbd_cmd_handler[pi.cmd];
4a76b161 4392 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
2fcb8f30
AG
4393 conn_err(tconn, "Unexpected data packet %s (0x%04x)",
4394 cmdname(pi.cmd), pi.cmd);
02918be2 4395 goto err_out;
0b33a916 4396 }
b411b363 4397
e658983a
AG
4398 shs = cmd->pkt_size;
4399 if (pi.size > shs && !cmd->expect_payload) {
2fcb8f30
AG
4400 conn_err(tconn, "No payload expected %s l:%d\n",
4401 cmdname(pi.cmd), pi.size);
02918be2 4402 goto err_out;
b411b363 4403 }
b411b363 4404
c13f7e1a 4405 if (shs) {
e658983a 4406 err = drbd_recv_all_warn(tconn, pi.data, shs);
a5c31904 4407 if (err)
c13f7e1a 4408 goto err_out;
e2857216 4409 pi.size -= shs;
c13f7e1a
LE
4410 }
4411
4a76b161
AG
4412 err = cmd->fn(tconn, &pi);
4413 if (err) {
9f5bdc33
AG
4414 conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
4415 cmdname(pi.cmd), err, pi.size);
02918be2 4416 goto err_out;
b411b363
PR
4417 }
4418 }
82bc0194 4419 return;
b411b363 4420
82bc0194
AG
4421 err_out:
4422 conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
b411b363
PR
4423}
4424
0e29d163 4425void conn_flush_workqueue(struct drbd_tconn *tconn)
b411b363
PR
4426{
4427 struct drbd_wq_barrier barr;
4428
4429 barr.w.cb = w_prev_work_done;
0e29d163 4430 barr.w.tconn = tconn;
b411b363 4431 init_completion(&barr.done);
d5b27b01 4432 drbd_queue_work(&tconn->sender_work, &barr.w);
b411b363
PR
4433 wait_for_completion(&barr.done);
4434}
4435
81fa2e67 4436static void conn_disconnect(struct drbd_tconn *tconn)
b411b363 4437{
c141ebda 4438 struct drbd_conf *mdev;
bbeb641c 4439 enum drbd_conns oc;
376694a0 4440 int vnr;
b411b363 4441
bbeb641c 4442 if (tconn->cstate == C_STANDALONE)
b411b363 4443 return;
b411b363 4444
545752d5
LE
4445 /* We are about to start the cleanup after connection loss.
4446 * Make sure drbd_make_request knows about that.
4447 * Usually we should be in some network failure state already,
4448 * but just in case we are not, we fix it up here.
4449 */
b8853dbd 4450 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
545752d5 4451
b411b363 4452 /* asender does not clean up anything. it must not interfere, either */
360cc740
PR
4453 drbd_thread_stop(&tconn->asender);
4454 drbd_free_sock(tconn);
4455
c141ebda
PR
4456 rcu_read_lock();
4457 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
4458 kref_get(&mdev->kref);
4459 rcu_read_unlock();
4460 drbd_disconnected(mdev);
4461 kref_put(&mdev->kref, &drbd_minor_destroy);
4462 rcu_read_lock();
4463 }
4464 rcu_read_unlock();
4465
12038a3a
PR
4466 if (!list_empty(&tconn->current_epoch->list))
4467 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
4468 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
4469 atomic_set(&tconn->current_epoch->epoch_size, 0);
b6dd1a89 4470 tconn->send.seen_any_write_yet = false;
12038a3a 4471
360cc740
PR
4472 conn_info(tconn, "Connection closed\n");
4473
cb703454
PR
4474 if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
4475 conn_try_outdate_peer_async(tconn);
4476
360cc740 4477 spin_lock_irq(&tconn->req_lock);
bbeb641c
PR
4478 oc = tconn->cstate;
4479 if (oc >= C_UNCONNECTED)
376694a0 4480 _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
bbeb641c 4481
360cc740
PR
4482 spin_unlock_irq(&tconn->req_lock);
4483
f3dfa40a 4484 if (oc == C_DISCONNECTING)
d9cc6e23 4485 conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
360cc740
PR
4486}
4487
c141ebda 4488static int drbd_disconnected(struct drbd_conf *mdev)
360cc740 4489{
360cc740 4490 unsigned int i;
b411b363 4491
85719573 4492 /* wait for current activity to cease. */
87eeee41 4493 spin_lock_irq(&mdev->tconn->req_lock);
b411b363
PR
4494 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
4495 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
4496 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
87eeee41 4497 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363
PR
4498
4499 /* We do not have data structures that would allow us to
4500 * get the rs_pending_cnt down to 0 again.
4501 * * On C_SYNC_TARGET we do not have any data structures describing
4502 * the pending RSDataRequest's we have sent.
4503 * * On C_SYNC_SOURCE there is no data structure that tracks
4504 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
4505 * And no, it is not the sum of the reference counts in the
4506 * resync_LRU. The resync_LRU tracks the whole operation including
4507 * the disk-IO, while the rs_pending_cnt only tracks the blocks
4508 * on the fly. */
4509 drbd_rs_cancel_all(mdev);
4510 mdev->rs_total = 0;
4511 mdev->rs_failed = 0;
4512 atomic_set(&mdev->rs_pending_cnt, 0);
4513 wake_up(&mdev->misc_wait);
4514
b411b363 4515 del_timer_sync(&mdev->resync_timer);
b411b363
PR
4516 resync_timer_fn((unsigned long)mdev);
4517
b411b363
PR
4518 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
4519 * w_make_resync_request etc. which may still be on the worker queue
4520 * to be "canceled" */
4521 drbd_flush_workqueue(mdev);
4522
a990be46 4523 drbd_finish_peer_reqs(mdev);
b411b363 4524
d10b4ea3
PR
4525 /* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
4526 might have queued more work. The flush before drbd_finish_peer_reqs() is
4527 necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
4528 drbd_flush_workqueue(mdev);
4529
08332d73
LE
4530 /* need to do it again, drbd_finish_peer_reqs() may have populated it
4531 * again via drbd_try_clear_on_disk_bm(). */
4532 drbd_rs_cancel_all(mdev);
b411b363
PR
4533
4534 kfree(mdev->p_uuid);
4535 mdev->p_uuid = NULL;
4536
2aebfabb 4537 if (!drbd_suspended(mdev))
2f5cdd0b 4538 tl_clear(mdev->tconn);
b411b363
PR
4539
4540 drbd_md_sync(mdev);
4541
20ceb2b2
LE
4542 /* serialize with bitmap writeout triggered by the state change,
4543 * if any. */
4544 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
4545
b411b363
PR
4546 /* tcp_close and release of sendpage pages can be deferred. I don't
4547 * want to use SO_LINGER, because apparently it can be deferred for
4548 * more than 20 seconds (longest time I checked).
4549 *
4550 * Actually we don't care exactly when the network stack does its
4551 * put_page(), but release our reference on these pages right here.
4552 */
7721f567 4553 i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
b411b363
PR
4554 if (i)
4555 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
435f0740
LE
4556 i = atomic_read(&mdev->pp_in_use_by_net);
4557 if (i)
4558 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
b411b363
PR
4559 i = atomic_read(&mdev->pp_in_use);
4560 if (i)
45bb912b 4561 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
b411b363
PR
4562
4563 D_ASSERT(list_empty(&mdev->read_ee));
4564 D_ASSERT(list_empty(&mdev->active_ee));
4565 D_ASSERT(list_empty(&mdev->sync_ee));
4566 D_ASSERT(list_empty(&mdev->done_ee));
4567
360cc740 4568 return 0;
b411b363
PR
4569}
4570
4571/*
4572 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
4573 * we can agree on is stored in agreed_pro_version.
4574 *
4575 * feature flags and the reserved array should be enough room for future
4576 * enhancements of the handshake protocol, and possible plugins...
4577 *
4578 * for now, they are expected to be zero, but ignored.
4579 */
6038178e 4580static int drbd_send_features(struct drbd_tconn *tconn)
b411b363 4581{
9f5bdc33
AG
4582 struct drbd_socket *sock;
4583 struct p_connection_features *p;
b411b363 4584
9f5bdc33
AG
4585 sock = &tconn->data;
4586 p = conn_prepare_command(tconn, sock);
4587 if (!p)
e8d17b01 4588 return -EIO;
b411b363
PR
4589 memset(p, 0, sizeof(*p));
4590 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
4591 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
9f5bdc33 4592 return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
b411b363
PR
4593}
4594
4595/*
4596 * return values:
4597 * 1 yes, we have a valid connection
4598 * 0 oops, did not work out, please try again
4599 * -1 peer talks different language,
4600 * no point in trying again, please go standalone.
4601 */
6038178e 4602static int drbd_do_features(struct drbd_tconn *tconn)
b411b363 4603{
65d11ed6 4604 /* ASSERT current == tconn->receiver ... */
e658983a
AG
4605 struct p_connection_features *p;
4606 const int expect = sizeof(struct p_connection_features);
77351055 4607 struct packet_info pi;
a5c31904 4608 int err;
b411b363 4609
6038178e 4610 err = drbd_send_features(tconn);
e8d17b01 4611 if (err)
b411b363
PR
4612 return 0;
4613
69bc7bc3
AG
4614 err = drbd_recv_header(tconn, &pi);
4615 if (err)
b411b363
PR
4616 return 0;
4617
6038178e
AG
4618 if (pi.cmd != P_CONNECTION_FEATURES) {
4619 conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
2fcb8f30 4620 cmdname(pi.cmd), pi.cmd);
b411b363
PR
4621 return -1;
4622 }
4623
77351055 4624 if (pi.size != expect) {
6038178e 4625 conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
77351055 4626 expect, pi.size);
b411b363
PR
4627 return -1;
4628 }
4629
e658983a
AG
4630 p = pi.data;
4631 err = drbd_recv_all_warn(tconn, p, expect);
a5c31904 4632 if (err)
b411b363 4633 return 0;
b411b363 4634
b411b363
PR
4635 p->protocol_min = be32_to_cpu(p->protocol_min);
4636 p->protocol_max = be32_to_cpu(p->protocol_max);
4637 if (p->protocol_max == 0)
4638 p->protocol_max = p->protocol_min;
4639
4640 if (PRO_VERSION_MAX < p->protocol_min ||
4641 PRO_VERSION_MIN > p->protocol_max)
4642 goto incompat;
4643
65d11ed6 4644 tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
b411b363 4645
65d11ed6
PR
4646 conn_info(tconn, "Handshake successful: "
4647 "Agreed network protocol version %d\n", tconn->agreed_pro_version);
b411b363
PR
4648
4649 return 1;
4650
4651 incompat:
65d11ed6 4652 conn_err(tconn, "incompatible DRBD dialects: "
b411b363
PR
4653 "I support %d-%d, peer supports %d-%d\n",
4654 PRO_VERSION_MIN, PRO_VERSION_MAX,
4655 p->protocol_min, p->protocol_max);
4656 return -1;
4657}
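/* Worked example with made-up version numbers, not part of the driver
 * source: if our compiled-in range PRO_VERSION_MIN..PRO_VERSION_MAX is
 * 86..100 and the peer announces protocol_min..protocol_max = 90..96, the
 * ranges overlap and agreed_pro_version = min(100, 96) = 96.  If the peer
 * announced 70..80 instead, PRO_VERSION_MIN > protocol_max and the
 * handshake fails with "incompatible DRBD dialects". */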
4658
4659#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
13e6037d 4660static int drbd_do_auth(struct drbd_tconn *tconn)
b411b363
PR
4661{
4662 dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
4663 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
b10d96cb 4664 return -1;
b411b363
PR
4665}
4666#else
4667#define CHALLENGE_LEN 64
b10d96cb
JT
4668
4669/* Return value:
4670 1 - auth succeeded,
4671 0 - failed, try again (network error),
4672 -1 - auth failed, don't try again.
4673*/
4674
13e6037d 4675static int drbd_do_auth(struct drbd_tconn *tconn)
b411b363 4676{
9f5bdc33 4677 struct drbd_socket *sock;
b411b363
PR
4678 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4679 struct scatterlist sg;
4680 char *response = NULL;
4681 char *right_response = NULL;
4682 char *peers_ch = NULL;
44ed167d
PR
4683 unsigned int key_len;
4684 char secret[SHARED_SECRET_MAX]; /* 64 byte */
b411b363
PR
4685 unsigned int resp_size;
4686 struct hash_desc desc;
77351055 4687 struct packet_info pi;
44ed167d 4688 struct net_conf *nc;
69bc7bc3 4689 int err, rv;
b411b363 4690
9f5bdc33 4691 /* FIXME: Put the challenge/response into the preallocated socket buffer. */
b411b363 4692
44ed167d
PR
4693 rcu_read_lock();
4694 nc = rcu_dereference(tconn->net_conf);
4695 key_len = strlen(nc->shared_secret);
4696 memcpy(secret, nc->shared_secret, key_len);
4697 rcu_read_unlock();
4698
13e6037d 4699 desc.tfm = tconn->cram_hmac_tfm;
b411b363
PR
4700 desc.flags = 0;
4701
44ed167d 4702 rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len);
b411b363 4703 if (rv) {
13e6037d 4704 conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
b10d96cb 4705 rv = -1;
b411b363
PR
4706 goto fail;
4707 }
4708
4709 get_random_bytes(my_challenge, CHALLENGE_LEN);
4710
9f5bdc33
AG
4711 sock = &tconn->data;
4712 if (!conn_prepare_command(tconn, sock)) {
4713 rv = 0;
4714 goto fail;
4715 }
e658983a 4716 rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
9f5bdc33 4717 my_challenge, CHALLENGE_LEN);
b411b363
PR
4718 if (!rv)
4719 goto fail;
4720
69bc7bc3
AG
4721 err = drbd_recv_header(tconn, &pi);
4722 if (err) {
4723 rv = 0;
b411b363 4724 goto fail;
69bc7bc3 4725 }
b411b363 4726
77351055 4727 if (pi.cmd != P_AUTH_CHALLENGE) {
13e6037d 4728 conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
2fcb8f30 4729 cmdname(pi.cmd), pi.cmd);
b411b363
PR
4730 rv = 0;
4731 goto fail;
4732 }
4733
77351055 4734 if (pi.size > CHALLENGE_LEN * 2) {
13e6037d 4735 conn_err(tconn, "expected AuthChallenge payload too big.\n");
b10d96cb 4736 rv = -1;
b411b363
PR
4737 goto fail;
4738 }
4739
77351055 4740 peers_ch = kmalloc(pi.size, GFP_NOIO);
b411b363 4741 if (peers_ch == NULL) {
13e6037d 4742 conn_err(tconn, "kmalloc of peers_ch failed\n");
b10d96cb 4743 rv = -1;
b411b363
PR
4744 goto fail;
4745 }
4746
a5c31904
AG
4747 err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
4748 if (err) {
b411b363
PR
4749 rv = 0;
4750 goto fail;
4751 }
4752
13e6037d 4753 resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
b411b363
PR
4754 response = kmalloc(resp_size, GFP_NOIO);
4755 if (response == NULL) {
13e6037d 4756 conn_err(tconn, "kmalloc of response failed\n");
b10d96cb 4757 rv = -1;
b411b363
PR
4758 goto fail;
4759 }
4760
4761 sg_init_table(&sg, 1);
77351055 4762 sg_set_buf(&sg, peers_ch, pi.size);
b411b363
PR
4763
4764 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4765 if (rv) {
13e6037d 4766 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
b10d96cb 4767 rv = -1;
b411b363
PR
4768 goto fail;
4769 }
4770
9f5bdc33
AG
4771 if (!conn_prepare_command(tconn, sock)) {
4772 rv = 0;
b411b363 4773 goto fail;
9f5bdc33 4774 }
e658983a 4775 rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
9f5bdc33 4776 response, resp_size);
b411b363
PR
4777 if (!rv)
4778 goto fail;
4779
69bc7bc3
AG
4780 err = drbd_recv_header(tconn, &pi);
4781 if (err) {
b411b363
PR
4782 rv = 0;
4783 goto fail;
4784 }
4785
77351055 4786 if (pi.cmd != P_AUTH_RESPONSE) {
13e6037d 4787 conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
2fcb8f30 4788 cmdname(pi.cmd), pi.cmd);
b411b363
PR
4789 rv = 0;
4790 goto fail;
4791 }
4792
77351055 4793 if (pi.size != resp_size) {
13e6037d 4794 conn_err(tconn, "expected AuthResponse payload of wrong size\n");
b411b363
PR
4795 rv = 0;
4796 goto fail;
4797 }
b411b363 4798
a5c31904
AG
4799 err = drbd_recv_all_warn(tconn, response , resp_size);
4800 if (err) {
b411b363
PR
4801 rv = 0;
4802 goto fail;
4803 }
4804
4805 right_response = kmalloc(resp_size, GFP_NOIO);
2d1ee87d 4806 if (right_response == NULL) {
13e6037d 4807 conn_err(tconn, "kmalloc of right_response failed\n");
b10d96cb 4808 rv = -1;
b411b363
PR
4809 goto fail;
4810 }
4811
4812 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4813
4814 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4815 if (rv) {
13e6037d 4816 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
b10d96cb 4817 rv = -1;
b411b363
PR
4818 goto fail;
4819 }
4820
4821 rv = !memcmp(response, right_response, resp_size);
4822
4823 if (rv)
44ed167d
PR
4824 conn_info(tconn, "Peer authenticated using %d bytes HMAC\n",
4825 resp_size);
b10d96cb
JT
4826 else
4827 rv = -1;
b411b363
PR
4828
4829 fail:
4830 kfree(peers_ch);
4831 kfree(response);
4832 kfree(right_response);
4833
4834 return rv;
4835}
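/* Illustrative summary, not part of the driver source: the exchange above
 * is a symmetric challenge-response.  Each side sends a random 64-byte
 * challenge (P_AUTH_CHALLENGE), answers the peer's challenge with
 * HMAC(shared_secret, peers_ch) in P_AUTH_RESPONSE, and then checks that
 * the peer's answer equals the locally computed
 * HMAC(shared_secret, my_challenge) ("right_response").  Only an exact
 * memcmp() match authenticates the peer. */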
4836#endif
4837
4838int drbdd_init(struct drbd_thread *thi)
4839{
392c8801 4840 struct drbd_tconn *tconn = thi->tconn;
b411b363
PR
4841 int h;
4842
4d641dd7 4843 conn_info(tconn, "receiver (re)started\n");
b411b363
PR
4844
4845 do {
81fa2e67 4846 h = conn_connect(tconn);
b411b363 4847 if (h == 0) {
81fa2e67 4848 conn_disconnect(tconn);
20ee6390 4849 schedule_timeout_interruptible(HZ);
b411b363
PR
4850 }
4851 if (h == -1) {
4d641dd7 4852 conn_warn(tconn, "Discarding network configuration.\n");
bbeb641c 4853 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363
PR
4854 }
4855 } while (h == 0);
4856
91fd4dad
PR
4857 if (h > 0)
4858 drbdd(tconn);
b411b363 4859
81fa2e67 4860 conn_disconnect(tconn);
b411b363 4861
4d641dd7 4862 conn_info(tconn, "receiver terminated\n");
b411b363
PR
4863 return 0;
4864}
4865
4866/* ********* acknowledge sender ******** */
4867
e05e1e59 4868static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4869{
e658983a 4870 struct p_req_state_reply *p = pi->data;
e4f78ede
PR
4871 int retcode = be32_to_cpu(p->retcode);
4872
4873 if (retcode >= SS_SUCCESS) {
4874 set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
4875 } else {
4876 set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
4877 conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
4878 drbd_set_st_err_str(retcode), retcode);
4879 }
4880 wake_up(&tconn->ping_wait);
4881
2735a594 4882 return 0;
e4f78ede 4883}
b411b363 4884
1952e916 4885static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4886{
1952e916 4887 struct drbd_conf *mdev;
e658983a 4888 struct p_req_state_reply *p = pi->data;
b411b363
PR
4889 int retcode = be32_to_cpu(p->retcode);
4890
1952e916
AG
4891 mdev = vnr_to_mdev(tconn, pi->vnr);
4892 if (!mdev)
2735a594 4893 return -EIO;
1952e916 4894
4d0fc3fd
PR
4895 if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) {
4896 D_ASSERT(tconn->agreed_pro_version < 100);
4897 return got_conn_RqSReply(tconn, pi);
4898 }
4899
b411b363 4900 if (retcode >= SS_SUCCESS) {
e4f78ede 4901 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
b411b363 4902 } else {
e4f78ede 4903 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
b411b363 4904 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
e4f78ede 4905 drbd_set_st_err_str(retcode), retcode);
b411b363
PR
4906 }
4907 wake_up(&mdev->state_wait);
4908
2735a594 4909 return 0;
b411b363
PR
4910}
4911
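/*
 * Keepalive on the meta socket: drbd_asender() below sends P_PING when the
 * SEND_PING flag is set and shortens sk_rcvtimeo to the ping timeout; the
 * peer answers via got_Ping() with a ping ack, and receiving P_PING_ACK
 * restores the idle timeout (ping_int) and wakes waiters on ping_wait.
 */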
e05e1e59 4912static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4913{
2735a594 4914 return drbd_send_ping_ack(tconn);
b411b363
PR
4915
4916}
4917
e05e1e59 4918static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363
PR
4919{
4920 /* restore idle timeout */
2a67d8b9
PR
4921 tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
4922 if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
4923 wake_up(&tconn->ping_wait);
b411b363 4924
2735a594 4925 return 0;
b411b363
PR
4926}
4927
1952e916 4928static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4929{
1952e916 4930 struct drbd_conf *mdev;
e658983a 4931 struct p_block_ack *p = pi->data;
b411b363
PR
4932 sector_t sector = be64_to_cpu(p->sector);
4933 int blksize = be32_to_cpu(p->blksize);
4934
1952e916
AG
4935 mdev = vnr_to_mdev(tconn, pi->vnr);
4936 if (!mdev)
2735a594 4937 return -EIO;
1952e916 4938
31890f4a 4939 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
b411b363
PR
4940
4941 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4942
1d53f09e
LE
4943 if (get_ldev(mdev)) {
4944 drbd_rs_complete_io(mdev, sector);
4945 drbd_set_in_sync(mdev, sector, blksize);
4946 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4947 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4948 put_ldev(mdev);
4949 }
b411b363 4950 dec_rs_pending(mdev);
778f271d 4951 atomic_add(blksize >> 9, &mdev->rs_sect_in);
b411b363 4952
2735a594 4953 return 0;
b411b363
PR
4954}
4955
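/*
 * Helper shared by the ACK handlers below: look up the request identified by
 * (id, sector) in the given tree (write_requests or read_requests), feed the
 * event into the request state machine under req_lock, and complete the
 * master bio if that transition finished it.  A typical caller (sketch,
 * mirroring got_BlockAck()):
 *
 *	return validate_req_change_req_state(mdev, p->block_id, sector,
 *					     &mdev->write_requests, __func__,
 *					     WRITE_ACKED_BY_PEER, false);
 *
 * missing_ok is only true for P_NEG_ACK, where the request may already be
 * gone (no P_WRITE_ACK in protocol A, or an earlier P_RECV_ACK in B).
 */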
bc9c5c41
AG
4956static int
4957validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
4958 struct rb_root *root, const char *func,
4959 enum drbd_req_event what, bool missing_ok)
b411b363
PR
4960{
4961 struct drbd_request *req;
4962 struct bio_and_error m;
4963
87eeee41 4964 spin_lock_irq(&mdev->tconn->req_lock);
bc9c5c41 4965 req = find_request(mdev, root, id, sector, missing_ok, func);
b411b363 4966 if (unlikely(!req)) {
87eeee41 4967 spin_unlock_irq(&mdev->tconn->req_lock);
85997675 4968 return -EIO;
b411b363
PR
4969 }
4970 __req_mod(req, what, &m);
87eeee41 4971 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363
PR
4972
4973 if (m.bio)
4974 complete_master_bio(mdev, &m);
85997675 4975 return 0;
b411b363
PR
4976}
4977
1952e916 4978static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4979{
1952e916 4980 struct drbd_conf *mdev;
e658983a 4981 struct p_block_ack *p = pi->data;
b411b363
PR
4982 sector_t sector = be64_to_cpu(p->sector);
4983 int blksize = be32_to_cpu(p->blksize);
4984 enum drbd_req_event what;
4985
1952e916
AG
4986 mdev = vnr_to_mdev(tconn, pi->vnr);
4987 if (!mdev)
2735a594 4988 return -EIO;
1952e916 4989
b411b363
PR
4990 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4991
579b57ed 4992 if (p->block_id == ID_SYNCER) {
b411b363
PR
4993 drbd_set_in_sync(mdev, sector, blksize);
4994 dec_rs_pending(mdev);
2735a594 4995 return 0;
b411b363 4996 }
e05e1e59 4997 switch (pi->cmd) {
b411b363 4998 case P_RS_WRITE_ACK:
8554df1c 4999 what = WRITE_ACKED_BY_PEER_AND_SIS;
b411b363
PR
5000 break;
5001 case P_WRITE_ACK:
8554df1c 5002 what = WRITE_ACKED_BY_PEER;
b411b363
PR
5003 break;
5004 case P_RECV_ACK:
8554df1c 5005 what = RECV_ACKED_BY_PEER;
b411b363 5006 break;
d4dabbe2
LE
5007 case P_SUPERSEDED:
5008 what = CONFLICT_RESOLVED;
b411b363 5009 break;
7be8da07 5010 case P_RETRY_WRITE:
7be8da07 5011 what = POSTPONE_WRITE;
b411b363
PR
5012 break;
5013 default:
2735a594 5014 BUG();
b411b363
PR
5015 }
5016
5017 return validate_req_change_req_state(mdev, p->block_id, sector,
2735a594
AG
5018 &mdev->write_requests, __func__,
5019 what, false);
b411b363
PR
5020}
5021
1952e916 5022static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 5023{
1952e916 5024 struct drbd_conf *mdev;
e658983a 5025 struct p_block_ack *p = pi->data;
b411b363 5026 sector_t sector = be64_to_cpu(p->sector);
2deb8336 5027 int size = be32_to_cpu(p->blksize);
85997675 5028 int err;
b411b363 5029
1952e916
AG
5030 mdev = vnr_to_mdev(tconn, pi->vnr);
5031 if (!mdev)
2735a594 5032 return -EIO;
b411b363
PR
5033
5034 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5035
579b57ed 5036 if (p->block_id == ID_SYNCER) {
b411b363
PR
5037 dec_rs_pending(mdev);
5038 drbd_rs_failed_io(mdev, sector, size);
2735a594 5039 return 0;
b411b363 5040 }
2deb8336 5041
85997675
AG
5042 err = validate_req_change_req_state(mdev, p->block_id, sector,
5043 &mdev->write_requests, __func__,
303d1448 5044 NEG_ACKED, true);
85997675 5045 if (err) {
c3afd8f5
AG
5046 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
5047 The master bio might already be completed, therefore the
5048 request is no longer in the collision hash. */
5049 /* In Protocol B we might already have got a P_RECV_ACK
5050 but then get a P_NEG_ACK afterwards. */
c3afd8f5 5051 drbd_set_out_of_sync(mdev, sector, size);
2deb8336 5052 }
2735a594 5053 return 0;
b411b363
PR
5054}
5055
1952e916 5056static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 5057{
1952e916 5058 struct drbd_conf *mdev;
e658983a 5059 struct p_block_ack *p = pi->data;
b411b363
PR
5060 sector_t sector = be64_to_cpu(p->sector);
5061
1952e916
AG
5062 mdev = vnr_to_mdev(tconn, pi->vnr);
5063 if (!mdev)
2735a594 5064 return -EIO;
1952e916 5065
b411b363 5066 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
7be8da07 5067
380207d0 5068 dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n",
b411b363
PR
5069 (unsigned long long)sector, be32_to_cpu(p->blksize));
5070
5071 return validate_req_change_req_state(mdev, p->block_id, sector,
2735a594
AG
5072 &mdev->read_requests, __func__,
5073 NEG_ACKED, false);
b411b363
PR
5074}
5075
1952e916 5076static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 5077{
1952e916 5078 struct drbd_conf *mdev;
b411b363
PR
5079 sector_t sector;
5080 int size;
e658983a 5081 struct p_block_ack *p = pi->data;
1952e916
AG
5082
5083 mdev = vnr_to_mdev(tconn, pi->vnr);
5084 if (!mdev)
2735a594 5085 return -EIO;
b411b363
PR
5086
5087 sector = be64_to_cpu(p->sector);
5088 size = be32_to_cpu(p->blksize);
b411b363
PR
5089
5090 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5091
5092 dec_rs_pending(mdev);
5093
5094 if (get_ldev_if_state(mdev, D_FAILED)) {
5095 drbd_rs_complete_io(mdev, sector);
e05e1e59 5096 switch (pi->cmd) {
d612d309
PR
5097 case P_NEG_RS_DREPLY:
5098 drbd_rs_failed_io(mdev, sector, size);
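			/* fall through */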
5099 case P_RS_CANCEL:
5100 break;
5101 default:
2735a594 5102 BUG();
d612d309 5103 }
b411b363
PR
5104 put_ldev(mdev);
5105 }
5106
2735a594 5107 return 0;
b411b363
PR
5108}
5109
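/*
 * A barrier ack closes an epoch via tl_release().  It is also the trigger to
 * leave Ahead mode: any volume that is C_AHEAD with no application I/O in
 * flight arms start_resync_timer (one second) to become sync source.
 */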
1952e916 5110static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 5111{
e658983a 5112 struct p_barrier_ack *p = pi->data;
9ed57dcb
LE
5113 struct drbd_conf *mdev;
5114 int vnr;
1952e916 5115
9ed57dcb 5116 tl_release(tconn, p->barrier, be32_to_cpu(p->set_size));
b411b363 5117
9ed57dcb
LE
5118 rcu_read_lock();
5119 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
5120 if (mdev->state.conn == C_AHEAD &&
5121 atomic_read(&mdev->ap_in_flight) == 0 &&
5122 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
5123 mdev->start_resync_timer.expires = jiffies + HZ;
5124 add_timer(&mdev->start_resync_timer);
5125 }
c4752ef1 5126 }
9ed57dcb 5127 rcu_read_unlock();
c4752ef1 5128
2735a594 5129 return 0;
b411b363
PR
5130}
5131
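/*
 * Online-verify reply handling: note out-of-sync blocks, count down ov_left,
 * advance the progress marks, and when the last reply arrives queue
 * w_ov_finished on sender_work so drbd_resync_finished() runs from worker
 * context (falling back to calling it directly if the allocation fails).
 */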
1952e916 5132static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 5133{
1952e916 5134 struct drbd_conf *mdev;
e658983a 5135 struct p_block_ack *p = pi->data;
b411b363
PR
5136 struct drbd_work *w;
5137 sector_t sector;
5138 int size;
5139
1952e916
AG
5140 mdev = vnr_to_mdev(tconn, pi->vnr);
5141 if (!mdev)
2735a594 5142 return -EIO;
1952e916 5143
b411b363
PR
5144 sector = be64_to_cpu(p->sector);
5145 size = be32_to_cpu(p->blksize);
5146
5147 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5148
5149 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
8f7bed77 5150 drbd_ov_out_of_sync_found(mdev, sector, size);
b411b363 5151 else
8f7bed77 5152 ov_out_of_sync_print(mdev);
b411b363 5153
1d53f09e 5154 if (!get_ldev(mdev))
2735a594 5155 return 0;
1d53f09e 5156
b411b363
PR
5157 drbd_rs_complete_io(mdev, sector);
5158 dec_rs_pending(mdev);
5159
ea5442af
LE
5160 --mdev->ov_left;
5161
5162 /* let's advance progress step marks only for every other megabyte */
5163 if ((mdev->ov_left & 0x200) == 0x200)
5164 drbd_advance_rs_marks(mdev, mdev->ov_left);
5165
5166 if (mdev->ov_left == 0) {
b411b363
PR
5167 w = kmalloc(sizeof(*w), GFP_NOIO);
5168 if (w) {
5169 w->cb = w_ov_finished;
a21e9298 5170 w->mdev = mdev;
d5b27b01 5171 drbd_queue_work(&mdev->tconn->sender_work, w);
b411b363
PR
5172 } else {
 5173 dev_err(DEV, "kmalloc(w) failed.\n");
8f7bed77 5174 ov_out_of_sync_print(mdev);
b411b363
PR
5175 drbd_resync_finished(mdev);
5176 }
5177 }
1d53f09e 5178 put_ldev(mdev);
2735a594 5179 return 0;
b411b363
PR
5180}
5181
1952e916 5182static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
0ced55a3 5183{
2735a594 5184 return 0;
b411b363
PR
5185}
5186
a990be46 5187static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
0ced55a3 5188{
082a3439 5189 struct drbd_conf *mdev;
c141ebda 5190 int vnr, not_empty = 0;
32862ec7
PR
5191
5192 do {
5193 clear_bit(SIGNAL_ASENDER, &tconn->flags);
5194 flush_signals(current);
c141ebda
PR
5195
5196 rcu_read_lock();
5197 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
5198 kref_get(&mdev->kref);
5199 rcu_read_unlock();
d3fcb490 5200 if (drbd_finish_peer_reqs(mdev)) {
c141ebda
PR
5201 kref_put(&mdev->kref, &drbd_minor_destroy);
5202 return 1;
d3fcb490 5203 }
c141ebda
PR
5204 kref_put(&mdev->kref, &drbd_minor_destroy);
5205 rcu_read_lock();
082a3439 5206 }
32862ec7 5207 set_bit(SIGNAL_ASENDER, &tconn->flags);
082a3439
PR
5208
5209 spin_lock_irq(&tconn->req_lock);
c141ebda 5210 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
082a3439
PR
5211 not_empty = !list_empty(&mdev->done_ee);
5212 if (not_empty)
5213 break;
5214 }
5215 spin_unlock_irq(&tconn->req_lock);
c141ebda 5216 rcu_read_unlock();
32862ec7
PR
5217 } while (not_empty);
5218
5219 return 0;
0ced55a3
PR
5220}
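/*
 * Note on the iteration above: drbd_finish_peer_reqs() may sleep, so the loop
 * takes a kref on each mdev, drops rcu_read_lock() around the call, and
 * re-acquires it before continuing, i.e. roughly:
 *
 *	rcu_read_lock();
 *	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
 *		kref_get(&mdev->kref);
 *		rcu_read_unlock();
 *		... possibly sleeping work on mdev ...
 *		kref_put(&mdev->kref, &drbd_minor_destroy);
 *		rcu_read_lock();
 *	}
 *	rcu_read_unlock();
 *
 * The second pass under req_lock re-checks done_ee and the outer do/while
 * loops until no device has completed peer requests left.
 */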
5221
b411b363
PR
5222struct asender_cmd {
5223 size_t pkt_size;
1952e916 5224 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
b411b363
PR
5225};
5226
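/*
 * Dispatch table for the meta-data (asender) socket.  The packet command
 * indexes directly into this array; pkt_size is the expected payload, so
 * drbd_asender() can compute expect = header_size + pkt_size and reject
 * mismatched packets, and fn() returns 0 on success or a negative error,
 * which forces a reconnect.
 */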
7201b972 5227static struct asender_cmd asender_tbl[] = {
e658983a
AG
5228 [P_PING] = { 0, got_Ping },
5229 [P_PING_ACK] = { 0, got_PingAck },
b411b363
PR
5230 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5231 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5232 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
d4dabbe2 5233 [P_SUPERSEDED] = { sizeof(struct p_block_ack), got_BlockAck },
b411b363
PR
5234 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
5235 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
1952e916 5236 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply },
b411b363
PR
5237 [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
5238 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
5239 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
5240 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
02918be2 5241 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
1952e916
AG
5242 [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply },
5243 [P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), got_conn_RqSReply },
5244 [P_RETRY_WRITE] = { sizeof(struct p_block_ack), got_BlockAck },
7201b972 5245};
b411b363
PR
5246
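/*
 * The receive loop below is a small state machine: it first reads
 * header_size bytes, decodes them into pi, then extends 'expect' by the
 * payload size from asender_tbl and reads the rest before dispatching.
 * 'received', 'buf' and 'expect' are reset after each complete packet.
 */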
5247int drbd_asender(struct drbd_thread *thi)
5248{
392c8801 5249 struct drbd_tconn *tconn = thi->tconn;
b411b363 5250 struct asender_cmd *cmd = NULL;
77351055 5251 struct packet_info pi;
257d0af6 5252 int rv;
e658983a 5253 void *buf = tconn->meta.rbuf;
b411b363 5254 int received = 0;
52b061a4
AG
5255 unsigned int header_size = drbd_header_size(tconn);
5256 int expect = header_size;
44ed167d
PR
5257 bool ping_timeout_active = false;
5258 struct net_conf *nc;
bb77d34e 5259 int ping_timeo, tcp_cork, ping_int;
b411b363
PR
5260
5261 current->policy = SCHED_RR; /* Make this a realtime task! */
5262 current->rt_priority = 2; /* more important than all other tasks */
5263
e77a0a5c 5264 while (get_t_state(thi) == RUNNING) {
80822284 5265 drbd_thread_current_set_cpu(thi);
b411b363 5266
44ed167d
PR
5267 rcu_read_lock();
5268 nc = rcu_dereference(tconn->net_conf);
5269 ping_timeo = nc->ping_timeo;
bb77d34e 5270 tcp_cork = nc->tcp_cork;
44ed167d
PR
5271 ping_int = nc->ping_int;
5272 rcu_read_unlock();
5273
32862ec7 5274 if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
a17647aa 5275 if (drbd_send_ping(tconn)) {
32862ec7 5276 conn_err(tconn, "drbd_send_ping has failed\n");
b411b363 5277 goto reconnect;
841ce241 5278 }
44ed167d
PR
5279 tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
5280 ping_timeout_active = true;
b411b363
PR
5281 }
5282
32862ec7
PR
5283 /* TODO: conditionally cork; it may hurt latency if we cork without
5284 much to send */
bb77d34e 5285 if (tcp_cork)
32862ec7 5286 drbd_tcp_cork(tconn->meta.socket);
a990be46
AG
5287 if (tconn_finish_peer_reqs(tconn)) {
5288 conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
32862ec7 5289 goto reconnect;
b411b363
PR
5290 }
5291 /* but unconditionally uncork unless disabled */
bb77d34e 5292 if (tcp_cork)
32862ec7 5293 drbd_tcp_uncork(tconn->meta.socket);
b411b363
PR
5294
5295 /* short circuit, recv_msg would return EINTR anyways. */
5296 if (signal_pending(current))
5297 continue;
5298
32862ec7
PR
5299 rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
5300 clear_bit(SIGNAL_ASENDER, &tconn->flags);
b411b363
PR
5301
5302 flush_signals(current);
5303
5304 /* Note:
5305 * -EINTR (on meta) we got a signal
5306 * -EAGAIN (on meta) rcvtimeo expired
5307 * -ECONNRESET other side closed the connection
5308 * -ERESTARTSYS (on data) we got a signal
5309 * rv < 0 other than above: unexpected error!
5310 * rv == expected: full header or command
5311 * rv < expected: "woken" by signal during receive
5312 * rv == 0 : "connection shut down by peer"
5313 */
5314 if (likely(rv > 0)) {
5315 received += rv;
5316 buf += rv;
5317 } else if (rv == 0) {
b66623e3
PR
5318 if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
5319 long t;
5320 rcu_read_lock();
5321 t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
5322 rcu_read_unlock();
5323
5324 t = wait_event_timeout(tconn->ping_wait,
5325 tconn->cstate < C_WF_REPORT_PARAMS,
5326 t);
599377ac
PR
5327 if (t)
5328 break;
5329 }
32862ec7 5330 conn_err(tconn, "meta connection shut down by peer.\n");
b411b363
PR
5331 goto reconnect;
5332 } else if (rv == -EAGAIN) {
cb6518cb
LE
5333 /* If the data socket received something meanwhile,
5334 * that is good enough: peer is still alive. */
32862ec7
PR
5335 if (time_after(tconn->last_received,
5336 jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
cb6518cb 5337 continue;
f36af18c 5338 if (ping_timeout_active) {
32862ec7 5339 conn_err(tconn, "PingAck did not arrive in time.\n");
b411b363
PR
5340 goto reconnect;
5341 }
32862ec7 5342 set_bit(SEND_PING, &tconn->flags);
b411b363
PR
5343 continue;
5344 } else if (rv == -EINTR) {
5345 continue;
5346 } else {
32862ec7 5347 conn_err(tconn, "sock_recvmsg returned %d\n", rv);
b411b363
PR
5348 goto reconnect;
5349 }
5350
5351 if (received == expect && cmd == NULL) {
e658983a 5352 if (decode_header(tconn, tconn->meta.rbuf, &pi))
b411b363 5353 goto reconnect;
1952e916 5355 if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !asender_tbl[pi.cmd].fn) {
2fcb8f30
AG
5356 conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n",
5357 cmdname(pi.cmd), pi.cmd);
b411b363
PR
5358 goto disconnect;
5359 }
7201b972 5354 cmd = &asender_tbl[pi.cmd];
e658983a 5360 expect = header_size + cmd->pkt_size;
52b061a4 5361 if (pi.size != expect - header_size) {
32862ec7 5362 conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
77351055 5363 pi.cmd, pi.size);
b411b363 5364 goto reconnect;
257d0af6 5365 }
b411b363
PR
5366 }
5367 if (received == expect) {
2735a594 5368 bool err;
a4fbda8e 5369
2735a594
AG
5370 err = cmd->fn(tconn, &pi);
5371 if (err) {
1952e916 5372 conn_err(tconn, "%pf failed\n", cmd->fn);
b411b363 5373 goto reconnect;
1952e916 5374 }
b411b363 5375
a4fbda8e 5376 tconn->last_received = jiffies;
f36af18c 5377
44ed167d
PR
5378 if (cmd == &asender_tbl[P_PING_ACK]) {
5379 /* restore idle timeout */
5380 tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
5381 ping_timeout_active = false;
5382 }
f36af18c 5383
e658983a 5384 buf = tconn->meta.rbuf;
b411b363 5385 received = 0;
52b061a4 5386 expect = header_size;
b411b363
PR
5387 cmd = NULL;
5388 }
5389 }
5390
5391 if (0) {
5392reconnect:
bbeb641c 5393 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
19fffd7b 5394 conn_md_sync(tconn);
b411b363
PR
5395 }
5396 if (0) {
5397disconnect:
bbeb641c 5398 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363 5399 }
32862ec7 5400 clear_bit(SIGNAL_ASENDER, &tconn->flags);
b411b363 5401
32862ec7 5402 conn_info(tconn, "asender terminated\n");
b411b363
PR
5403
5404 return 0;
5405}