]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/block/drbd/drbd_receiver.c
block: Abstract out bvec iterator
[mirror_ubuntu-artful-kernel.git] / drivers / block / drbd / drbd_receiver.c
CommitLineData
b411b363
PR
1/*
2 drbd_receiver.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
13 any later version.
14
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25
b411b363
PR
26#include <linux/module.h>
27
28#include <asm/uaccess.h>
29#include <net/sock.h>
30
b411b363
PR
31#include <linux/drbd.h>
32#include <linux/fs.h>
33#include <linux/file.h>
34#include <linux/in.h>
35#include <linux/mm.h>
36#include <linux/memcontrol.h>
37#include <linux/mm_inline.h>
38#include <linux/slab.h>
b411b363
PR
39#include <linux/pkt_sched.h>
40#define __KERNEL_SYSCALLS__
41#include <linux/unistd.h>
42#include <linux/vmalloc.h>
43#include <linux/random.h>
b411b363
PR
44#include <linux/string.h>
45#include <linux/scatterlist.h>
46#include "drbd_int.h"
b411b363
PR
47#include "drbd_req.h"
48
49#include "drbd_vli.h"
50
77351055
PR
51struct packet_info {
52 enum drbd_packet cmd;
e2857216
AG
53 unsigned int size;
54 unsigned int vnr;
e658983a 55 void *data;
77351055
PR
56};
57
b411b363
PR
58enum finish_epoch {
59 FE_STILL_LIVE,
60 FE_DESTROYED,
61 FE_RECYCLED,
62};
63
6038178e 64static int drbd_do_features(struct drbd_tconn *tconn);
13e6037d 65static int drbd_do_auth(struct drbd_tconn *tconn);
c141ebda 66static int drbd_disconnected(struct drbd_conf *mdev);
b411b363 67
1e9dd291 68static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event);
99920dc5 69static int e_end_block(struct drbd_work *, int);
b411b363 70
b411b363
PR
71
72#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
73
45bb912b
LE
74/*
75 * some helper functions to deal with single linked page lists,
76 * page->private being our "next" pointer.
77 */
78
79/* If at least n pages are linked at head, get n pages off.
80 * Otherwise, don't modify head, and return NULL.
81 * Locking is the responsibility of the caller.
82 */
83static struct page *page_chain_del(struct page **head, int n)
84{
85 struct page *page;
86 struct page *tmp;
87
88 BUG_ON(!n);
89 BUG_ON(!head);
90
91 page = *head;
23ce4227
PR
92
93 if (!page)
94 return NULL;
95
45bb912b
LE
96 while (page) {
97 tmp = page_chain_next(page);
98 if (--n == 0)
99 break; /* found sufficient pages */
100 if (tmp == NULL)
101 /* insufficient pages, don't use any of them. */
102 return NULL;
103 page = tmp;
104 }
105
106 /* add end of list marker for the returned list */
107 set_page_private(page, 0);
108 /* actual return value, and adjustment of head */
109 page = *head;
110 *head = tmp;
111 return page;
112}
113
114/* may be used outside of locks to find the tail of a (usually short)
115 * "private" page chain, before adding it back to a global chain head
116 * with page_chain_add() under a spinlock. */
117static struct page *page_chain_tail(struct page *page, int *len)
118{
119 struct page *tmp;
120 int i = 1;
121 while ((tmp = page_chain_next(page)))
122 ++i, page = tmp;
123 if (len)
124 *len = i;
125 return page;
126}
127
128static int page_chain_free(struct page *page)
129{
130 struct page *tmp;
131 int i = 0;
132 page_chain_for_each_safe(page, tmp) {
133 put_page(page);
134 ++i;
135 }
136 return i;
137}
138
139static void page_chain_add(struct page **head,
140 struct page *chain_first, struct page *chain_last)
141{
142#if 1
143 struct page *tmp;
144 tmp = page_chain_tail(chain_first, NULL);
145 BUG_ON(tmp != chain_last);
146#endif
147
148 /* add chain to head */
149 set_page_private(chain_last, (unsigned long)*head);
150 *head = chain_first;
151}
152
18c2d522
AG
153static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
154 unsigned int number)
b411b363
PR
155{
156 struct page *page = NULL;
45bb912b 157 struct page *tmp = NULL;
18c2d522 158 unsigned int i = 0;
b411b363
PR
159
160 /* Yes, testing drbd_pp_vacant outside the lock is racy.
161 * So what. It saves a spin_lock. */
45bb912b 162 if (drbd_pp_vacant >= number) {
b411b363 163 spin_lock(&drbd_pp_lock);
45bb912b
LE
164 page = page_chain_del(&drbd_pp_pool, number);
165 if (page)
166 drbd_pp_vacant -= number;
b411b363 167 spin_unlock(&drbd_pp_lock);
45bb912b
LE
168 if (page)
169 return page;
b411b363 170 }
45bb912b 171
b411b363
PR
172 /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
173 * "criss-cross" setup, that might cause write-out on some other DRBD,
174 * which in turn might block on the other node at this very place. */
45bb912b
LE
175 for (i = 0; i < number; i++) {
176 tmp = alloc_page(GFP_TRY);
177 if (!tmp)
178 break;
179 set_page_private(tmp, (unsigned long)page);
180 page = tmp;
181 }
182
183 if (i == number)
184 return page;
185
186 /* Not enough pages immediately available this time.
c37c8ecf 187 * No need to jump around here, drbd_alloc_pages will retry this
45bb912b
LE
188 * function "soon". */
189 if (page) {
190 tmp = page_chain_tail(page, NULL);
191 spin_lock(&drbd_pp_lock);
192 page_chain_add(&drbd_pp_pool, page, tmp);
193 drbd_pp_vacant += i;
194 spin_unlock(&drbd_pp_lock);
195 }
196 return NULL;
b411b363
PR
197}
198
a990be46
AG
199static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
200 struct list_head *to_be_freed)
b411b363 201{
db830c46 202 struct drbd_peer_request *peer_req;
b411b363
PR
203 struct list_head *le, *tle;
204
205 /* The EEs are always appended to the end of the list. Since
206 they are sent in order over the wire, they have to finish
207 in order. As soon as we see the first not finished we can
208 stop to examine the list... */
209
210 list_for_each_safe(le, tle, &mdev->net_ee) {
db830c46 211 peer_req = list_entry(le, struct drbd_peer_request, w.list);
045417f7 212 if (drbd_peer_req_has_active_page(peer_req))
b411b363
PR
213 break;
214 list_move(le, to_be_freed);
215 }
216}
217
218static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
219{
220 LIST_HEAD(reclaimed);
db830c46 221 struct drbd_peer_request *peer_req, *t;
b411b363 222
87eeee41 223 spin_lock_irq(&mdev->tconn->req_lock);
a990be46 224 reclaim_finished_net_peer_reqs(mdev, &reclaimed);
87eeee41 225 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 226
db830c46 227 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
3967deb1 228 drbd_free_net_peer_req(mdev, peer_req);
b411b363
PR
229}
230
231/**
c37c8ecf 232 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
b411b363 233 * @mdev: DRBD device.
45bb912b
LE
234 * @number: number of pages requested
235 * @retry: whether to retry, if not enough pages are available right now
236 *
237 * Tries to allocate number pages, first from our own page pool, then from
238 * the kernel, unless this allocation would exceed the max_buffers setting.
239 * Possibly retry until DRBD frees sufficient pages somewhere else.
b411b363 240 *
45bb912b 241 * Returns a page chain linked via page->private.
b411b363 242 */
c37c8ecf
AG
243struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
244 bool retry)
b411b363
PR
245{
246 struct page *page = NULL;
44ed167d 247 struct net_conf *nc;
b411b363 248 DEFINE_WAIT(wait);
44ed167d 249 int mxb;
b411b363 250
45bb912b
LE
251 /* Yes, we may run up to @number over max_buffers. If we
252 * follow it strictly, the admin will get it wrong anyways. */
44ed167d
PR
253 rcu_read_lock();
254 nc = rcu_dereference(mdev->tconn->net_conf);
255 mxb = nc ? nc->max_buffers : 1000000;
256 rcu_read_unlock();
257
258 if (atomic_read(&mdev->pp_in_use) < mxb)
18c2d522 259 page = __drbd_alloc_pages(mdev, number);
b411b363 260
45bb912b 261 while (page == NULL) {
b411b363
PR
262 prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
263
264 drbd_kick_lo_and_reclaim_net(mdev);
265
44ed167d 266 if (atomic_read(&mdev->pp_in_use) < mxb) {
18c2d522 267 page = __drbd_alloc_pages(mdev, number);
b411b363
PR
268 if (page)
269 break;
270 }
271
272 if (!retry)
273 break;
274
275 if (signal_pending(current)) {
c37c8ecf 276 dev_warn(DEV, "drbd_alloc_pages interrupted!\n");
b411b363
PR
277 break;
278 }
279
280 schedule();
281 }
282 finish_wait(&drbd_pp_wait, &wait);
283
45bb912b
LE
284 if (page)
285 atomic_add(number, &mdev->pp_in_use);
b411b363
PR
286 return page;
287}
288
c37c8ecf 289/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
87eeee41 290 * Is also used from inside an other spin_lock_irq(&mdev->tconn->req_lock);
45bb912b
LE
291 * Either links the page chain back to the global pool,
292 * or returns all pages to the system. */
5cc287e0 293static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
b411b363 294{
435f0740 295 atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
b411b363 296 int i;
435f0740 297
a73ff323
LE
298 if (page == NULL)
299 return;
300
81a5d60e 301 if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
45bb912b
LE
302 i = page_chain_free(page);
303 else {
304 struct page *tmp;
305 tmp = page_chain_tail(page, &i);
306 spin_lock(&drbd_pp_lock);
307 page_chain_add(&drbd_pp_pool, page, tmp);
308 drbd_pp_vacant += i;
309 spin_unlock(&drbd_pp_lock);
b411b363 310 }
435f0740 311 i = atomic_sub_return(i, a);
45bb912b 312 if (i < 0)
435f0740
LE
313 dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
314 is_net ? "pp_in_use_by_net" : "pp_in_use", i);
b411b363
PR
315 wake_up(&drbd_pp_wait);
316}
317
318/*
319You need to hold the req_lock:
320 _drbd_wait_ee_list_empty()
321
322You must not have the req_lock:
3967deb1 323 drbd_free_peer_req()
0db55363 324 drbd_alloc_peer_req()
7721f567 325 drbd_free_peer_reqs()
b411b363 326 drbd_ee_fix_bhs()
a990be46 327 drbd_finish_peer_reqs()
b411b363
PR
328 drbd_clear_done_ee()
329 drbd_wait_ee_list_empty()
330*/
331
f6ffca9f 332struct drbd_peer_request *
0db55363
AG
333drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
334 unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
b411b363 335{
db830c46 336 struct drbd_peer_request *peer_req;
a73ff323 337 struct page *page = NULL;
45bb912b 338 unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
b411b363 339
0cf9d27e 340 if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
b411b363
PR
341 return NULL;
342
db830c46
AG
343 peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
344 if (!peer_req) {
b411b363 345 if (!(gfp_mask & __GFP_NOWARN))
0db55363 346 dev_err(DEV, "%s: allocation failed\n", __func__);
b411b363
PR
347 return NULL;
348 }
349
a73ff323 350 if (data_size) {
81a3537a 351 page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
a73ff323
LE
352 if (!page)
353 goto fail;
354 }
b411b363 355
db830c46
AG
356 drbd_clear_interval(&peer_req->i);
357 peer_req->i.size = data_size;
358 peer_req->i.sector = sector;
359 peer_req->i.local = false;
360 peer_req->i.waiting = false;
361
362 peer_req->epoch = NULL;
a21e9298 363 peer_req->w.mdev = mdev;
db830c46
AG
364 peer_req->pages = page;
365 atomic_set(&peer_req->pending_bios, 0);
366 peer_req->flags = 0;
9a8e7753
AG
367 /*
368 * The block_id is opaque to the receiver. It is not endianness
369 * converted, and sent back to the sender unchanged.
370 */
db830c46 371 peer_req->block_id = id;
b411b363 372
db830c46 373 return peer_req;
b411b363 374
45bb912b 375 fail:
db830c46 376 mempool_free(peer_req, drbd_ee_mempool);
b411b363
PR
377 return NULL;
378}
379
3967deb1 380void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
f6ffca9f 381 int is_net)
b411b363 382{
db830c46
AG
383 if (peer_req->flags & EE_HAS_DIGEST)
384 kfree(peer_req->digest);
5cc287e0 385 drbd_free_pages(mdev, peer_req->pages, is_net);
db830c46
AG
386 D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
387 D_ASSERT(drbd_interval_empty(&peer_req->i));
388 mempool_free(peer_req, drbd_ee_mempool);
b411b363
PR
389}
390
7721f567 391int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
b411b363
PR
392{
393 LIST_HEAD(work_list);
db830c46 394 struct drbd_peer_request *peer_req, *t;
b411b363 395 int count = 0;
435f0740 396 int is_net = list == &mdev->net_ee;
b411b363 397
87eeee41 398 spin_lock_irq(&mdev->tconn->req_lock);
b411b363 399 list_splice_init(list, &work_list);
87eeee41 400 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 401
db830c46 402 list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
3967deb1 403 __drbd_free_peer_req(mdev, peer_req, is_net);
b411b363
PR
404 count++;
405 }
406 return count;
407}
408
b411b363 409/*
a990be46 410 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
b411b363 411 */
a990be46 412static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
b411b363
PR
413{
414 LIST_HEAD(work_list);
415 LIST_HEAD(reclaimed);
db830c46 416 struct drbd_peer_request *peer_req, *t;
e2b3032b 417 int err = 0;
b411b363 418
87eeee41 419 spin_lock_irq(&mdev->tconn->req_lock);
a990be46 420 reclaim_finished_net_peer_reqs(mdev, &reclaimed);
b411b363 421 list_splice_init(&mdev->done_ee, &work_list);
87eeee41 422 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 423
db830c46 424 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
3967deb1 425 drbd_free_net_peer_req(mdev, peer_req);
b411b363
PR
426
427 /* possible callbacks here:
d4dabbe2 428 * e_end_block, and e_end_resync_block, e_send_superseded.
b411b363
PR
429 * all ignore the last argument.
430 */
db830c46 431 list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
e2b3032b
AG
432 int err2;
433
b411b363 434 /* list_del not necessary, next/prev members not touched */
e2b3032b
AG
435 err2 = peer_req->w.cb(&peer_req->w, !!err);
436 if (!err)
437 err = err2;
3967deb1 438 drbd_free_peer_req(mdev, peer_req);
b411b363
PR
439 }
440 wake_up(&mdev->ee_wait);
441
e2b3032b 442 return err;
b411b363
PR
443}
444
d4da1537
AG
445static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
446 struct list_head *head)
b411b363
PR
447{
448 DEFINE_WAIT(wait);
449
450 /* avoids spin_lock/unlock
451 * and calling prepare_to_wait in the fast path */
452 while (!list_empty(head)) {
453 prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
87eeee41 454 spin_unlock_irq(&mdev->tconn->req_lock);
7eaceacc 455 io_schedule();
b411b363 456 finish_wait(&mdev->ee_wait, &wait);
87eeee41 457 spin_lock_irq(&mdev->tconn->req_lock);
b411b363
PR
458 }
459}
460
d4da1537
AG
461static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
462 struct list_head *head)
b411b363 463{
87eeee41 464 spin_lock_irq(&mdev->tconn->req_lock);
b411b363 465 _drbd_wait_ee_list_empty(mdev, head);
87eeee41 466 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363
PR
467}
468
dbd9eea0 469static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
b411b363
PR
470{
471 mm_segment_t oldfs;
472 struct kvec iov = {
473 .iov_base = buf,
474 .iov_len = size,
475 };
476 struct msghdr msg = {
477 .msg_iovlen = 1,
478 .msg_iov = (struct iovec *)&iov,
479 .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
480 };
481 int rv;
482
483 oldfs = get_fs();
484 set_fs(KERNEL_DS);
485 rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
486 set_fs(oldfs);
487
488 return rv;
489}
490
de0ff338 491static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
b411b363 492{
b411b363
PR
493 int rv;
494
1393b59f 495 rv = drbd_recv_short(tconn->data.socket, buf, size, 0);
b411b363 496
dbd0820c
PR
497 if (rv < 0) {
498 if (rv == -ECONNRESET)
155522df 499 conn_info(tconn, "sock was reset by peer\n");
dbd0820c 500 else if (rv != -ERESTARTSYS)
155522df 501 conn_err(tconn, "sock_recvmsg returned %d\n", rv);
dbd0820c 502 } else if (rv == 0) {
b66623e3
PR
503 if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
504 long t;
505 rcu_read_lock();
506 t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
507 rcu_read_unlock();
508
509 t = wait_event_timeout(tconn->ping_wait, tconn->cstate < C_WF_REPORT_PARAMS, t);
510
599377ac
PR
511 if (t)
512 goto out;
513 }
b66623e3 514 conn_info(tconn, "sock was shut down by peer\n");
599377ac
PR
515 }
516
b411b363 517 if (rv != size)
bbeb641c 518 conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
b411b363 519
599377ac 520out:
b411b363
PR
521 return rv;
522}
523
c6967746
AG
524static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
525{
526 int err;
527
528 err = drbd_recv(tconn, buf, size);
529 if (err != size) {
530 if (err >= 0)
531 err = -EIO;
532 } else
533 err = 0;
534 return err;
535}
536
a5c31904
AG
537static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
538{
539 int err;
540
541 err = drbd_recv_all(tconn, buf, size);
542 if (err && !signal_pending(current))
543 conn_warn(tconn, "short read (expected size %d)\n", (int)size);
544 return err;
545}
546
5dbf1673
LE
547/* quoting tcp(7):
548 * On individual connections, the socket buffer size must be set prior to the
549 * listen(2) or connect(2) calls in order to have it take effect.
550 * This is our wrapper to do so.
551 */
552static void drbd_setbufsize(struct socket *sock, unsigned int snd,
553 unsigned int rcv)
554{
555 /* open coded SO_SNDBUF, SO_RCVBUF */
556 if (snd) {
557 sock->sk->sk_sndbuf = snd;
558 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
559 }
560 if (rcv) {
561 sock->sk->sk_rcvbuf = rcv;
562 sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
563 }
564}
565
eac3e990 566static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
b411b363
PR
567{
568 const char *what;
569 struct socket *sock;
570 struct sockaddr_in6 src_in6;
44ed167d
PR
571 struct sockaddr_in6 peer_in6;
572 struct net_conf *nc;
573 int err, peer_addr_len, my_addr_len;
69ef82de 574 int sndbuf_size, rcvbuf_size, connect_int;
b411b363
PR
575 int disconnect_on_error = 1;
576
44ed167d
PR
577 rcu_read_lock();
578 nc = rcu_dereference(tconn->net_conf);
579 if (!nc) {
580 rcu_read_unlock();
b411b363 581 return NULL;
44ed167d 582 }
44ed167d
PR
583 sndbuf_size = nc->sndbuf_size;
584 rcvbuf_size = nc->rcvbuf_size;
69ef82de 585 connect_int = nc->connect_int;
089c075d 586 rcu_read_unlock();
44ed167d 587
089c075d
AG
588 my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6));
589 memcpy(&src_in6, &tconn->my_addr, my_addr_len);
44ed167d 590
089c075d 591 if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6)
44ed167d
PR
592 src_in6.sin6_port = 0;
593 else
594 ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
595
089c075d
AG
596 peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6));
597 memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len);
b411b363
PR
598
599 what = "sock_create_kern";
44ed167d
PR
600 err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
601 SOCK_STREAM, IPPROTO_TCP, &sock);
b411b363
PR
602 if (err < 0) {
603 sock = NULL;
604 goto out;
605 }
606
607 sock->sk->sk_rcvtimeo =
69ef82de 608 sock->sk->sk_sndtimeo = connect_int * HZ;
44ed167d 609 drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);
b411b363
PR
610
611 /* explicitly bind to the configured IP as source IP
612 * for the outgoing connections.
613 * This is needed for multihomed hosts and to be
614 * able to use lo: interfaces for drbd.
615 * Make sure to use 0 as port number, so linux selects
616 * a free one dynamically.
617 */
b411b363 618 what = "bind before connect";
44ed167d 619 err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
b411b363
PR
620 if (err < 0)
621 goto out;
622
623 /* connect may fail, peer not yet available.
624 * stay C_WF_CONNECTION, don't go Disconnecting! */
625 disconnect_on_error = 0;
626 what = "connect";
44ed167d 627 err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);
b411b363
PR
628
629out:
630 if (err < 0) {
631 if (sock) {
632 sock_release(sock);
633 sock = NULL;
634 }
635 switch (-err) {
636 /* timeout, busy, signal pending */
637 case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
638 case EINTR: case ERESTARTSYS:
639 /* peer not (yet) available, network problem */
640 case ECONNREFUSED: case ENETUNREACH:
641 case EHOSTDOWN: case EHOSTUNREACH:
642 disconnect_on_error = 0;
643 break;
644 default:
eac3e990 645 conn_err(tconn, "%s failed, err = %d\n", what, err);
b411b363
PR
646 }
647 if (disconnect_on_error)
bbeb641c 648 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363 649 }
44ed167d 650
b411b363
PR
651 return sock;
652}
653
7a426fd8
PR
654struct accept_wait_data {
655 struct drbd_tconn *tconn;
656 struct socket *s_listen;
657 struct completion door_bell;
658 void (*original_sk_state_change)(struct sock *sk);
659
660};
661
715306f6 662static void drbd_incoming_connection(struct sock *sk)
7a426fd8
PR
663{
664 struct accept_wait_data *ad = sk->sk_user_data;
715306f6 665 void (*state_change)(struct sock *sk);
7a426fd8 666
715306f6
AG
667 state_change = ad->original_sk_state_change;
668 if (sk->sk_state == TCP_ESTABLISHED)
669 complete(&ad->door_bell);
670 state_change(sk);
7a426fd8
PR
671}
672
673static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_data *ad)
b411b363 674{
1f3e509b 675 int err, sndbuf_size, rcvbuf_size, my_addr_len;
44ed167d 676 struct sockaddr_in6 my_addr;
1f3e509b 677 struct socket *s_listen;
44ed167d 678 struct net_conf *nc;
b411b363
PR
679 const char *what;
680
44ed167d
PR
681 rcu_read_lock();
682 nc = rcu_dereference(tconn->net_conf);
683 if (!nc) {
684 rcu_read_unlock();
7a426fd8 685 return -EIO;
44ed167d 686 }
44ed167d
PR
687 sndbuf_size = nc->sndbuf_size;
688 rcvbuf_size = nc->rcvbuf_size;
44ed167d 689 rcu_read_unlock();
b411b363 690
089c075d
AG
691 my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6));
692 memcpy(&my_addr, &tconn->my_addr, my_addr_len);
b411b363
PR
693
694 what = "sock_create_kern";
44ed167d 695 err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
1f3e509b 696 SOCK_STREAM, IPPROTO_TCP, &s_listen);
b411b363
PR
697 if (err) {
698 s_listen = NULL;
699 goto out;
700 }
701
98683650 702 s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
44ed167d 703 drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);
b411b363
PR
704
705 what = "bind before listen";
44ed167d 706 err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
b411b363
PR
707 if (err < 0)
708 goto out;
709
7a426fd8
PR
710 ad->s_listen = s_listen;
711 write_lock_bh(&s_listen->sk->sk_callback_lock);
712 ad->original_sk_state_change = s_listen->sk->sk_state_change;
715306f6 713 s_listen->sk->sk_state_change = drbd_incoming_connection;
7a426fd8
PR
714 s_listen->sk->sk_user_data = ad;
715 write_unlock_bh(&s_listen->sk->sk_callback_lock);
b411b363 716
2820fd39
PR
717 what = "listen";
718 err = s_listen->ops->listen(s_listen, 5);
719 if (err < 0)
720 goto out;
721
7a426fd8 722 return 0;
b411b363
PR
723out:
724 if (s_listen)
725 sock_release(s_listen);
726 if (err < 0) {
727 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
1f3e509b
PR
728 conn_err(tconn, "%s failed, err = %d\n", what, err);
729 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363
PR
730 }
731 }
b411b363 732
7a426fd8 733 return -EIO;
b411b363
PR
734}
735
715306f6 736static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
b411b363 737{
715306f6
AG
738 write_lock_bh(&sk->sk_callback_lock);
739 sk->sk_state_change = ad->original_sk_state_change;
740 sk->sk_user_data = NULL;
741 write_unlock_bh(&sk->sk_callback_lock);
b411b363
PR
742}
743
7a426fd8 744static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct accept_wait_data *ad)
b411b363 745{
1f3e509b
PR
746 int timeo, connect_int, err = 0;
747 struct socket *s_estab = NULL;
1f3e509b
PR
748 struct net_conf *nc;
749
750 rcu_read_lock();
751 nc = rcu_dereference(tconn->net_conf);
752 if (!nc) {
753 rcu_read_unlock();
754 return NULL;
755 }
756 connect_int = nc->connect_int;
757 rcu_read_unlock();
758
759 timeo = connect_int * HZ;
38b682b2
AM
760 /* 28.5% random jitter */
761 timeo += (prandom_u32() & 1) ? timeo / 7 : -timeo / 7;
1f3e509b 762
7a426fd8
PR
763 err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
764 if (err <= 0)
765 return NULL;
b411b363 766
7a426fd8 767 err = kernel_accept(ad->s_listen, &s_estab, 0);
b411b363
PR
768 if (err < 0) {
769 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
1f3e509b 770 conn_err(tconn, "accept failed, err = %d\n", err);
bbeb641c 771 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363
PR
772 }
773 }
b411b363 774
715306f6
AG
775 if (s_estab)
776 unregister_state_change(s_estab->sk, ad);
b411b363 777
b411b363
PR
778 return s_estab;
779}
b411b363 780
e658983a 781static int decode_header(struct drbd_tconn *, void *, struct packet_info *);
b411b363 782
9f5bdc33
AG
783static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
784 enum drbd_packet cmd)
785{
786 if (!conn_prepare_command(tconn, sock))
787 return -EIO;
e658983a 788 return conn_send_command(tconn, sock, cmd, 0, NULL, 0);
b411b363
PR
789}
790
9f5bdc33 791static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)
b411b363 792{
9f5bdc33
AG
793 unsigned int header_size = drbd_header_size(tconn);
794 struct packet_info pi;
795 int err;
b411b363 796
9f5bdc33
AG
797 err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
798 if (err != header_size) {
799 if (err >= 0)
800 err = -EIO;
801 return err;
802 }
803 err = decode_header(tconn, tconn->data.rbuf, &pi);
804 if (err)
805 return err;
806 return pi.cmd;
b411b363
PR
807}
808
809/**
810 * drbd_socket_okay() - Free the socket if its connection is not okay
b411b363
PR
811 * @sock: pointer to the pointer to the socket.
812 */
dbd9eea0 813static int drbd_socket_okay(struct socket **sock)
b411b363
PR
814{
815 int rr;
816 char tb[4];
817
818 if (!*sock)
81e84650 819 return false;
b411b363 820
dbd9eea0 821 rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
b411b363
PR
822
823 if (rr > 0 || rr == -EAGAIN) {
81e84650 824 return true;
b411b363
PR
825 } else {
826 sock_release(*sock);
827 *sock = NULL;
81e84650 828 return false;
b411b363
PR
829 }
830}
2325eb66
PR
831/* Gets called if a connection is established, or if a new minor gets created
832 in a connection */
c141ebda 833int drbd_connected(struct drbd_conf *mdev)
907599e0 834{
0829f5ed 835 int err;
907599e0
PR
836
837 atomic_set(&mdev->packet_seq, 0);
838 mdev->peer_seq = 0;
839
8410da8f
PR
840 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
841 &mdev->tconn->cstate_mutex :
842 &mdev->own_state_mutex;
843
0829f5ed
AG
844 err = drbd_send_sync_param(mdev);
845 if (!err)
846 err = drbd_send_sizes(mdev, 0, 0);
847 if (!err)
848 err = drbd_send_uuids(mdev);
849 if (!err)
43de7c85 850 err = drbd_send_current_state(mdev);
907599e0
PR
851 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
852 clear_bit(RESIZE_PENDING, &mdev->flags);
2d56a974 853 atomic_set(&mdev->ap_in_flight, 0);
8b924f1d 854 mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
0829f5ed 855 return err;
907599e0 856}
b411b363
PR
857
858/*
859 * return values:
860 * 1 yes, we have a valid connection
861 * 0 oops, did not work out, please try again
862 * -1 peer talks different language,
863 * no point in trying again, please go standalone.
864 * -2 We do not have a network config...
865 */
81fa2e67 866static int conn_connect(struct drbd_tconn *tconn)
b411b363 867{
7da35862 868 struct drbd_socket sock, msock;
c141ebda 869 struct drbd_conf *mdev;
44ed167d 870 struct net_conf *nc;
92f14951 871 int vnr, timeout, h, ok;
08b165ba 872 bool discard_my_data;
197296ff 873 enum drbd_state_rv rv;
7a426fd8
PR
874 struct accept_wait_data ad = {
875 .tconn = tconn,
876 .door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
877 };
b411b363 878
b66623e3 879 clear_bit(DISCONNECT_SENT, &tconn->flags);
bbeb641c 880 if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
b411b363
PR
881 return -2;
882
7da35862
PR
883 mutex_init(&sock.mutex);
884 sock.sbuf = tconn->data.sbuf;
885 sock.rbuf = tconn->data.rbuf;
886 sock.socket = NULL;
887 mutex_init(&msock.mutex);
888 msock.sbuf = tconn->meta.sbuf;
889 msock.rbuf = tconn->meta.rbuf;
890 msock.socket = NULL;
891
0916e0e3
AG
892 /* Assume that the peer only understands protocol 80 until we know better. */
893 tconn->agreed_pro_version = 80;
b411b363 894
7a426fd8
PR
895 if (prepare_listen_socket(tconn, &ad))
896 return 0;
b411b363
PR
897
898 do {
2bf89621 899 struct socket *s;
b411b363 900
92f14951 901 s = drbd_try_connect(tconn);
b411b363 902 if (s) {
7da35862
PR
903 if (!sock.socket) {
904 sock.socket = s;
905 send_first_packet(tconn, &sock, P_INITIAL_DATA);
906 } else if (!msock.socket) {
427c0434 907 clear_bit(RESOLVE_CONFLICTS, &tconn->flags);
7da35862
PR
908 msock.socket = s;
909 send_first_packet(tconn, &msock, P_INITIAL_META);
b411b363 910 } else {
81fa2e67 911 conn_err(tconn, "Logic error in conn_connect()\n");
b411b363
PR
912 goto out_release_sockets;
913 }
914 }
915
7da35862
PR
916 if (sock.socket && msock.socket) {
917 rcu_read_lock();
918 nc = rcu_dereference(tconn->net_conf);
919 timeout = nc->ping_timeo * HZ / 10;
920 rcu_read_unlock();
921 schedule_timeout_interruptible(timeout);
922 ok = drbd_socket_okay(&sock.socket);
923 ok = drbd_socket_okay(&msock.socket) && ok;
b411b363
PR
924 if (ok)
925 break;
926 }
927
928retry:
7a426fd8 929 s = drbd_wait_for_connect(tconn, &ad);
b411b363 930 if (s) {
92f14951 931 int fp = receive_first_packet(tconn, s);
7da35862
PR
932 drbd_socket_okay(&sock.socket);
933 drbd_socket_okay(&msock.socket);
92f14951 934 switch (fp) {
e5d6f33a 935 case P_INITIAL_DATA:
7da35862 936 if (sock.socket) {
907599e0 937 conn_warn(tconn, "initial packet S crossed\n");
7da35862 938 sock_release(sock.socket);
80c6eed4
PR
939 sock.socket = s;
940 goto randomize;
b411b363 941 }
7da35862 942 sock.socket = s;
b411b363 943 break;
e5d6f33a 944 case P_INITIAL_META:
427c0434 945 set_bit(RESOLVE_CONFLICTS, &tconn->flags);
7da35862 946 if (msock.socket) {
907599e0 947 conn_warn(tconn, "initial packet M crossed\n");
7da35862 948 sock_release(msock.socket);
80c6eed4
PR
949 msock.socket = s;
950 goto randomize;
b411b363 951 }
7da35862 952 msock.socket = s;
b411b363
PR
953 break;
954 default:
907599e0 955 conn_warn(tconn, "Error receiving initial packet\n");
b411b363 956 sock_release(s);
80c6eed4 957randomize:
38b682b2 958 if (prandom_u32() & 1)
b411b363
PR
959 goto retry;
960 }
961 }
962
bbeb641c 963 if (tconn->cstate <= C_DISCONNECTING)
b411b363
PR
964 goto out_release_sockets;
965 if (signal_pending(current)) {
966 flush_signals(current);
967 smp_rmb();
907599e0 968 if (get_t_state(&tconn->receiver) == EXITING)
b411b363
PR
969 goto out_release_sockets;
970 }
971
b666dbf8
PR
972 ok = drbd_socket_okay(&sock.socket);
973 ok = drbd_socket_okay(&msock.socket) && ok;
974 } while (!ok);
b411b363 975
7a426fd8
PR
976 if (ad.s_listen)
977 sock_release(ad.s_listen);
b411b363 978
98683650
PR
979 sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
980 msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
b411b363 981
7da35862
PR
982 sock.socket->sk->sk_allocation = GFP_NOIO;
983 msock.socket->sk->sk_allocation = GFP_NOIO;
b411b363 984
7da35862
PR
985 sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
986 msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;
b411b363 987
b411b363 988 /* NOT YET ...
7da35862
PR
989 * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
990 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
6038178e 991 * first set it to the P_CONNECTION_FEATURES timeout,
b411b363 992 * which we set to 4x the configured ping_timeout. */
44ed167d
PR
993 rcu_read_lock();
994 nc = rcu_dereference(tconn->net_conf);
995
7da35862
PR
996 sock.socket->sk->sk_sndtimeo =
997 sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
b411b363 998
7da35862 999 msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
44ed167d 1000 timeout = nc->timeout * HZ / 10;
08b165ba 1001 discard_my_data = nc->discard_my_data;
44ed167d 1002 rcu_read_unlock();
b411b363 1003
7da35862 1004 msock.socket->sk->sk_sndtimeo = timeout;
b411b363
PR
1005
1006 /* we don't want delays.
25985edc 1007 * we use TCP_CORK where appropriate, though */
7da35862
PR
1008 drbd_tcp_nodelay(sock.socket);
1009 drbd_tcp_nodelay(msock.socket);
b411b363 1010
7da35862
PR
1011 tconn->data.socket = sock.socket;
1012 tconn->meta.socket = msock.socket;
907599e0 1013 tconn->last_received = jiffies;
b411b363 1014
6038178e 1015 h = drbd_do_features(tconn);
b411b363
PR
1016 if (h <= 0)
1017 return h;
1018
907599e0 1019 if (tconn->cram_hmac_tfm) {
b411b363 1020 /* drbd_request_state(mdev, NS(conn, WFAuth)); */
907599e0 1021 switch (drbd_do_auth(tconn)) {
b10d96cb 1022 case -1:
907599e0 1023 conn_err(tconn, "Authentication of peer failed\n");
b411b363 1024 return -1;
b10d96cb 1025 case 0:
907599e0 1026 conn_err(tconn, "Authentication of peer failed, trying again.\n");
b10d96cb 1027 return 0;
b411b363
PR
1028 }
1029 }
1030
7da35862
PR
1031 tconn->data.socket->sk->sk_sndtimeo = timeout;
1032 tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
b411b363 1033
387eb308 1034 if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
7e2455c1 1035 return -1;
b411b363 1036
a1096a6e
PR
1037 set_bit(STATE_SENT, &tconn->flags);
1038
c141ebda
PR
1039 rcu_read_lock();
1040 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1041 kref_get(&mdev->kref);
26ea8f92
AG
1042 rcu_read_unlock();
1043
13c76aba
PR
1044 /* Prevent a race between resync-handshake and
1045 * being promoted to Primary.
1046 *
1047 * Grab and release the state mutex, so we know that any current
1048 * drbd_set_role() is finished, and any incoming drbd_set_role
1049 * will see the STATE_SENT flag, and wait for it to be cleared.
1050 */
1051 mutex_lock(mdev->state_mutex);
1052 mutex_unlock(mdev->state_mutex);
1053
08b165ba
PR
1054 if (discard_my_data)
1055 set_bit(DISCARD_MY_DATA, &mdev->flags);
1056 else
1057 clear_bit(DISCARD_MY_DATA, &mdev->flags);
1058
c141ebda
PR
1059 drbd_connected(mdev);
1060 kref_put(&mdev->kref, &drbd_minor_destroy);
1061 rcu_read_lock();
1062 }
1063 rcu_read_unlock();
1064
a1096a6e 1065 rv = conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
ed635cb0 1066 if (rv < SS_SUCCESS || tconn->cstate != C_WF_REPORT_PARAMS) {
a1096a6e 1067 clear_bit(STATE_SENT, &tconn->flags);
1e86ac48 1068 return 0;
a1096a6e 1069 }
1e86ac48 1070
823bd832 1071 drbd_thread_start(&tconn->asender);
b411b363 1072
08b165ba
PR
1073 mutex_lock(&tconn->conf_update);
1074 /* The discard_my_data flag is a single-shot modifier to the next
1075 * connection attempt, the handshake of which is now well underway.
1076 * No need for rcu style copying of the whole struct
1077 * just to clear a single value. */
1078 tconn->net_conf->discard_my_data = 0;
1079 mutex_unlock(&tconn->conf_update);
1080
d3fcb490 1081 return h;
b411b363
PR
1082
1083out_release_sockets:
7a426fd8
PR
1084 if (ad.s_listen)
1085 sock_release(ad.s_listen);
7da35862
PR
1086 if (sock.socket)
1087 sock_release(sock.socket);
1088 if (msock.socket)
1089 sock_release(msock.socket);
b411b363
PR
1090 return -1;
1091}
1092
e658983a 1093static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)
b411b363 1094{
e658983a
AG
1095 unsigned int header_size = drbd_header_size(tconn);
1096
0c8e36d9
AG
1097 if (header_size == sizeof(struct p_header100) &&
1098 *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
1099 struct p_header100 *h = header;
1100 if (h->pad != 0) {
1101 conn_err(tconn, "Header padding is not zero\n");
1102 return -EINVAL;
1103 }
1104 pi->vnr = be16_to_cpu(h->volume);
1105 pi->cmd = be16_to_cpu(h->command);
1106 pi->size = be32_to_cpu(h->length);
1107 } else if (header_size == sizeof(struct p_header95) &&
1108 *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
e658983a 1109 struct p_header95 *h = header;
e658983a 1110 pi->cmd = be16_to_cpu(h->command);
b55d84ba
AG
1111 pi->size = be32_to_cpu(h->length);
1112 pi->vnr = 0;
e658983a
AG
1113 } else if (header_size == sizeof(struct p_header80) &&
1114 *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
1115 struct p_header80 *h = header;
1116 pi->cmd = be16_to_cpu(h->command);
1117 pi->size = be16_to_cpu(h->length);
77351055 1118 pi->vnr = 0;
02918be2 1119 } else {
e658983a
AG
1120 conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
1121 be32_to_cpu(*(__be32 *)header),
1122 tconn->agreed_pro_version);
8172f3e9 1123 return -EINVAL;
b411b363 1124 }
e658983a 1125 pi->data = header + header_size;
8172f3e9 1126 return 0;
257d0af6 1127}
b411b363 1128
9ba7aa00 1129static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
257d0af6 1130{
e658983a 1131 void *buffer = tconn->data.rbuf;
69bc7bc3 1132 int err;
257d0af6 1133
e658983a 1134 err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));
a5c31904 1135 if (err)
69bc7bc3 1136 return err;
257d0af6 1137
e658983a 1138 err = decode_header(tconn, buffer, pi);
9ba7aa00 1139 tconn->last_received = jiffies;
b411b363 1140
69bc7bc3 1141 return err;
b411b363
PR
1142}
1143
4b0007c0 1144static void drbd_flush(struct drbd_tconn *tconn)
b411b363
PR
1145{
1146 int rv;
4b0007c0
PR
1147 struct drbd_conf *mdev;
1148 int vnr;
1149
1150 if (tconn->write_ordering >= WO_bdev_flush) {
615e087f 1151 rcu_read_lock();
4b0007c0 1152 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
615e087f
LE
1153 if (!get_ldev(mdev))
1154 continue;
1155 kref_get(&mdev->kref);
1156 rcu_read_unlock();
1157
1158 rv = blkdev_issue_flush(mdev->ldev->backing_bdev,
1159 GFP_NOIO, NULL);
1160 if (rv) {
1161 dev_info(DEV, "local disk flush failed with status %d\n", rv);
1162 /* would rather check on EOPNOTSUPP, but that is not reliable.
1163 * don't try again for ANY return value != 0
1164 * if (rv == -EOPNOTSUPP) */
1165 drbd_bump_write_ordering(tconn, WO_drain_io);
4b0007c0 1166 }
615e087f
LE
1167 put_ldev(mdev);
1168 kref_put(&mdev->kref, &drbd_minor_destroy);
b411b363 1169
615e087f
LE
1170 rcu_read_lock();
1171 if (rv)
1172 break;
b411b363 1173 }
615e087f 1174 rcu_read_unlock();
b411b363 1175 }
b411b363
PR
1176}
1177
1178/**
1179 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
1180 * @mdev: DRBD device.
1181 * @epoch: Epoch object.
1182 * @ev: Epoch event.
1183 */
1e9dd291 1184static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
b411b363
PR
1185 struct drbd_epoch *epoch,
1186 enum epoch_event ev)
1187{
2451fc3b 1188 int epoch_size;
b411b363 1189 struct drbd_epoch *next_epoch;
b411b363
PR
1190 enum finish_epoch rv = FE_STILL_LIVE;
1191
12038a3a 1192 spin_lock(&tconn->epoch_lock);
b411b363
PR
1193 do {
1194 next_epoch = NULL;
b411b363
PR
1195
1196 epoch_size = atomic_read(&epoch->epoch_size);
1197
1198 switch (ev & ~EV_CLEANUP) {
1199 case EV_PUT:
1200 atomic_dec(&epoch->active);
1201 break;
1202 case EV_GOT_BARRIER_NR:
1203 set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
b411b363
PR
1204 break;
1205 case EV_BECAME_LAST:
1206 /* nothing to do*/
1207 break;
1208 }
1209
b411b363
PR
1210 if (epoch_size != 0 &&
1211 atomic_read(&epoch->active) == 0 &&
80f9fd55 1212 (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
b411b363 1213 if (!(ev & EV_CLEANUP)) {
12038a3a 1214 spin_unlock(&tconn->epoch_lock);
9ed57dcb 1215 drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size);
12038a3a 1216 spin_lock(&tconn->epoch_lock);
b411b363 1217 }
9ed57dcb
LE
1218#if 0
1219 /* FIXME: dec unacked on connection, once we have
1220 * something to count pending connection packets in. */
80f9fd55 1221 if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
9ed57dcb
LE
1222 dec_unacked(epoch->tconn);
1223#endif
b411b363 1224
12038a3a 1225 if (tconn->current_epoch != epoch) {
b411b363
PR
1226 next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1227 list_del(&epoch->list);
1228 ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
12038a3a 1229 tconn->epochs--;
b411b363
PR
1230 kfree(epoch);
1231
1232 if (rv == FE_STILL_LIVE)
1233 rv = FE_DESTROYED;
1234 } else {
1235 epoch->flags = 0;
1236 atomic_set(&epoch->epoch_size, 0);
698f9315 1237 /* atomic_set(&epoch->active, 0); is already zero */
b411b363
PR
1238 if (rv == FE_STILL_LIVE)
1239 rv = FE_RECYCLED;
1240 }
1241 }
1242
1243 if (!next_epoch)
1244 break;
1245
1246 epoch = next_epoch;
1247 } while (1);
1248
12038a3a 1249 spin_unlock(&tconn->epoch_lock);
b411b363 1250
b411b363
PR
1251 return rv;
1252}
1253
1254/**
1255 * drbd_bump_write_ordering() - Fall back to an other write ordering method
4b0007c0 1256 * @tconn: DRBD connection.
b411b363
PR
1257 * @wo: Write ordering method to try.
1258 */
4b0007c0 1259void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo)
b411b363 1260{
daeda1cc 1261 struct disk_conf *dc;
4b0007c0 1262 struct drbd_conf *mdev;
b411b363 1263 enum write_ordering_e pwo;
4b0007c0 1264 int vnr;
b411b363
PR
1265 static char *write_ordering_str[] = {
1266 [WO_none] = "none",
1267 [WO_drain_io] = "drain",
1268 [WO_bdev_flush] = "flush",
b411b363
PR
1269 };
1270
4b0007c0 1271 pwo = tconn->write_ordering;
b411b363 1272 wo = min(pwo, wo);
daeda1cc 1273 rcu_read_lock();
4b0007c0 1274 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
27eb13e9 1275 if (!get_ldev_if_state(mdev, D_ATTACHING))
4b0007c0
PR
1276 continue;
1277 dc = rcu_dereference(mdev->ldev->disk_conf);
1278
1279 if (wo == WO_bdev_flush && !dc->disk_flushes)
1280 wo = WO_drain_io;
1281 if (wo == WO_drain_io && !dc->disk_drain)
1282 wo = WO_none;
1283 put_ldev(mdev);
1284 }
daeda1cc 1285 rcu_read_unlock();
4b0007c0
PR
1286 tconn->write_ordering = wo;
1287 if (pwo != tconn->write_ordering || wo == WO_bdev_flush)
1288 conn_info(tconn, "Method to ensure write ordering: %s\n", write_ordering_str[tconn->write_ordering]);
b411b363
PR
1289}
1290
45bb912b 1291/**
fbe29dec 1292 * drbd_submit_peer_request()
45bb912b 1293 * @mdev: DRBD device.
db830c46 1294 * @peer_req: peer request
45bb912b 1295 * @rw: flag field, see bio->bi_rw
10f6d992
LE
1296 *
1297 * May spread the pages to multiple bios,
1298 * depending on bio_add_page restrictions.
1299 *
1300 * Returns 0 if all bios have been submitted,
1301 * -ENOMEM if we could not allocate enough bios,
1302 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
1303 * single page to an empty bio (which should never happen and likely indicates
1304 * that the lower level IO stack is in some way broken). This has been observed
1305 * on certain Xen deployments.
45bb912b
LE
1306 */
1307/* TODO allocate from our own bio_set. */
fbe29dec
AG
1308int drbd_submit_peer_request(struct drbd_conf *mdev,
1309 struct drbd_peer_request *peer_req,
1310 const unsigned rw, const int fault_type)
45bb912b
LE
1311{
1312 struct bio *bios = NULL;
1313 struct bio *bio;
db830c46
AG
1314 struct page *page = peer_req->pages;
1315 sector_t sector = peer_req->i.sector;
1316 unsigned ds = peer_req->i.size;
45bb912b
LE
1317 unsigned n_bios = 0;
1318 unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
10f6d992 1319 int err = -ENOMEM;
45bb912b
LE
1320
1321 /* In most cases, we will only need one bio. But in case the lower
1322 * level restrictions happen to be different at this offset on this
1323 * side than those of the sending peer, we may need to submit the
9476f39d
LE
1324 * request in more than one bio.
1325 *
1326 * Plain bio_alloc is good enough here, this is no DRBD internally
1327 * generated bio, but a bio allocated on behalf of the peer.
1328 */
45bb912b
LE
1329next_bio:
1330 bio = bio_alloc(GFP_NOIO, nr_pages);
1331 if (!bio) {
1332 dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
1333 goto fail;
1334 }
db830c46 1335 /* > peer_req->i.sector, unless this is the first bio */
4f024f37 1336 bio->bi_iter.bi_sector = sector;
45bb912b 1337 bio->bi_bdev = mdev->ldev->backing_bdev;
45bb912b 1338 bio->bi_rw = rw;
db830c46 1339 bio->bi_private = peer_req;
fcefa62e 1340 bio->bi_end_io = drbd_peer_request_endio;
45bb912b
LE
1341
1342 bio->bi_next = bios;
1343 bios = bio;
1344 ++n_bios;
1345
1346 page_chain_for_each(page) {
1347 unsigned len = min_t(unsigned, ds, PAGE_SIZE);
1348 if (!bio_add_page(bio, page, len, 0)) {
10f6d992
LE
1349 /* A single page must always be possible!
1350 * But in case it fails anyways,
1351 * we deal with it, and complain (below). */
1352 if (bio->bi_vcnt == 0) {
1353 dev_err(DEV,
1354 "bio_add_page failed for len=%u, "
1355 "bi_vcnt=0 (bi_sector=%llu)\n",
4f024f37 1356 len, (uint64_t)bio->bi_iter.bi_sector);
10f6d992
LE
1357 err = -ENOSPC;
1358 goto fail;
1359 }
45bb912b
LE
1360 goto next_bio;
1361 }
1362 ds -= len;
1363 sector += len >> 9;
1364 --nr_pages;
1365 }
1366 D_ASSERT(page == NULL);
1367 D_ASSERT(ds == 0);
1368
db830c46 1369 atomic_set(&peer_req->pending_bios, n_bios);
45bb912b
LE
1370 do {
1371 bio = bios;
1372 bios = bios->bi_next;
1373 bio->bi_next = NULL;
1374
45bb912b 1375 drbd_generic_make_request(mdev, fault_type, bio);
45bb912b 1376 } while (bios);
45bb912b
LE
1377 return 0;
1378
1379fail:
1380 while (bios) {
1381 bio = bios;
1382 bios = bios->bi_next;
1383 bio_put(bio);
1384 }
10f6d992 1385 return err;
45bb912b
LE
1386}
1387
53840641 1388static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
db830c46 1389 struct drbd_peer_request *peer_req)
53840641 1390{
db830c46 1391 struct drbd_interval *i = &peer_req->i;
53840641
AG
1392
1393 drbd_remove_interval(&mdev->write_requests, i);
1394 drbd_clear_interval(i);
1395
6c852bec 1396 /* Wake up any processes waiting for this peer request to complete. */
53840641
AG
1397 if (i->waiting)
1398 wake_up(&mdev->misc_wait);
1399}
1400
77fede51
PR
1401void conn_wait_active_ee_empty(struct drbd_tconn *tconn)
1402{
1403 struct drbd_conf *mdev;
1404 int vnr;
1405
1406 rcu_read_lock();
1407 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1408 kref_get(&mdev->kref);
1409 rcu_read_unlock();
1410 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1411 kref_put(&mdev->kref, &drbd_minor_destroy);
1412 rcu_read_lock();
1413 }
1414 rcu_read_unlock();
1415}
1416
4a76b161 1417static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 1418{
2451fc3b 1419 int rv;
e658983a 1420 struct p_barrier *p = pi->data;
b411b363
PR
1421 struct drbd_epoch *epoch;
1422
9ed57dcb
LE
1423 /* FIXME these are unacked on connection,
1424 * not a specific (peer)device.
1425 */
12038a3a 1426 tconn->current_epoch->barrier_nr = p->barrier;
9ed57dcb 1427 tconn->current_epoch->tconn = tconn;
1e9dd291 1428 rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR);
b411b363
PR
1429
1430 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1431 * the activity log, which means it would not be resynced in case the
1432 * R_PRIMARY crashes now.
1433 * Therefore we must send the barrier_ack after the barrier request was
1434 * completed. */
4b0007c0 1435 switch (tconn->write_ordering) {
b411b363
PR
1436 case WO_none:
1437 if (rv == FE_RECYCLED)
82bc0194 1438 return 0;
2451fc3b
PR
1439
1440 /* receiver context, in the writeout path of the other node.
1441 * avoid potential distributed deadlock */
1442 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1443 if (epoch)
1444 break;
1445 else
9ed57dcb 1446 conn_warn(tconn, "Allocation of an epoch failed, slowing down\n");
2451fc3b 1447 /* Fall through */
b411b363
PR
1448
1449 case WO_bdev_flush:
1450 case WO_drain_io:
77fede51 1451 conn_wait_active_ee_empty(tconn);
4b0007c0 1452 drbd_flush(tconn);
2451fc3b 1453
12038a3a 1454 if (atomic_read(&tconn->current_epoch->epoch_size)) {
2451fc3b
PR
1455 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1456 if (epoch)
1457 break;
b411b363
PR
1458 }
1459
82bc0194 1460 return 0;
2451fc3b 1461 default:
9ed57dcb 1462 conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);
82bc0194 1463 return -EIO;
b411b363
PR
1464 }
1465
1466 epoch->flags = 0;
1467 atomic_set(&epoch->epoch_size, 0);
1468 atomic_set(&epoch->active, 0);
1469
12038a3a
PR
1470 spin_lock(&tconn->epoch_lock);
1471 if (atomic_read(&tconn->current_epoch->epoch_size)) {
1472 list_add(&epoch->list, &tconn->current_epoch->list);
1473 tconn->current_epoch = epoch;
1474 tconn->epochs++;
b411b363
PR
1475 } else {
1476 /* The current_epoch got recycled while we allocated this one... */
1477 kfree(epoch);
1478 }
12038a3a 1479 spin_unlock(&tconn->epoch_lock);
b411b363 1480
82bc0194 1481 return 0;
b411b363
PR
1482}
1483
1484/* used from receive_RSDataReply (recv_resync_read)
1485 * and from receive_Data */
f6ffca9f
AG
1486static struct drbd_peer_request *
1487read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
1488 int data_size) __must_hold(local)
b411b363 1489{
6666032a 1490 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
db830c46 1491 struct drbd_peer_request *peer_req;
b411b363 1492 struct page *page;
a5c31904 1493 int dgs, ds, err;
a0638456
PR
1494 void *dig_in = mdev->tconn->int_dig_in;
1495 void *dig_vv = mdev->tconn->int_dig_vv;
6b4388ac 1496 unsigned long *data;
b411b363 1497
88104ca4
AG
1498 dgs = 0;
1499 if (mdev->tconn->peer_integrity_tfm) {
1500 dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
9f5bdc33
AG
1501 /*
1502 * FIXME: Receive the incoming digest into the receive buffer
1503 * here, together with its struct p_data?
1504 */
a5c31904
AG
1505 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1506 if (err)
b411b363 1507 return NULL;
88104ca4 1508 data_size -= dgs;
b411b363
PR
1509 }
1510
841ce241
AG
1511 if (!expect(IS_ALIGNED(data_size, 512)))
1512 return NULL;
1513 if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
1514 return NULL;
b411b363 1515
6666032a
LE
1516 /* even though we trust out peer,
1517 * we sometimes have to double check. */
1518 if (sector + (data_size>>9) > capacity) {
fdda6544
LE
1519 dev_err(DEV, "request from peer beyond end of local disk: "
1520 "capacity: %llus < sector: %llus + size: %u\n",
6666032a
LE
1521 (unsigned long long)capacity,
1522 (unsigned long long)sector, data_size);
1523 return NULL;
1524 }
1525
b411b363
PR
1526 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1527 * "criss-cross" setup, that might cause write-out on some other DRBD,
1528 * which in turn might block on the other node at this very place. */
0db55363 1529 peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);
db830c46 1530 if (!peer_req)
b411b363 1531 return NULL;
45bb912b 1532
a73ff323 1533 if (!data_size)
81a3537a 1534 return peer_req;
a73ff323 1535
b411b363 1536 ds = data_size;
db830c46 1537 page = peer_req->pages;
45bb912b
LE
1538 page_chain_for_each(page) {
1539 unsigned len = min_t(int, ds, PAGE_SIZE);
6b4388ac 1540 data = kmap(page);
a5c31904 1541 err = drbd_recv_all_warn(mdev->tconn, data, len);
0cf9d27e 1542 if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
6b4388ac
PR
1543 dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1544 data[0] = data[0] ^ (unsigned long)-1;
1545 }
b411b363 1546 kunmap(page);
a5c31904 1547 if (err) {
3967deb1 1548 drbd_free_peer_req(mdev, peer_req);
b411b363
PR
1549 return NULL;
1550 }
a5c31904 1551 ds -= len;
b411b363
PR
1552 }
1553
1554 if (dgs) {
5b614abe 1555 drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
b411b363 1556 if (memcmp(dig_in, dig_vv, dgs)) {
470be44a
LE
1557 dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
1558 (unsigned long long)sector, data_size);
3967deb1 1559 drbd_free_peer_req(mdev, peer_req);
b411b363
PR
1560 return NULL;
1561 }
1562 }
1563 mdev->recv_cnt += data_size>>9;
db830c46 1564 return peer_req;
b411b363
PR
1565}
1566
1567/* drbd_drain_block() just takes a data block
1568 * out of the socket input buffer, and discards it.
1569 */
1570static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1571{
1572 struct page *page;
a5c31904 1573 int err = 0;
b411b363
PR
1574 void *data;
1575
c3470cde 1576 if (!data_size)
fc5be839 1577 return 0;
c3470cde 1578
c37c8ecf 1579 page = drbd_alloc_pages(mdev, 1, 1);
b411b363
PR
1580
1581 data = kmap(page);
1582 while (data_size) {
fc5be839
AG
1583 unsigned int len = min_t(int, data_size, PAGE_SIZE);
1584
a5c31904
AG
1585 err = drbd_recv_all_warn(mdev->tconn, data, len);
1586 if (err)
b411b363 1587 break;
a5c31904 1588 data_size -= len;
b411b363
PR
1589 }
1590 kunmap(page);
5cc287e0 1591 drbd_free_pages(mdev, page, 0);
fc5be839 1592 return err;
b411b363
PR
1593}
1594
1595static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1596 sector_t sector, int data_size)
1597{
1598 struct bio_vec *bvec;
1599 struct bio *bio;
a5c31904 1600 int dgs, err, i, expect;
a0638456
PR
1601 void *dig_in = mdev->tconn->int_dig_in;
1602 void *dig_vv = mdev->tconn->int_dig_vv;
b411b363 1603
88104ca4
AG
1604 dgs = 0;
1605 if (mdev->tconn->peer_integrity_tfm) {
1606 dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
a5c31904
AG
1607 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1608 if (err)
1609 return err;
88104ca4 1610 data_size -= dgs;
b411b363
PR
1611 }
1612
b411b363
PR
1613 /* optimistically update recv_cnt. if receiving fails below,
1614 * we disconnect anyways, and counters will be reset. */
1615 mdev->recv_cnt += data_size>>9;
1616
1617 bio = req->master_bio;
4f024f37 1618 D_ASSERT(sector == bio->bi_iter.bi_sector);
b411b363
PR
1619
1620 bio_for_each_segment(bvec, bio, i) {
a5c31904 1621 void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
b411b363 1622 expect = min_t(int, data_size, bvec->bv_len);
a5c31904 1623 err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
b411b363 1624 kunmap(bvec->bv_page);
a5c31904
AG
1625 if (err)
1626 return err;
1627 data_size -= expect;
b411b363
PR
1628 }
1629
1630 if (dgs) {
5b614abe 1631 drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
b411b363
PR
1632 if (memcmp(dig_in, dig_vv, dgs)) {
1633 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
28284cef 1634 return -EINVAL;
b411b363
PR
1635 }
1636 }
1637
1638 D_ASSERT(data_size == 0);
28284cef 1639 return 0;
b411b363
PR
1640}
1641
a990be46
AG
1642/*
1643 * e_end_resync_block() is called in asender context via
1644 * drbd_finish_peer_reqs().
1645 */
99920dc5 1646static int e_end_resync_block(struct drbd_work *w, int unused)
b411b363 1647{
8050e6d0
AG
1648 struct drbd_peer_request *peer_req =
1649 container_of(w, struct drbd_peer_request, w);
00d56944 1650 struct drbd_conf *mdev = w->mdev;
db830c46 1651 sector_t sector = peer_req->i.sector;
99920dc5 1652 int err;
b411b363 1653
db830c46 1654 D_ASSERT(drbd_interval_empty(&peer_req->i));
b411b363 1655
db830c46
AG
1656 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1657 drbd_set_in_sync(mdev, sector, peer_req->i.size);
99920dc5 1658 err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
b411b363
PR
1659 } else {
1660 /* Record failure to sync */
db830c46 1661 drbd_rs_failed_io(mdev, sector, peer_req->i.size);
b411b363 1662
99920dc5 1663 err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
b411b363
PR
1664 }
1665 dec_unacked(mdev);
1666
99920dc5 1667 return err;
b411b363
PR
1668}
1669
1670static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1671{
db830c46 1672 struct drbd_peer_request *peer_req;
b411b363 1673
db830c46
AG
1674 peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
1675 if (!peer_req)
45bb912b 1676 goto fail;
b411b363
PR
1677
1678 dec_rs_pending(mdev);
1679
b411b363
PR
1680 inc_unacked(mdev);
1681 /* corresponding dec_unacked() in e_end_resync_block()
1682 * respective _drbd_clear_done_ee */
1683
db830c46 1684 peer_req->w.cb = e_end_resync_block;
45bb912b 1685
87eeee41 1686 spin_lock_irq(&mdev->tconn->req_lock);
db830c46 1687 list_add(&peer_req->w.list, &mdev->sync_ee);
87eeee41 1688 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 1689
0f0601f4 1690 atomic_add(data_size >> 9, &mdev->rs_sect_ev);
fbe29dec 1691 if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
e1c1b0fc 1692 return 0;
b411b363 1693
10f6d992
LE
1694 /* don't care for the reason here */
1695 dev_err(DEV, "submit failed, triggering re-connect\n");
87eeee41 1696 spin_lock_irq(&mdev->tconn->req_lock);
db830c46 1697 list_del(&peer_req->w.list);
87eeee41 1698 spin_unlock_irq(&mdev->tconn->req_lock);
22cc37a9 1699
3967deb1 1700 drbd_free_peer_req(mdev, peer_req);
45bb912b
LE
1701fail:
1702 put_ldev(mdev);
e1c1b0fc 1703 return -EIO;
b411b363
PR
1704}
1705
668eebc6 1706static struct drbd_request *
bc9c5c41
AG
1707find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
1708 sector_t sector, bool missing_ok, const char *func)
51624585 1709{
51624585
AG
1710 struct drbd_request *req;
1711
bc9c5c41
AG
1712 /* Request object according to our peer */
1713 req = (struct drbd_request *)(unsigned long)id;
5e472264 1714 if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
668eebc6 1715 return req;
c3afd8f5 1716 if (!missing_ok) {
5af172ed 1717 dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
c3afd8f5
AG
1718 (unsigned long)id, (unsigned long long)sector);
1719 }
51624585 1720 return NULL;
b411b363
PR
1721}
1722
4a76b161 1723static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 1724{
4a76b161 1725 struct drbd_conf *mdev;
b411b363
PR
1726 struct drbd_request *req;
1727 sector_t sector;
82bc0194 1728 int err;
e658983a 1729 struct p_data *p = pi->data;
4a76b161
AG
1730
1731 mdev = vnr_to_mdev(tconn, pi->vnr);
1732 if (!mdev)
1733 return -EIO;
b411b363
PR
1734
1735 sector = be64_to_cpu(p->sector);
1736
87eeee41 1737 spin_lock_irq(&mdev->tconn->req_lock);
bc9c5c41 1738 req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
87eeee41 1739 spin_unlock_irq(&mdev->tconn->req_lock);
c3afd8f5 1740 if (unlikely(!req))
82bc0194 1741 return -EIO;
b411b363 1742
24c4830c 1743 /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
b411b363
PR
1744 * special casing it there for the various failure cases.
1745 * still no race with drbd_fail_pending_reads */
e2857216 1746 err = recv_dless_read(mdev, req, sector, pi->size);
82bc0194 1747 if (!err)
8554df1c 1748 req_mod(req, DATA_RECEIVED);
b411b363
PR
1749 /* else: nothing. handled from drbd_disconnect...
1750 * I don't think we may complete this just yet
1751 * in case we are "on-disconnect: freeze" */
1752
82bc0194 1753 return err;
b411b363
PR
1754}
1755
4a76b161 1756static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 1757{
4a76b161 1758 struct drbd_conf *mdev;
b411b363 1759 sector_t sector;
82bc0194 1760 int err;
e658983a 1761 struct p_data *p = pi->data;
4a76b161
AG
1762
1763 mdev = vnr_to_mdev(tconn, pi->vnr);
1764 if (!mdev)
1765 return -EIO;
b411b363
PR
1766
1767 sector = be64_to_cpu(p->sector);
1768 D_ASSERT(p->block_id == ID_SYNCER);
1769
1770 if (get_ldev(mdev)) {
1771 /* data is submitted to disk within recv_resync_read.
1772 * corresponding put_ldev done below on error,
fcefa62e 1773 * or in drbd_peer_request_endio. */
e2857216 1774 err = recv_resync_read(mdev, sector, pi->size);
b411b363
PR
1775 } else {
1776 if (__ratelimit(&drbd_ratelimit_state))
1777 dev_err(DEV, "Can not write resync data to local disk.\n");
1778
e2857216 1779 err = drbd_drain_block(mdev, pi->size);
b411b363 1780
e2857216 1781 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
b411b363
PR
1782 }
1783
e2857216 1784 atomic_add(pi->size >> 9, &mdev->rs_sect_in);
778f271d 1785
82bc0194 1786 return err;
b411b363
PR
1787}
1788
7be8da07
AG
1789static void restart_conflicting_writes(struct drbd_conf *mdev,
1790 sector_t sector, int size)
b411b363 1791{
7be8da07
AG
1792 struct drbd_interval *i;
1793 struct drbd_request *req;
1794
1795 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
1796 if (!i->local)
1797 continue;
1798 req = container_of(i, struct drbd_request, i);
1799 if (req->rq_state & RQ_LOCAL_PENDING ||
1800 !(req->rq_state & RQ_POSTPONED))
1801 continue;
2312f0b3
LE
1802 /* as it is RQ_POSTPONED, this will cause it to
1803 * be queued on the retry workqueue. */
d4dabbe2 1804 __req_mod(req, CONFLICT_RESOLVED, NULL);
7be8da07
AG
1805 }
1806}
b411b363 1807
a990be46
AG
1808/*
1809 * e_end_block() is called in asender context via drbd_finish_peer_reqs().
b411b363 1810 */
99920dc5 1811static int e_end_block(struct drbd_work *w, int cancel)
b411b363 1812{
8050e6d0
AG
1813 struct drbd_peer_request *peer_req =
1814 container_of(w, struct drbd_peer_request, w);
00d56944 1815 struct drbd_conf *mdev = w->mdev;
db830c46 1816 sector_t sector = peer_req->i.sector;
99920dc5 1817 int err = 0, pcmd;
b411b363 1818
303d1448 1819 if (peer_req->flags & EE_SEND_WRITE_ACK) {
db830c46 1820 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
b411b363
PR
1821 pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1822 mdev->state.conn <= C_PAUSED_SYNC_T &&
db830c46 1823 peer_req->flags & EE_MAY_SET_IN_SYNC) ?
b411b363 1824 P_RS_WRITE_ACK : P_WRITE_ACK;
99920dc5 1825 err = drbd_send_ack(mdev, pcmd, peer_req);
b411b363 1826 if (pcmd == P_RS_WRITE_ACK)
db830c46 1827 drbd_set_in_sync(mdev, sector, peer_req->i.size);
b411b363 1828 } else {
99920dc5 1829 err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
b411b363
PR
1830 /* we expect it to be marked out of sync anyways...
1831 * maybe assert this? */
1832 }
1833 dec_unacked(mdev);
1834 }
1835 /* we delete from the conflict detection hash _after_ we sent out the
1836 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
302bdeae 1837 if (peer_req->flags & EE_IN_INTERVAL_TREE) {
87eeee41 1838 spin_lock_irq(&mdev->tconn->req_lock);
db830c46
AG
1839 D_ASSERT(!drbd_interval_empty(&peer_req->i));
1840 drbd_remove_epoch_entry_interval(mdev, peer_req);
7be8da07
AG
1841 if (peer_req->flags & EE_RESTART_REQUESTS)
1842 restart_conflicting_writes(mdev, sector, peer_req->i.size);
87eeee41 1843 spin_unlock_irq(&mdev->tconn->req_lock);
bb3bfe96 1844 } else
db830c46 1845 D_ASSERT(drbd_interval_empty(&peer_req->i));
b411b363 1846
1e9dd291 1847 drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
b411b363 1848
99920dc5 1849 return err;
b411b363
PR
1850}
1851
7be8da07 1852static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
b411b363 1853{
7be8da07 1854 struct drbd_conf *mdev = w->mdev;
8050e6d0
AG
1855 struct drbd_peer_request *peer_req =
1856 container_of(w, struct drbd_peer_request, w);
99920dc5 1857 int err;
b411b363 1858
99920dc5 1859 err = drbd_send_ack(mdev, ack, peer_req);
b411b363
PR
1860 dec_unacked(mdev);
1861
99920dc5 1862 return err;
b411b363
PR
1863}
1864
d4dabbe2 1865static int e_send_superseded(struct drbd_work *w, int unused)
7be8da07 1866{
d4dabbe2 1867 return e_send_ack(w, P_SUPERSEDED);
7be8da07
AG
1868}
1869
99920dc5 1870static int e_send_retry_write(struct drbd_work *w, int unused)
7be8da07
AG
1871{
1872 struct drbd_tconn *tconn = w->mdev->tconn;
1873
1874 return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
d4dabbe2 1875 P_RETRY_WRITE : P_SUPERSEDED);
7be8da07 1876}
b411b363 1877
3e394da1
AG
1878static bool seq_greater(u32 a, u32 b)
1879{
1880 /*
1881 * We assume 32-bit wrap-around here.
1882 * For 24-bit wrap-around, we would have to shift:
1883 * a <<= 8; b <<= 8;
1884 */
1885 return (s32)a - (s32)b > 0;
1886}
b411b363 1887
3e394da1
AG
1888static u32 seq_max(u32 a, u32 b)
1889{
1890 return seq_greater(a, b) ? a : b;
b411b363
PR
1891}
1892
43ae077d 1893static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
3e394da1 1894{
3c13b680 1895 unsigned int newest_peer_seq;
3e394da1 1896
b874d231 1897 if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)) {
7be8da07 1898 spin_lock(&mdev->peer_seq_lock);
3c13b680
LE
1899 newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
1900 mdev->peer_seq = newest_peer_seq;
7be8da07 1901 spin_unlock(&mdev->peer_seq_lock);
3c13b680
LE
1902 /* wake up only if we actually changed mdev->peer_seq */
1903 if (peer_seq == newest_peer_seq)
7be8da07
AG
1904 wake_up(&mdev->seq_wait);
1905 }
b411b363
PR
1906}
1907
d93f6302 1908static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
b6a370ba 1909{
d93f6302
LE
1910 return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
1911}
b6a370ba 1912
d93f6302 1913/* maybe change sync_ee into interval trees as well? */
3ea35df8 1914static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
d93f6302
LE
1915{
1916 struct drbd_peer_request *rs_req;
b6a370ba
PR
1917 bool rv = 0;
1918
d93f6302
LE
1919 spin_lock_irq(&mdev->tconn->req_lock);
1920 list_for_each_entry(rs_req, &mdev->sync_ee, w.list) {
1921 if (overlaps(peer_req->i.sector, peer_req->i.size,
1922 rs_req->i.sector, rs_req->i.size)) {
b6a370ba
PR
1923 rv = 1;
1924 break;
1925 }
1926 }
d93f6302 1927 spin_unlock_irq(&mdev->tconn->req_lock);
b6a370ba
PR
1928
1929 return rv;
1930}
1931
b411b363
PR
1932/* Called from receive_Data.
1933 * Synchronize packets on sock with packets on msock.
1934 *
1935 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1936 * packet traveling on msock, they are still processed in the order they have
1937 * been sent.
1938 *
1939 * Note: we don't care for Ack packets overtaking P_DATA packets.
1940 *
 1941 * In case peer_seq is larger than mdev->peer_seq, there are
1942 * outstanding packets on the msock. We wait for them to arrive.
1943 * In case we are the logically next packet, we update mdev->peer_seq
1944 * ourselves. Correctly handles 32bit wrap around.
1945 *
1946 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1947 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1948 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 1949 * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
1950 *
1951 * returns 0 if we may process the packet,
1952 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
7be8da07 1953static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
b411b363
PR
1954{
1955 DEFINE_WAIT(wait);
b411b363 1956 long timeout;
b874d231 1957 int ret = 0, tp;
7be8da07 1958
b874d231 1959 if (!test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags))
7be8da07
AG
1960 return 0;
1961
b411b363
PR
1962 spin_lock(&mdev->peer_seq_lock);
1963 for (;;) {
7be8da07
AG
1964 if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
1965 mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
b411b363 1966 break;
7be8da07 1967 }
b874d231 1968
b411b363
PR
1969 if (signal_pending(current)) {
1970 ret = -ERESTARTSYS;
1971 break;
1972 }
b874d231
PR
1973
1974 rcu_read_lock();
1975 tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
1976 rcu_read_unlock();
1977
1978 if (!tp)
1979 break;
1980
1981 /* Only need to wait if two_primaries is enabled */
7be8da07 1982 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
b411b363 1983 spin_unlock(&mdev->peer_seq_lock);
44ed167d
PR
1984 rcu_read_lock();
1985 timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10;
1986 rcu_read_unlock();
71b1c1eb 1987 timeout = schedule_timeout(timeout);
b411b363 1988 spin_lock(&mdev->peer_seq_lock);
7be8da07 1989 if (!timeout) {
b411b363 1990 ret = -ETIMEDOUT;
71b1c1eb 1991 dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
b411b363
PR
1992 break;
1993 }
1994 }
b411b363 1995 spin_unlock(&mdev->peer_seq_lock);
7be8da07 1996 finish_wait(&mdev->seq_wait, &wait);
b411b363
PR
1997 return ret;
1998}
1999
688593c5
LE
2000/* see also bio_flags_to_wire()
2001 * DRBD_REQ_*, because we need to semantically map the flags to data packet
2002 * flags and back. We may replicate to other kernel versions. */
2003static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
76d2e7ec 2004{
688593c5
LE
2005 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
2006 (dpf & DP_FUA ? REQ_FUA : 0) |
2007 (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
2008 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
76d2e7ec
PR
2009}
2010
7be8da07
AG
2011static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
2012 unsigned int size)
2013{
2014 struct drbd_interval *i;
2015
2016 repeat:
2017 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
2018 struct drbd_request *req;
2019 struct bio_and_error m;
2020
2021 if (!i->local)
2022 continue;
2023 req = container_of(i, struct drbd_request, i);
2024 if (!(req->rq_state & RQ_POSTPONED))
2025 continue;
2026 req->rq_state &= ~RQ_POSTPONED;
2027 __req_mod(req, NEG_ACKED, &m);
2028 spin_unlock_irq(&mdev->tconn->req_lock);
2029 if (m.bio)
2030 complete_master_bio(mdev, &m);
2031 spin_lock_irq(&mdev->tconn->req_lock);
2032 goto repeat;
2033 }
2034}
2035
2036static int handle_write_conflicts(struct drbd_conf *mdev,
2037 struct drbd_peer_request *peer_req)
2038{
2039 struct drbd_tconn *tconn = mdev->tconn;
427c0434 2040 bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &tconn->flags);
7be8da07
AG
2041 sector_t sector = peer_req->i.sector;
2042 const unsigned int size = peer_req->i.size;
2043 struct drbd_interval *i;
2044 bool equal;
2045 int err;
2046
2047 /*
2048 * Inserting the peer request into the write_requests tree will prevent
2049 * new conflicting local requests from being added.
2050 */
2051 drbd_insert_interval(&mdev->write_requests, &peer_req->i);
2052
2053 repeat:
2054 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
2055 if (i == &peer_req->i)
2056 continue;
2057
2058 if (!i->local) {
2059 /*
2060 * Our peer has sent a conflicting remote request; this
2061 * should not happen in a two-node setup. Wait for the
2062 * earlier peer request to complete.
2063 */
2064 err = drbd_wait_misc(mdev, i);
2065 if (err)
2066 goto out;
2067 goto repeat;
2068 }
2069
2070 equal = i->sector == sector && i->size == size;
2071 if (resolve_conflicts) {
2072 /*
2073 * If the peer request is fully contained within the
d4dabbe2
LE
2074 * overlapping request, it can be considered overwritten
2075 * and thus superseded; otherwise, it will be retried
2076 * once all overlapping requests have completed.
7be8da07 2077 */
d4dabbe2 2078 bool superseded = i->sector <= sector && i->sector +
7be8da07
AG
2079 (i->size >> 9) >= sector + (size >> 9);
2080
2081 if (!equal)
2082 dev_alert(DEV, "Concurrent writes detected: "
2083 "local=%llus +%u, remote=%llus +%u, "
2084 "assuming %s came first\n",
2085 (unsigned long long)i->sector, i->size,
2086 (unsigned long long)sector, size,
d4dabbe2 2087 superseded ? "local" : "remote");
7be8da07
AG
2088
2089 inc_unacked(mdev);
d4dabbe2 2090 peer_req->w.cb = superseded ? e_send_superseded :
7be8da07
AG
2091 e_send_retry_write;
2092 list_add_tail(&peer_req->w.list, &mdev->done_ee);
2093 wake_asender(mdev->tconn);
2094
2095 err = -ENOENT;
2096 goto out;
2097 } else {
2098 struct drbd_request *req =
2099 container_of(i, struct drbd_request, i);
2100
2101 if (!equal)
2102 dev_alert(DEV, "Concurrent writes detected: "
2103 "local=%llus +%u, remote=%llus +%u\n",
2104 (unsigned long long)i->sector, i->size,
2105 (unsigned long long)sector, size);
2106
2107 if (req->rq_state & RQ_LOCAL_PENDING ||
2108 !(req->rq_state & RQ_POSTPONED)) {
2109 /*
2110 * Wait for the node with the discard flag to
d4dabbe2
LE
2111 * decide if this request has been superseded
2112 * or needs to be retried.
2113 * Requests that have been superseded will
7be8da07
AG
2114 * disappear from the write_requests tree.
2115 *
2116 * In addition, wait for the conflicting
2117 * request to finish locally before submitting
2118 * the conflicting peer request.
2119 */
2120 err = drbd_wait_misc(mdev, &req->i);
2121 if (err) {
2122 _conn_request_state(mdev->tconn,
2123 NS(conn, C_TIMEOUT),
2124 CS_HARD);
2125 fail_postponed_requests(mdev, sector, size);
2126 goto out;
2127 }
2128 goto repeat;
2129 }
2130 /*
2131 * Remember to restart the conflicting requests after
2132 * the new peer request has completed.
2133 */
2134 peer_req->flags |= EE_RESTART_REQUESTS;
2135 }
2136 }
2137 err = 0;
2138
2139 out:
2140 if (err)
2141 drbd_remove_epoch_entry_interval(mdev, peer_req);
2142 return err;
2143}
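/*
 * Editorial sketch, not part of the original source: the "superseded" test
 * above only treats the peer request as overwritten if the conflicting
 * local interval fully contains it (sector in 512-byte units, size in
 * bytes, as in struct drbd_interval):
 *
 *	local i:   sector 0, size 8192  (sectors 0..15)
 *	peer req:  sector 8, size 4096  (sectors 8..15)  -> superseded
 *	peer req:  sector 8, size 8192  (sectors 8..23)  -> not contained,
 *							    retried instead
 *							    (e_send_retry_write)
 */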
2144
b411b363 2145/* mirrored write */
4a76b161 2146static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 2147{
4a76b161 2148 struct drbd_conf *mdev;
b411b363 2149 sector_t sector;
db830c46 2150 struct drbd_peer_request *peer_req;
e658983a 2151 struct p_data *p = pi->data;
7be8da07 2152 u32 peer_seq = be32_to_cpu(p->seq_num);
b411b363
PR
2153 int rw = WRITE;
2154 u32 dp_flags;
302bdeae 2155 int err, tp;
b411b363 2156
4a76b161
AG
2157 mdev = vnr_to_mdev(tconn, pi->vnr);
2158 if (!mdev)
2159 return -EIO;
b411b363 2160
7be8da07 2161 if (!get_ldev(mdev)) {
82bc0194
AG
2162 int err2;
2163
7be8da07 2164 err = wait_for_and_update_peer_seq(mdev, peer_seq);
e2857216 2165 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
12038a3a 2166 atomic_inc(&tconn->current_epoch->epoch_size);
e2857216 2167 err2 = drbd_drain_block(mdev, pi->size);
82bc0194
AG
2168 if (!err)
2169 err = err2;
2170 return err;
b411b363
PR
2171 }
2172
fcefa62e
AG
2173 /*
2174 * Corresponding put_ldev done either below (on various errors), or in
2175 * drbd_peer_request_endio, if we successfully submit the data at the
2176 * end of this function.
2177 */
b411b363
PR
2178
2179 sector = be64_to_cpu(p->sector);
e2857216 2180 peer_req = read_in_block(mdev, p->block_id, sector, pi->size);
db830c46 2181 if (!peer_req) {
b411b363 2182 put_ldev(mdev);
82bc0194 2183 return -EIO;
b411b363
PR
2184 }
2185
db830c46 2186 peer_req->w.cb = e_end_block;
b411b363 2187
688593c5
LE
2188 dp_flags = be32_to_cpu(p->dp_flags);
2189 rw |= wire_flags_to_bio(mdev, dp_flags);
81a3537a
LE
2190 if (peer_req->pages == NULL) {
2191 D_ASSERT(peer_req->i.size == 0);
a73ff323
LE
2192 D_ASSERT(dp_flags & DP_FLUSH);
2193 }
688593c5
LE
2194
2195 if (dp_flags & DP_MAY_SET_IN_SYNC)
db830c46 2196 peer_req->flags |= EE_MAY_SET_IN_SYNC;
688593c5 2197
12038a3a
PR
2198 spin_lock(&tconn->epoch_lock);
2199 peer_req->epoch = tconn->current_epoch;
db830c46
AG
2200 atomic_inc(&peer_req->epoch->epoch_size);
2201 atomic_inc(&peer_req->epoch->active);
12038a3a 2202 spin_unlock(&tconn->epoch_lock);
b411b363 2203
302bdeae
PR
2204 rcu_read_lock();
2205 tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
2206 rcu_read_unlock();
2207 if (tp) {
2208 peer_req->flags |= EE_IN_INTERVAL_TREE;
7be8da07
AG
2209 err = wait_for_and_update_peer_seq(mdev, peer_seq);
2210 if (err)
b411b363 2211 goto out_interrupted;
87eeee41 2212 spin_lock_irq(&mdev->tconn->req_lock);
7be8da07
AG
2213 err = handle_write_conflicts(mdev, peer_req);
2214 if (err) {
2215 spin_unlock_irq(&mdev->tconn->req_lock);
2216 if (err == -ENOENT) {
b411b363 2217 put_ldev(mdev);
82bc0194 2218 return 0;
b411b363 2219 }
7be8da07 2220 goto out_interrupted;
b411b363 2221 }
b874d231
PR
2222 } else {
2223 update_peer_seq(mdev, peer_seq);
7be8da07 2224 spin_lock_irq(&mdev->tconn->req_lock);
b874d231 2225 }
db830c46 2226 list_add(&peer_req->w.list, &mdev->active_ee);
87eeee41 2227 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 2228
d93f6302 2229 if (mdev->state.conn == C_SYNC_TARGET)
3ea35df8 2230 wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, peer_req));
b411b363 2231
303d1448 2232 if (mdev->tconn->agreed_pro_version < 100) {
44ed167d
PR
2233 rcu_read_lock();
2234 switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
303d1448
PR
2235 case DRBD_PROT_C:
2236 dp_flags |= DP_SEND_WRITE_ACK;
2237 break;
2238 case DRBD_PROT_B:
2239 dp_flags |= DP_SEND_RECEIVE_ACK;
2240 break;
b411b363 2241 }
44ed167d 2242 rcu_read_unlock();
b411b363
PR
2243 }
2244
303d1448
PR
2245 if (dp_flags & DP_SEND_WRITE_ACK) {
2246 peer_req->flags |= EE_SEND_WRITE_ACK;
b411b363
PR
2247 inc_unacked(mdev);
2248 /* corresponding dec_unacked() in e_end_block()
 2249 * respectively in _drbd_clear_done_ee */
303d1448
PR
2250 }
2251
2252 if (dp_flags & DP_SEND_RECEIVE_ACK) {
b411b363
PR
2253 /* I really don't like it that the receiver thread
2254 * sends on the msock, but anyways */
db830c46 2255 drbd_send_ack(mdev, P_RECV_ACK, peer_req);
b411b363
PR
2256 }
2257
6719fb03 2258 if (mdev->state.pdsk < D_INCONSISTENT) {
b411b363 2259 /* In case we have the only disk of the cluster, */
db830c46
AG
2260 drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
2261 peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
2262 peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
56392d2f 2263 drbd_al_begin_io(mdev, &peer_req->i, true);
b411b363
PR
2264 }
2265
82bc0194
AG
2266 err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
2267 if (!err)
2268 return 0;
b411b363 2269
10f6d992
LE
2270 /* don't care for the reason here */
2271 dev_err(DEV, "submit failed, triggering re-connect\n");
87eeee41 2272 spin_lock_irq(&mdev->tconn->req_lock);
db830c46
AG
2273 list_del(&peer_req->w.list);
2274 drbd_remove_epoch_entry_interval(mdev, peer_req);
87eeee41 2275 spin_unlock_irq(&mdev->tconn->req_lock);
db830c46 2276 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
181286ad 2277 drbd_al_complete_io(mdev, &peer_req->i);
22cc37a9 2278
b411b363 2279out_interrupted:
1e9dd291 2280 drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP);
b411b363 2281 put_ldev(mdev);
3967deb1 2282 drbd_free_peer_req(mdev, peer_req);
82bc0194 2283 return err;
b411b363
PR
2284}
2285
0f0601f4
LE
2286/* We may throttle resync, if the lower device seems to be busy,
2287 * and current sync rate is above c_min_rate.
2288 *
2289 * To decide whether or not the lower device is busy, we use a scheme similar
2290 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
 2291 * activity (more than 64 sectors) that we cannot account for with our own resync
2292 * activity, it obviously is "busy".
2293 *
2294 * The current sync rate used here uses only the most recent two step marks,
2295 * to have a short time average so we can react faster.
2296 */
e3555d85 2297int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
0f0601f4
LE
2298{
2299 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
2300 unsigned long db, dt, dbdt;
e3555d85 2301 struct lc_element *tmp;
0f0601f4
LE
2302 int curr_events;
2303 int throttle = 0;
daeda1cc
PR
2304 unsigned int c_min_rate;
2305
2306 rcu_read_lock();
2307 c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;
2308 rcu_read_unlock();
0f0601f4
LE
2309
2310 /* feature disabled? */
daeda1cc 2311 if (c_min_rate == 0)
0f0601f4
LE
2312 return 0;
2313
e3555d85
PR
2314 spin_lock_irq(&mdev->al_lock);
2315 tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
2316 if (tmp) {
2317 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
2318 if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
2319 spin_unlock_irq(&mdev->al_lock);
2320 return 0;
2321 }
2322 /* Do not slow down if app IO is already waiting for this extent */
2323 }
2324 spin_unlock_irq(&mdev->al_lock);
2325
0f0601f4
LE
2326 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
2327 (int)part_stat_read(&disk->part0, sectors[1]) -
2328 atomic_read(&mdev->rs_sect_ev);
e3555d85 2329
0f0601f4
LE
2330 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
2331 unsigned long rs_left;
2332 int i;
2333
2334 mdev->rs_last_events = curr_events;
2335
2336 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2337 * approx. */
2649f080
LE
2338 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2339
2340 if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
2341 rs_left = mdev->ov_left;
2342 else
2343 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
0f0601f4
LE
2344
2345 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
2346 if (!dt)
2347 dt++;
2348 db = mdev->rs_mark_left[i] - rs_left;
2349 dbdt = Bit2KB(db/dt);
2350
daeda1cc 2351 if (dbdt > c_min_rate)
0f0601f4
LE
2352 throttle = 1;
2353 }
2354 return throttle;
2355}
2356
2357
4a76b161 2358static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 2359{
4a76b161 2360 struct drbd_conf *mdev;
b411b363 2361 sector_t sector;
4a76b161 2362 sector_t capacity;
db830c46 2363 struct drbd_peer_request *peer_req;
b411b363 2364 struct digest_info *di = NULL;
b18b37be 2365 int size, verb;
b411b363 2366 unsigned int fault_type;
e658983a 2367 struct p_block_req *p = pi->data;
4a76b161
AG
2368
2369 mdev = vnr_to_mdev(tconn, pi->vnr);
2370 if (!mdev)
2371 return -EIO;
2372 capacity = drbd_get_capacity(mdev->this_bdev);
b411b363
PR
2373
2374 sector = be64_to_cpu(p->sector);
2375 size = be32_to_cpu(p->blksize);
2376
c670a398 2377 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
b411b363
PR
2378 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2379 (unsigned long long)sector, size);
82bc0194 2380 return -EINVAL;
b411b363
PR
2381 }
2382 if (sector + (size>>9) > capacity) {
2383 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2384 (unsigned long long)sector, size);
82bc0194 2385 return -EINVAL;
b411b363
PR
2386 }
2387
2388 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
b18b37be 2389 verb = 1;
e2857216 2390 switch (pi->cmd) {
b18b37be
PR
2391 case P_DATA_REQUEST:
2392 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
2393 break;
2394 case P_RS_DATA_REQUEST:
2395 case P_CSUM_RS_REQUEST:
2396 case P_OV_REQUEST:
2397 drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
2398 break;
2399 case P_OV_REPLY:
2400 verb = 0;
2401 dec_rs_pending(mdev);
2402 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
2403 break;
2404 default:
49ba9b1b 2405 BUG();
b18b37be
PR
2406 }
2407 if (verb && __ratelimit(&drbd_ratelimit_state))
b411b363
PR
2408 dev_err(DEV, "Can not satisfy peer's read request, "
2409 "no local data.\n");
b18b37be 2410
a821cc4a 2411 /* drain the payload, if any */
e2857216 2412 return drbd_drain_block(mdev, pi->size);
b411b363
PR
2413 }
2414
2415 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2416 * "criss-cross" setup, that might cause write-out on some other DRBD,
2417 * which in turn might block on the other node at this very place. */
0db55363 2418 peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);
db830c46 2419 if (!peer_req) {
b411b363 2420 put_ldev(mdev);
82bc0194 2421 return -ENOMEM;
b411b363
PR
2422 }
2423
e2857216 2424 switch (pi->cmd) {
b411b363 2425 case P_DATA_REQUEST:
db830c46 2426 peer_req->w.cb = w_e_end_data_req;
b411b363 2427 fault_type = DRBD_FAULT_DT_RD;
80a40e43
LE
2428 /* application IO, don't drbd_rs_begin_io */
2429 goto submit;
2430
b411b363 2431 case P_RS_DATA_REQUEST:
db830c46 2432 peer_req->w.cb = w_e_end_rsdata_req;
b411b363 2433 fault_type = DRBD_FAULT_RS_RD;
5f9915bb
LE
2434 /* used in the sector offset progress display */
2435 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
b411b363
PR
2436 break;
2437
2438 case P_OV_REPLY:
2439 case P_CSUM_RS_REQUEST:
2440 fault_type = DRBD_FAULT_RS_RD;
e2857216 2441 di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
b411b363
PR
2442 if (!di)
2443 goto out_free_e;
2444
e2857216 2445 di->digest_size = pi->size;
b411b363
PR
2446 di->digest = (((char *)di)+sizeof(struct digest_info));
2447
db830c46
AG
2448 peer_req->digest = di;
2449 peer_req->flags |= EE_HAS_DIGEST;
c36c3ced 2450
e2857216 2451 if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
b411b363
PR
2452 goto out_free_e;
2453
e2857216 2454 if (pi->cmd == P_CSUM_RS_REQUEST) {
31890f4a 2455 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
db830c46 2456 peer_req->w.cb = w_e_end_csum_rs_req;
5f9915bb
LE
2457 /* used in the sector offset progress display */
2458 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
e2857216 2459 } else if (pi->cmd == P_OV_REPLY) {
2649f080
LE
2460 /* track progress, we may need to throttle */
2461 atomic_add(size >> 9, &mdev->rs_sect_in);
db830c46 2462 peer_req->w.cb = w_e_end_ov_reply;
b411b363 2463 dec_rs_pending(mdev);
0f0601f4
LE
2464 /* drbd_rs_begin_io done when we sent this request,
2465 * but accounting still needs to be done. */
2466 goto submit_for_resync;
b411b363
PR
2467 }
2468 break;
2469
2470 case P_OV_REQUEST:
b411b363 2471 if (mdev->ov_start_sector == ~(sector_t)0 &&
31890f4a 2472 mdev->tconn->agreed_pro_version >= 90) {
de228bba
LE
2473 unsigned long now = jiffies;
2474 int i;
b411b363
PR
2475 mdev->ov_start_sector = sector;
2476 mdev->ov_position = sector;
30b743a2
LE
2477 mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2478 mdev->rs_total = mdev->ov_left;
de228bba
LE
2479 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2480 mdev->rs_mark_left[i] = mdev->ov_left;
2481 mdev->rs_mark_time[i] = now;
2482 }
b411b363
PR
2483 dev_info(DEV, "Online Verify start sector: %llu\n",
2484 (unsigned long long)sector);
2485 }
db830c46 2486 peer_req->w.cb = w_e_end_ov_req;
b411b363 2487 fault_type = DRBD_FAULT_RS_RD;
b411b363
PR
2488 break;
2489
b411b363 2490 default:
49ba9b1b 2491 BUG();
b411b363
PR
2492 }
2493
0f0601f4
LE
2494 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2495 * wrt the receiver, but it is not as straightforward as it may seem.
2496 * Various places in the resync start and stop logic assume resync
2497 * requests are processed in order, requeuing this on the worker thread
2498 * introduces a bunch of new code for synchronization between threads.
2499 *
2500 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2501 * "forever", throttling after drbd_rs_begin_io will lock that extent
2502 * for application writes for the same time. For now, just throttle
2503 * here, where the rest of the code expects the receiver to sleep for
2504 * a while, anyways.
2505 */
2506
2507 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2508 * this defers syncer requests for some time, before letting at least
 2509 * one request through. The resync controller on the receiving side
2510 * will adapt to the incoming rate accordingly.
2511 *
2512 * We cannot throttle here if remote is Primary/SyncTarget:
2513 * we would also throttle its application reads.
2514 * In that case, throttling is done on the SyncTarget only.
2515 */
e3555d85
PR
2516 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2517 schedule_timeout_uninterruptible(HZ/10);
2518 if (drbd_rs_begin_io(mdev, sector))
80a40e43 2519 goto out_free_e;
b411b363 2520
0f0601f4
LE
2521submit_for_resync:
2522 atomic_add(size >> 9, &mdev->rs_sect_ev);
2523
80a40e43 2524submit:
b411b363 2525 inc_unacked(mdev);
87eeee41 2526 spin_lock_irq(&mdev->tconn->req_lock);
db830c46 2527 list_add_tail(&peer_req->w.list, &mdev->read_ee);
87eeee41 2528 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 2529
fbe29dec 2530 if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
82bc0194 2531 return 0;
b411b363 2532
10f6d992
LE
2533 /* don't care for the reason here */
2534 dev_err(DEV, "submit failed, triggering re-connect\n");
87eeee41 2535 spin_lock_irq(&mdev->tconn->req_lock);
db830c46 2536 list_del(&peer_req->w.list);
87eeee41 2537 spin_unlock_irq(&mdev->tconn->req_lock);
22cc37a9
LE
2538 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2539
b411b363 2540out_free_e:
b411b363 2541 put_ldev(mdev);
3967deb1 2542 drbd_free_peer_req(mdev, peer_req);
82bc0194 2543 return -EIO;
b411b363
PR
2544}
2545
2546static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2547{
2548 int self, peer, rv = -100;
2549 unsigned long ch_self, ch_peer;
44ed167d 2550 enum drbd_after_sb_p after_sb_0p;
b411b363
PR
2551
2552 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2553 peer = mdev->p_uuid[UI_BITMAP] & 1;
2554
2555 ch_peer = mdev->p_uuid[UI_SIZE];
2556 ch_self = mdev->comm_bm_set;
2557
44ed167d
PR
2558 rcu_read_lock();
2559 after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p;
2560 rcu_read_unlock();
2561 switch (after_sb_0p) {
b411b363
PR
2562 case ASB_CONSENSUS:
2563 case ASB_DISCARD_SECONDARY:
2564 case ASB_CALL_HELPER:
44ed167d 2565 case ASB_VIOLENTLY:
b411b363
PR
2566 dev_err(DEV, "Configuration error.\n");
2567 break;
2568 case ASB_DISCONNECT:
2569 break;
2570 case ASB_DISCARD_YOUNGER_PRI:
2571 if (self == 0 && peer == 1) {
2572 rv = -1;
2573 break;
2574 }
2575 if (self == 1 && peer == 0) {
2576 rv = 1;
2577 break;
2578 }
2579 /* Else fall through to one of the other strategies... */
2580 case ASB_DISCARD_OLDER_PRI:
2581 if (self == 0 && peer == 1) {
2582 rv = 1;
2583 break;
2584 }
2585 if (self == 1 && peer == 0) {
2586 rv = -1;
2587 break;
2588 }
2589 /* Else fall through to one of the other strategies... */
ad19bf6e 2590 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
b411b363
PR
2591 "Using discard-least-changes instead\n");
2592 case ASB_DISCARD_ZERO_CHG:
2593 if (ch_peer == 0 && ch_self == 0) {
427c0434 2594 rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
b411b363
PR
2595 ? -1 : 1;
2596 break;
2597 } else {
2598 if (ch_peer == 0) { rv = 1; break; }
2599 if (ch_self == 0) { rv = -1; break; }
2600 }
44ed167d 2601 if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
b411b363
PR
2602 break;
2603 case ASB_DISCARD_LEAST_CHG:
2604 if (ch_self < ch_peer)
2605 rv = -1;
2606 else if (ch_self > ch_peer)
2607 rv = 1;
2608 else /* ( ch_self == ch_peer ) */
2609 /* Well, then use something else. */
427c0434 2610 rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
b411b363
PR
2611 ? -1 : 1;
2612 break;
2613 case ASB_DISCARD_LOCAL:
2614 rv = -1;
2615 break;
2616 case ASB_DISCARD_REMOTE:
2617 rv = 1;
2618 }
2619
2620 return rv;
2621}
2622
2623static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2624{
6184ea21 2625 int hg, rv = -100;
44ed167d 2626 enum drbd_after_sb_p after_sb_1p;
b411b363 2627
44ed167d
PR
2628 rcu_read_lock();
2629 after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p;
2630 rcu_read_unlock();
2631 switch (after_sb_1p) {
b411b363
PR
2632 case ASB_DISCARD_YOUNGER_PRI:
2633 case ASB_DISCARD_OLDER_PRI:
2634 case ASB_DISCARD_LEAST_CHG:
2635 case ASB_DISCARD_LOCAL:
2636 case ASB_DISCARD_REMOTE:
44ed167d 2637 case ASB_DISCARD_ZERO_CHG:
b411b363
PR
2638 dev_err(DEV, "Configuration error.\n");
2639 break;
2640 case ASB_DISCONNECT:
2641 break;
2642 case ASB_CONSENSUS:
2643 hg = drbd_asb_recover_0p(mdev);
2644 if (hg == -1 && mdev->state.role == R_SECONDARY)
2645 rv = hg;
2646 if (hg == 1 && mdev->state.role == R_PRIMARY)
2647 rv = hg;
2648 break;
2649 case ASB_VIOLENTLY:
2650 rv = drbd_asb_recover_0p(mdev);
2651 break;
2652 case ASB_DISCARD_SECONDARY:
2653 return mdev->state.role == R_PRIMARY ? 1 : -1;
2654 case ASB_CALL_HELPER:
2655 hg = drbd_asb_recover_0p(mdev);
2656 if (hg == -1 && mdev->state.role == R_PRIMARY) {
bb437946
AG
2657 enum drbd_state_rv rv2;
2658
b411b363
PR
2659 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2660 * we might be here in C_WF_REPORT_PARAMS which is transient.
2661 * we do not need to wait for the after state change work either. */
bb437946
AG
2662 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2663 if (rv2 != SS_SUCCESS) {
b411b363
PR
2664 drbd_khelper(mdev, "pri-lost-after-sb");
2665 } else {
2666 dev_warn(DEV, "Successfully gave up primary role.\n");
2667 rv = hg;
2668 }
2669 } else
2670 rv = hg;
2671 }
2672
2673 return rv;
2674}
2675
2676static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2677{
6184ea21 2678 int hg, rv = -100;
44ed167d 2679 enum drbd_after_sb_p after_sb_2p;
b411b363 2680
44ed167d
PR
2681 rcu_read_lock();
2682 after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p;
2683 rcu_read_unlock();
2684 switch (after_sb_2p) {
b411b363
PR
2685 case ASB_DISCARD_YOUNGER_PRI:
2686 case ASB_DISCARD_OLDER_PRI:
2687 case ASB_DISCARD_LEAST_CHG:
2688 case ASB_DISCARD_LOCAL:
2689 case ASB_DISCARD_REMOTE:
2690 case ASB_CONSENSUS:
2691 case ASB_DISCARD_SECONDARY:
44ed167d 2692 case ASB_DISCARD_ZERO_CHG:
b411b363
PR
2693 dev_err(DEV, "Configuration error.\n");
2694 break;
2695 case ASB_VIOLENTLY:
2696 rv = drbd_asb_recover_0p(mdev);
2697 break;
2698 case ASB_DISCONNECT:
2699 break;
2700 case ASB_CALL_HELPER:
2701 hg = drbd_asb_recover_0p(mdev);
2702 if (hg == -1) {
bb437946
AG
2703 enum drbd_state_rv rv2;
2704
b411b363
PR
2705 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2706 * we might be here in C_WF_REPORT_PARAMS which is transient.
2707 * we do not need to wait for the after state change work either. */
bb437946
AG
2708 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2709 if (rv2 != SS_SUCCESS) {
b411b363
PR
2710 drbd_khelper(mdev, "pri-lost-after-sb");
2711 } else {
2712 dev_warn(DEV, "Successfully gave up primary role.\n");
2713 rv = hg;
2714 }
2715 } else
2716 rv = hg;
2717 }
2718
2719 return rv;
2720}
2721
2722static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2723 u64 bits, u64 flags)
2724{
2725 if (!uuid) {
2726 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2727 return;
2728 }
2729 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2730 text,
2731 (unsigned long long)uuid[UI_CURRENT],
2732 (unsigned long long)uuid[UI_BITMAP],
2733 (unsigned long long)uuid[UI_HISTORY_START],
2734 (unsigned long long)uuid[UI_HISTORY_END],
2735 (unsigned long long)bits,
2736 (unsigned long long)flags);
2737}
2738
2739/*
2740 100 after split brain try auto recover
2741 2 C_SYNC_SOURCE set BitMap
2742 1 C_SYNC_SOURCE use BitMap
2743 0 no Sync
2744 -1 C_SYNC_TARGET use BitMap
2745 -2 C_SYNC_TARGET set BitMap
2746 -100 after split brain, disconnect
2747-1000 unrelated data
4a23f264
PR
2748-1091 requires proto 91
2749-1096 requires proto 96
b411b363
PR
2750 */
2751static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2752{
2753 u64 self, peer;
2754 int i, j;
2755
2756 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2757 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2758
2759 *rule_nr = 10;
2760 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2761 return 0;
2762
2763 *rule_nr = 20;
2764 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2765 peer != UUID_JUST_CREATED)
2766 return -2;
2767
2768 *rule_nr = 30;
2769 if (self != UUID_JUST_CREATED &&
2770 (peer == UUID_JUST_CREATED || peer == (u64)0))
2771 return 2;
2772
2773 if (self == peer) {
2774 int rct, dc; /* roles at crash time */
2775
2776 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2777
31890f4a 2778 if (mdev->tconn->agreed_pro_version < 91)
4a23f264 2779 return -1091;
b411b363
PR
2780
2781 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2782 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2783 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
9f2247bb
PR
2784 drbd_uuid_move_history(mdev);
2785 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
2786 mdev->ldev->md.uuid[UI_BITMAP] = 0;
b411b363
PR
2787
2788 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2789 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2790 *rule_nr = 34;
2791 } else {
2792 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2793 *rule_nr = 36;
2794 }
2795
2796 return 1;
2797 }
2798
2799 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2800
31890f4a 2801 if (mdev->tconn->agreed_pro_version < 91)
4a23f264 2802 return -1091;
b411b363
PR
2803
2804 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2805 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2806 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2807
2808 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2809 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2810 mdev->p_uuid[UI_BITMAP] = 0UL;
2811
2812 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2813 *rule_nr = 35;
2814 } else {
2815 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2816 *rule_nr = 37;
2817 }
2818
2819 return -1;
2820 }
2821
2822 /* Common power [off|failure] */
2823 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2824 (mdev->p_uuid[UI_FLAGS] & 2);
2825 /* lowest bit is set when we were primary,
2826 * next bit (weight 2) is set when peer was primary */
2827 *rule_nr = 40;
2828
2829 switch (rct) {
2830 case 0: /* !self_pri && !peer_pri */ return 0;
2831 case 1: /* self_pri && !peer_pri */ return 1;
2832 case 2: /* !self_pri && peer_pri */ return -1;
2833 case 3: /* self_pri && peer_pri */
427c0434 2834 dc = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
b411b363
PR
2835 return dc ? -1 : 1;
2836 }
2837 }
2838
2839 *rule_nr = 50;
2840 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2841 if (self == peer)
2842 return -1;
2843
2844 *rule_nr = 51;
2845 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2846 if (self == peer) {
31890f4a 2847 if (mdev->tconn->agreed_pro_version < 96 ?
4a23f264
PR
2848 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2849 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2850 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
b411b363
PR
 2851 /* The last P_SYNC_UUID did not get through. Undo the modifications the
 2852 peer made to its UUIDs when it last started a resync as sync source. */
2853
31890f4a 2854 if (mdev->tconn->agreed_pro_version < 91)
4a23f264 2855 return -1091;
b411b363
PR
2856
2857 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2858 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
4a23f264 2859
92b4ca29 2860 dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
4a23f264
PR
2861 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2862
b411b363
PR
2863 return -1;
2864 }
2865 }
2866
2867 *rule_nr = 60;
2868 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2869 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2870 peer = mdev->p_uuid[i] & ~((u64)1);
2871 if (self == peer)
2872 return -2;
2873 }
2874
2875 *rule_nr = 70;
2876 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2877 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2878 if (self == peer)
2879 return 1;
2880
2881 *rule_nr = 71;
2882 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2883 if (self == peer) {
31890f4a 2884 if (mdev->tconn->agreed_pro_version < 96 ?
4a23f264
PR
2885 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2886 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2887 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
b411b363
PR
 2888 /* The last P_SYNC_UUID did not get through. Undo the modifications we
 2889 made to our UUIDs when we last started a resync as sync source. */
2890
31890f4a 2891 if (mdev->tconn->agreed_pro_version < 91)
4a23f264 2892 return -1091;
b411b363 2893
9f2247bb
PR
2894 __drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2895 __drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
b411b363 2896
4a23f264 2897 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
b411b363
PR
2898 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2899 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2900
2901 return 1;
2902 }
2903 }
2904
2905
2906 *rule_nr = 80;
d8c2a36b 2907 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
b411b363
PR
2908 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2909 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2910 if (self == peer)
2911 return 2;
2912 }
2913
2914 *rule_nr = 90;
2915 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2916 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2917 if (self == peer && self != ((u64)0))
2918 return 100;
2919
2920 *rule_nr = 100;
2921 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2922 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2923 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2924 peer = mdev->p_uuid[j] & ~((u64)1);
2925 if (self == peer)
2926 return -100;
2927 }
2928 }
2929
2930 return -1000;
2931}
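/*
 * Editorial sketch, not part of the original source: two common outcomes of
 * the rule table above.  A freshly created device (current UUID still
 * UUID_JUST_CREATED) connecting to an established peer matches rule 20 and
 * returns -2: become C_SYNC_TARGET and set the whole bitmap (full sync).
 * Two nodes that shut down cleanly with identical current UUIDs and no
 * bitmap UUIDs reach rule 40 with rct == 0 and return 0: no resync needed.
 */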
2932
2933/* drbd_sync_handshake() returns the new conn state on success, or
2934 CONN_MASK (-1) on failure.
2935 */
2936static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2937 enum drbd_disk_state peer_disk) __must_hold(local)
2938{
b411b363
PR
2939 enum drbd_conns rv = C_MASK;
2940 enum drbd_disk_state mydisk;
44ed167d 2941 struct net_conf *nc;
6dff2902 2942 int hg, rule_nr, rr_conflict, tentative;
b411b363
PR
2943
2944 mydisk = mdev->state.disk;
2945 if (mydisk == D_NEGOTIATING)
2946 mydisk = mdev->new_state_tmp.disk;
2947
2948 dev_info(DEV, "drbd_sync_handshake:\n");
9f2247bb
PR
2949
2950 spin_lock_irq(&mdev->ldev->md.uuid_lock);
b411b363
PR
2951 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2952 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2953 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2954
2955 hg = drbd_uuid_compare(mdev, &rule_nr);
9f2247bb 2956 spin_unlock_irq(&mdev->ldev->md.uuid_lock);
b411b363
PR
2957
2958 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2959
2960 if (hg == -1000) {
2961 dev_alert(DEV, "Unrelated data, aborting!\n");
2962 return C_MASK;
2963 }
4a23f264
PR
2964 if (hg < -1000) {
2965 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
b411b363
PR
2966 return C_MASK;
2967 }
2968
2969 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2970 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2971 int f = (hg == -100) || abs(hg) == 2;
2972 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2973 if (f)
2974 hg = hg*2;
2975 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2976 hg > 0 ? "source" : "target");
2977 }
2978
3a11a487
AG
2979 if (abs(hg) == 100)
2980 drbd_khelper(mdev, "initial-split-brain");
2981
44ed167d
PR
2982 rcu_read_lock();
2983 nc = rcu_dereference(mdev->tconn->net_conf);
2984
2985 if (hg == 100 || (hg == -100 && nc->always_asbp)) {
b411b363
PR
2986 int pcount = (mdev->state.role == R_PRIMARY)
2987 + (peer_role == R_PRIMARY);
2988 int forced = (hg == -100);
2989
2990 switch (pcount) {
2991 case 0:
2992 hg = drbd_asb_recover_0p(mdev);
2993 break;
2994 case 1:
2995 hg = drbd_asb_recover_1p(mdev);
2996 break;
2997 case 2:
2998 hg = drbd_asb_recover_2p(mdev);
2999 break;
3000 }
3001 if (abs(hg) < 100) {
3002 dev_warn(DEV, "Split-Brain detected, %d primaries, "
3003 "automatically solved. Sync from %s node\n",
3004 pcount, (hg < 0) ? "peer" : "this");
3005 if (forced) {
3006 dev_warn(DEV, "Doing a full sync, since"
3007 " UUIDs where ambiguous.\n");
3008 hg = hg*2;
3009 }
3010 }
3011 }
3012
3013 if (hg == -100) {
08b165ba 3014 if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1))
b411b363 3015 hg = -1;
08b165ba 3016 if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1))
b411b363
PR
3017 hg = 1;
3018
3019 if (abs(hg) < 100)
3020 dev_warn(DEV, "Split-Brain detected, manually solved. "
3021 "Sync from %s node\n",
3022 (hg < 0) ? "peer" : "this");
3023 }
44ed167d 3024 rr_conflict = nc->rr_conflict;
6dff2902 3025 tentative = nc->tentative;
44ed167d 3026 rcu_read_unlock();
b411b363
PR
3027
3028 if (hg == -100) {
580b9767
LE
3029 /* FIXME this log message is not correct if we end up here
3030 * after an attempted attach on a diskless node.
3031 * We just refuse to attach -- well, we drop the "connection"
3032 * to that disk, in a way... */
3a11a487 3033 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
b411b363
PR
3034 drbd_khelper(mdev, "split-brain");
3035 return C_MASK;
3036 }
3037
3038 if (hg > 0 && mydisk <= D_INCONSISTENT) {
3039 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
3040 return C_MASK;
3041 }
3042
3043 if (hg < 0 && /* by intention we do not use mydisk here. */
3044 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
44ed167d 3045 switch (rr_conflict) {
b411b363
PR
3046 case ASB_CALL_HELPER:
3047 drbd_khelper(mdev, "pri-lost");
3048 /* fall through */
3049 case ASB_DISCONNECT:
3050 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
3051 return C_MASK;
3052 case ASB_VIOLENTLY:
3053 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
3054 "assumption\n");
3055 }
3056 }
3057
6dff2902 3058 if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
cf14c2e9
PR
3059 if (hg == 0)
3060 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
3061 else
3062 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
3063 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
3064 abs(hg) >= 2 ? "full" : "bit-map based");
3065 return C_MASK;
3066 }
3067
b411b363
PR
3068 if (abs(hg) >= 2) {
3069 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
20ceb2b2
LE
3070 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
3071 BM_LOCKED_SET_ALLOWED))
b411b363
PR
3072 return C_MASK;
3073 }
3074
3075 if (hg > 0) { /* become sync source. */
3076 rv = C_WF_BITMAP_S;
3077 } else if (hg < 0) { /* become sync target */
3078 rv = C_WF_BITMAP_T;
3079 } else {
3080 rv = C_CONNECTED;
3081 if (drbd_bm_total_weight(mdev)) {
3082 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
3083 drbd_bm_total_weight(mdev));
3084 }
3085 }
3086
3087 return rv;
3088}
3089
f179d76d 3090static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
b411b363
PR
3091{
3092 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
f179d76d
PR
3093 if (peer == ASB_DISCARD_REMOTE)
3094 return ASB_DISCARD_LOCAL;
b411b363
PR
3095
3096 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
f179d76d
PR
3097 if (peer == ASB_DISCARD_LOCAL)
3098 return ASB_DISCARD_REMOTE;
b411b363
PR
3099
3100 /* everything else is valid if they are equal on both sides. */
f179d76d 3101 return peer;
b411b363
PR
3102}
3103
e2857216 3104static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3105{
e658983a 3106 struct p_protocol *p = pi->data;
036b17ea
PR
3107 enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
3108 int p_proto, p_discard_my_data, p_two_primaries, cf;
3109 struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
3110 char integrity_alg[SHARED_SECRET_MAX] = "";
accdbcc5 3111 struct crypto_hash *peer_integrity_tfm = NULL;
7aca6c75 3112 void *int_dig_in = NULL, *int_dig_vv = NULL;
b411b363 3113
b411b363
PR
3114 p_proto = be32_to_cpu(p->protocol);
3115 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
3116 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
3117 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
b411b363 3118 p_two_primaries = be32_to_cpu(p->two_primaries);
cf14c2e9 3119 cf = be32_to_cpu(p->conn_flags);
6139f60d 3120 p_discard_my_data = cf & CF_DISCARD_MY_DATA;
cf14c2e9 3121
86db0618
AG
3122 if (tconn->agreed_pro_version >= 87) {
3123 int err;
cf14c2e9 3124
88104ca4 3125 if (pi->size > sizeof(integrity_alg))
86db0618 3126 return -EIO;
88104ca4 3127 err = drbd_recv_all(tconn, integrity_alg, pi->size);
86db0618
AG
3128 if (err)
3129 return err;
036b17ea 3130 integrity_alg[SHARED_SECRET_MAX - 1] = 0;
b411b363
PR
3131 }
3132
7d4c782c 3133 if (pi->cmd != P_PROTOCOL_UPDATE) {
fbc12f45 3134 clear_bit(CONN_DRY_RUN, &tconn->flags);
b411b363 3135
fbc12f45
AG
3136 if (cf & CF_DRY_RUN)
3137 set_bit(CONN_DRY_RUN, &tconn->flags);
b411b363 3138
fbc12f45
AG
3139 rcu_read_lock();
3140 nc = rcu_dereference(tconn->net_conf);
b411b363 3141
fbc12f45 3142 if (p_proto != nc->wire_protocol) {
d505d9be 3143 conn_err(tconn, "incompatible %s settings\n", "protocol");
fbc12f45
AG
3144 goto disconnect_rcu_unlock;
3145 }
b411b363 3146
fbc12f45 3147 if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
d505d9be 3148 conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri");
fbc12f45
AG
3149 goto disconnect_rcu_unlock;
3150 }
b411b363 3151
fbc12f45 3152 if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
d505d9be 3153 conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri");
fbc12f45
AG
3154 goto disconnect_rcu_unlock;
3155 }
b411b363 3156
fbc12f45 3157 if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
d505d9be 3158 conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri");
fbc12f45
AG
3159 goto disconnect_rcu_unlock;
3160 }
b411b363 3161
fbc12f45 3162 if (p_discard_my_data && nc->discard_my_data) {
d505d9be 3163 conn_err(tconn, "incompatible %s settings\n", "discard-my-data");
fbc12f45
AG
3164 goto disconnect_rcu_unlock;
3165 }
b411b363 3166
fbc12f45 3167 if (p_two_primaries != nc->two_primaries) {
d505d9be 3168 conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries");
fbc12f45
AG
3169 goto disconnect_rcu_unlock;
3170 }
b411b363 3171
fbc12f45 3172 if (strcmp(integrity_alg, nc->integrity_alg)) {
d505d9be 3173 conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg");
fbc12f45
AG
3174 goto disconnect_rcu_unlock;
3175 }
b411b363 3176
fbc12f45 3177 rcu_read_unlock();
b411b363
PR
3178 }
3179
7d4c782c
AG
3180 if (integrity_alg[0]) {
3181 int hash_size;
3182
3183 /*
3184 * We can only change the peer data integrity algorithm
3185 * here. Changing our own data integrity algorithm
3186 * requires that we send a P_PROTOCOL_UPDATE packet at
3187 * the same time; otherwise, the peer has no way to
3188 * tell between which packets the algorithm should
3189 * change.
3190 */
b411b363 3191
7d4c782c
AG
3192 peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
3193 if (!peer_integrity_tfm) {
3194 conn_err(tconn, "peer data-integrity-alg %s not supported\n",
3195 integrity_alg);
3196 goto disconnect;
3197 }
b411b363 3198
7d4c782c
AG
3199 hash_size = crypto_hash_digestsize(peer_integrity_tfm);
3200 int_dig_in = kmalloc(hash_size, GFP_KERNEL);
3201 int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
3202 if (!(int_dig_in && int_dig_vv)) {
3203 conn_err(tconn, "Allocation of buffers for data integrity checking failed\n");
b411b363
PR
3204 goto disconnect;
3205 }
b411b363
PR
3206 }
3207
7d4c782c
AG
3208 new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
3209 if (!new_net_conf) {
3210 conn_err(tconn, "Allocation of new net_conf failed\n");
3211 goto disconnect;
3212 }
3213
3214 mutex_lock(&tconn->data.mutex);
3215 mutex_lock(&tconn->conf_update);
3216 old_net_conf = tconn->net_conf;
3217 *new_net_conf = *old_net_conf;
3218
3219 new_net_conf->wire_protocol = p_proto;
3220 new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
3221 new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
3222 new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
3223 new_net_conf->two_primaries = p_two_primaries;
3224
3225 rcu_assign_pointer(tconn->net_conf, new_net_conf);
3226 mutex_unlock(&tconn->conf_update);
3227 mutex_unlock(&tconn->data.mutex);
3228
3229 crypto_free_hash(tconn->peer_integrity_tfm);
3230 kfree(tconn->int_dig_in);
3231 kfree(tconn->int_dig_vv);
3232 tconn->peer_integrity_tfm = peer_integrity_tfm;
3233 tconn->int_dig_in = int_dig_in;
3234 tconn->int_dig_vv = int_dig_vv;
3235
3236 if (strcmp(old_net_conf->integrity_alg, integrity_alg))
3237 conn_info(tconn, "peer data-integrity-alg: %s\n",
3238 integrity_alg[0] ? integrity_alg : "(none)");
3239
3240 synchronize_rcu();
3241 kfree(old_net_conf);
82bc0194 3242 return 0;
b411b363 3243
44ed167d
PR
3244disconnect_rcu_unlock:
3245 rcu_read_unlock();
b411b363 3246disconnect:
b792c35c 3247 crypto_free_hash(peer_integrity_tfm);
036b17ea
PR
3248 kfree(int_dig_in);
3249 kfree(int_dig_vv);
7204624c 3250 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3251 return -EIO;
b411b363
PR
3252}
3253
3254/* helper function
3255 * input: alg name, feature name
3256 * return: NULL (alg name was "")
3257 * ERR_PTR(error) if something goes wrong
3258 * or the crypto hash ptr, if it worked out ok. */
3259struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
3260 const char *alg, const char *name)
3261{
3262 struct crypto_hash *tfm;
3263
3264 if (!alg[0])
3265 return NULL;
3266
3267 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
3268 if (IS_ERR(tfm)) {
3269 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
3270 alg, name, PTR_ERR(tfm));
3271 return tfm;
3272 }
b411b363
PR
3273 return tfm;
3274}
3275
4a76b161
AG
3276static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
3277{
3278 void *buffer = tconn->data.rbuf;
3279 int size = pi->size;
3280
3281 while (size) {
3282 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
3283 s = drbd_recv(tconn, buffer, s);
3284 if (s <= 0) {
3285 if (s < 0)
3286 return s;
3287 break;
3288 }
3289 size -= s;
3290 }
3291 if (size)
3292 return -EIO;
3293 return 0;
3294}
3295
3296/*
3297 * config_unknown_volume - device configuration command for unknown volume
3298 *
3299 * When a device is added to an existing connection, the node on which the
3300 * device is added first will send configuration commands to its peer but the
3301 * peer will not know about the device yet. It will warn and ignore these
3302 * commands. Once the device is added on the second node, the second node will
3303 * send the same device configuration commands, but in the other direction.
3304 *
3305 * (We can also end up here if drbd is misconfigured.)
3306 */
3307static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
3308{
2fcb8f30
AG
3309 conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n",
3310 cmdname(pi->cmd), pi->vnr);
4a76b161
AG
3311 return ignore_remaining_packet(tconn, pi);
3312}
3313
3314static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3315{
4a76b161 3316 struct drbd_conf *mdev;
e658983a 3317 struct p_rs_param_95 *p;
b411b363
PR
3318 unsigned int header_size, data_size, exp_max_sz;
3319 struct crypto_hash *verify_tfm = NULL;
3320 struct crypto_hash *csums_tfm = NULL;
2ec91e0e 3321 struct net_conf *old_net_conf, *new_net_conf = NULL;
813472ce 3322 struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
4a76b161 3323 const int apv = tconn->agreed_pro_version;
813472ce 3324 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
778f271d 3325 int fifo_size = 0;
82bc0194 3326 int err;
b411b363 3327
4a76b161
AG
3328 mdev = vnr_to_mdev(tconn, pi->vnr);
3329 if (!mdev)
3330 return config_unknown_volume(tconn, pi);
b411b363
PR
3331
3332 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
3333 : apv == 88 ? sizeof(struct p_rs_param)
3334 + SHARED_SECRET_MAX
8e26f9cc
PR
3335 : apv <= 94 ? sizeof(struct p_rs_param_89)
3336 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
b411b363 3337
e2857216 3338 if (pi->size > exp_max_sz) {
b411b363 3339 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
e2857216 3340 pi->size, exp_max_sz);
82bc0194 3341 return -EIO;
b411b363
PR
3342 }
3343
3344 if (apv <= 88) {
e658983a 3345 header_size = sizeof(struct p_rs_param);
e2857216 3346 data_size = pi->size - header_size;
8e26f9cc 3347 } else if (apv <= 94) {
e658983a 3348 header_size = sizeof(struct p_rs_param_89);
e2857216 3349 data_size = pi->size - header_size;
b411b363 3350 D_ASSERT(data_size == 0);
8e26f9cc 3351 } else {
e658983a 3352 header_size = sizeof(struct p_rs_param_95);
e2857216 3353 data_size = pi->size - header_size;
b411b363
PR
3354 D_ASSERT(data_size == 0);
3355 }
3356
3357 /* initialize verify_alg and csums_alg */
e658983a 3358 p = pi->data;
b411b363
PR
3359 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3360
e658983a 3361 err = drbd_recv_all(mdev->tconn, p, header_size);
82bc0194
AG
3362 if (err)
3363 return err;
b411b363 3364
daeda1cc
PR
3365 mutex_lock(&mdev->tconn->conf_update);
3366 old_net_conf = mdev->tconn->net_conf;
813472ce
PR
3367 if (get_ldev(mdev)) {
3368 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3369 if (!new_disk_conf) {
3370 put_ldev(mdev);
3371 mutex_unlock(&mdev->tconn->conf_update);
3372 dev_err(DEV, "Allocation of new disk_conf failed\n");
3373 return -ENOMEM;
3374 }
daeda1cc 3375
813472ce
PR
3376 old_disk_conf = mdev->ldev->disk_conf;
3377 *new_disk_conf = *old_disk_conf;
b411b363 3378
6394b935 3379 new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
813472ce 3380 }
b411b363
PR
3381
3382 if (apv >= 88) {
3383 if (apv == 88) {
5de73827
PR
3384 if (data_size > SHARED_SECRET_MAX || data_size == 0) {
3385 dev_err(DEV, "verify-alg of wrong size, "
3386 "peer wants %u, accepting only up to %u byte\n",
3387 data_size, SHARED_SECRET_MAX);
813472ce
PR
3388 err = -EIO;
3389 goto reconnect;
b411b363
PR
3390 }
3391
82bc0194 3392 err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
813472ce
PR
3393 if (err)
3394 goto reconnect;
b411b363
PR
3395 /* we expect NUL terminated string */
3396 /* but just in case someone tries to be evil */
3397 D_ASSERT(p->verify_alg[data_size-1] == 0);
3398 p->verify_alg[data_size-1] = 0;
3399
3400 } else /* apv >= 89 */ {
3401 /* we still expect NUL terminated strings */
3402 /* but just in case someone tries to be evil */
3403 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3404 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
3405 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3406 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3407 }
3408
2ec91e0e 3409 if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
b411b363
PR
3410 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3411 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2ec91e0e 3412 old_net_conf->verify_alg, p->verify_alg);
b411b363
PR
3413 goto disconnect;
3414 }
3415 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
3416 p->verify_alg, "verify-alg");
3417 if (IS_ERR(verify_tfm)) {
3418 verify_tfm = NULL;
3419 goto disconnect;
3420 }
3421 }
3422
2ec91e0e 3423 if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
b411b363
PR
3424 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3425 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2ec91e0e 3426 old_net_conf->csums_alg, p->csums_alg);
b411b363
PR
3427 goto disconnect;
3428 }
3429 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
3430 p->csums_alg, "csums-alg");
3431 if (IS_ERR(csums_tfm)) {
3432 csums_tfm = NULL;
3433 goto disconnect;
3434 }
3435 }
3436
813472ce 3437 if (apv > 94 && new_disk_conf) {
daeda1cc
PR
3438 new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3439 new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
3440 new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
3441 new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
778f271d 3442
daeda1cc 3443 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
9958c857 3444 if (fifo_size != mdev->rs_plan_s->size) {
813472ce
PR
3445 new_plan = fifo_alloc(fifo_size);
3446 if (!new_plan) {
778f271d 3447 dev_err(DEV, "kmalloc of fifo_buffer failed");
f399002e 3448 put_ldev(mdev);
778f271d
PR
3449 goto disconnect;
3450 }
3451 }
8e26f9cc 3452 }
b411b363 3453
91fd4dad 3454 if (verify_tfm || csums_tfm) {
2ec91e0e
PR
3455 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
3456 if (!new_net_conf) {
91fd4dad
PR
3457 dev_err(DEV, "Allocation of new net_conf failed\n");
3458 goto disconnect;
3459 }
3460
2ec91e0e 3461 *new_net_conf = *old_net_conf;
91fd4dad
PR
3462
3463 if (verify_tfm) {
2ec91e0e
PR
3464 strcpy(new_net_conf->verify_alg, p->verify_alg);
3465 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
91fd4dad
PR
3466 crypto_free_hash(mdev->tconn->verify_tfm);
3467 mdev->tconn->verify_tfm = verify_tfm;
3468 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
3469 }
3470 if (csums_tfm) {
2ec91e0e
PR
3471 strcpy(new_net_conf->csums_alg, p->csums_alg);
3472 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
91fd4dad
PR
3473 crypto_free_hash(mdev->tconn->csums_tfm);
3474 mdev->tconn->csums_tfm = csums_tfm;
3475 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
3476 }
2ec91e0e 3477 rcu_assign_pointer(tconn->net_conf, new_net_conf);
778f271d 3478 }
b411b363
PR
3479 }
3480
813472ce
PR
3481 if (new_disk_conf) {
3482 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3483 put_ldev(mdev);
3484 }
3485
3486 if (new_plan) {
3487 old_plan = mdev->rs_plan_s;
3488 rcu_assign_pointer(mdev->rs_plan_s, new_plan);
b411b363 3489 }
daeda1cc
PR
3490
3491 mutex_unlock(&mdev->tconn->conf_update);
3492 synchronize_rcu();
3493 if (new_net_conf)
3494 kfree(old_net_conf);
3495 kfree(old_disk_conf);
813472ce 3496 kfree(old_plan);
daeda1cc 3497
82bc0194 3498 return 0;
b411b363 3499
813472ce
PR
3500reconnect:
3501 if (new_disk_conf) {
3502 put_ldev(mdev);
3503 kfree(new_disk_conf);
3504 }
3505 mutex_unlock(&mdev->tconn->conf_update);
3506 return -EIO;
3507
b411b363 3508disconnect:
813472ce
PR
3509 kfree(new_plan);
3510 if (new_disk_conf) {
3511 put_ldev(mdev);
3512 kfree(new_disk_conf);
3513 }
a0095508 3514 mutex_unlock(&mdev->tconn->conf_update);
b411b363
PR
3515 /* just for completeness: actually not needed,
3516 * as this is not reached if csums_tfm was ok. */
3517 crypto_free_hash(csums_tfm);
3518 /* but free the verify_tfm again, if csums_tfm did not work out */
3519 crypto_free_hash(verify_tfm);
38fa9988 3520 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3521 return -EIO;
b411b363
PR
3522}
3523
b411b363
PR
3524/* warn if the arguments differ by more than 12.5% */
3525static void warn_if_differ_considerably(struct drbd_conf *mdev,
3526 const char *s, sector_t a, sector_t b)
3527{
3528 sector_t d;
3529 if (a == 0 || b == 0)
3530 return;
3531 d = (a > b) ? (a - b) : (b - a);
3532 if (d > (a>>3) || d > (b>>3))
3533 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3534 (unsigned long long)a, (unsigned long long)b);
3535}
3536
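warn_if_differ_considerably() fires when the two sizes differ by more than one eighth of either value (the `>> 3` is a divide by 8, hence the 12.5% in the comment). A stand-alone sketch of the same test with a worked example (names invented for illustration):

/* Illustrative sketch only -- not part of drbd_receiver.c. */
#include <stdio.h>

static int differ_considerably(unsigned long long a, unsigned long long b)
{
	unsigned long long d;

	if (a == 0 || b == 0)
		return 0;
	d = (a > b) ? (a - b) : (b - a);
	return d > (a >> 3) || d > (b >> 3);
}

int main(void)
{
	/* d = 100, a/8 = 125, b/8 = 112 -> within 12.5%, no warning */
	printf("%d\n", differ_considerably(1000, 900));	/* -> 0 */
	/* d = 200 exceeds a/8 = 125 -> would warn */
	printf("%d\n", differ_considerably(1000, 800));	/* -> 1 */
	return 0;
}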
4a76b161 3537static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3538{
4a76b161 3539 struct drbd_conf *mdev;
e658983a 3540 struct p_sizes *p = pi->data;
e96c9633 3541 enum determine_dev_size dd = DS_UNCHANGED;
b411b363
PR
3542 sector_t p_size, p_usize, my_usize;
3543 int ldsc = 0; /* local disk size changed */
e89b591c 3544 enum dds_flags ddsf;
b411b363 3545
4a76b161
AG
3546 mdev = vnr_to_mdev(tconn, pi->vnr);
3547 if (!mdev)
3548 return config_unknown_volume(tconn, pi);
3549
b411b363
PR
3550 p_size = be64_to_cpu(p->d_size);
3551 p_usize = be64_to_cpu(p->u_size);
3552
b411b363
PR
3553 /* just store the peer's disk size for now.
3554 * we still need to figure out whether we accept that. */
3555 mdev->p_size = p_size;
3556
b411b363 3557 if (get_ldev(mdev)) {
daeda1cc
PR
3558 rcu_read_lock();
3559 my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
3560 rcu_read_unlock();
3561
b411b363
PR
3562 warn_if_differ_considerably(mdev, "lower level device sizes",
3563 p_size, drbd_get_max_capacity(mdev->ldev));
3564 warn_if_differ_considerably(mdev, "user requested size",
daeda1cc 3565 p_usize, my_usize);
b411b363
PR
3566
3567 /* if this is the first connect, or an otherwise expected
3568 * param exchange, choose the minimum */
3569 if (mdev->state.conn == C_WF_REPORT_PARAMS)
daeda1cc 3570 p_usize = min_not_zero(my_usize, p_usize);
b411b363
PR
3571
3572 /* Never shrink a device with usable data during connect.
3573 But allow online shrinking if we are connected. */
ef5e44a6 3574 if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
daeda1cc
PR
3575 drbd_get_capacity(mdev->this_bdev) &&
3576 mdev->state.disk >= D_OUTDATED &&
3577 mdev->state.conn < C_CONNECTED) {
b411b363 3578 dev_err(DEV, "The peer's disk size is too small!\n");
38fa9988 3579 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363 3580 put_ldev(mdev);
82bc0194 3581 return -EIO;
b411b363 3582 }
daeda1cc
PR
3583
3584 if (my_usize != p_usize) {
3585 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
3586
3587 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3588 if (!new_disk_conf) {
3589 dev_err(DEV, "Allocation of new disk_conf failed\n");
3590 put_ldev(mdev);
3591 return -ENOMEM;
3592 }
3593
3594 mutex_lock(&mdev->tconn->conf_update);
3595 old_disk_conf = mdev->ldev->disk_conf;
3596 *new_disk_conf = *old_disk_conf;
3597 new_disk_conf->disk_size = p_usize;
3598
3599 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3600 mutex_unlock(&mdev->tconn->conf_update);
3601 synchronize_rcu();
3602 kfree(old_disk_conf);
3603
3604 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3605 (unsigned long)p_usize);
b411b363 3606 }
daeda1cc 3607
b411b363
PR
3608 put_ldev(mdev);
3609 }
b411b363 3610
e89b591c 3611 ddsf = be16_to_cpu(p->dds_flags);
b411b363 3612 if (get_ldev(mdev)) {
d752b269 3613 dd = drbd_determine_dev_size(mdev, ddsf, NULL);
b411b363 3614 put_ldev(mdev);
e96c9633 3615 if (dd == DS_ERROR)
82bc0194 3616 return -EIO;
b411b363
PR
3617 drbd_md_sync(mdev);
3618 } else {
3619 /* I am diskless, need to accept the peer's size. */
3620 drbd_set_my_capacity(mdev, p_size);
3621 }
3622
99432fcc
PR
3623 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3624 drbd_reconsider_max_bio_size(mdev);
3625
b411b363
PR
3626 if (get_ldev(mdev)) {
3627 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3628 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3629 ldsc = 1;
3630 }
3631
b411b363
PR
3632 put_ldev(mdev);
3633 }
3634
3635 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3636 if (be64_to_cpu(p->c_size) !=
3637 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3638 /* we have different sizes, probably peer
3639 * needs to know my new size... */
e89b591c 3640 drbd_send_sizes(mdev, 0, ddsf);
b411b363
PR
3641 }
3642 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
e96c9633 3643 (dd == DS_GREW && mdev->state.conn == C_CONNECTED)) {
b411b363 3644 if (mdev->state.pdsk >= D_INCONSISTENT &&
e89b591c
PR
3645 mdev->state.disk >= D_INCONSISTENT) {
3646 if (ddsf & DDSF_NO_RESYNC)
3647 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3648 else
3649 resync_after_online_grow(mdev);
3650 } else
b411b363
PR
3651 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3652 }
3653 }
3654
82bc0194 3655 return 0;
b411b363
PR
3656}
3657
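During the first parameter exchange, receive_sizes() above settles on the smaller of the two non-zero user-requested sizes (min_not_zero) and refuses a connection that would shrink a device still holding usable data. A tiny sketch of that selection rule (the helper is reimplemented here for illustration; it is not the kernel's macro):

/* Illustrative sketch only -- not part of drbd_receiver.c. */
#include <stdio.h>

typedef unsigned long long sector_t;

/* Pick the smaller of two values, treating zero as "not configured". */
static sector_t min_not_zero_ull(sector_t a, sector_t b)
{
	if (a == 0)
		return b;
	if (b == 0)
		return a;
	return a < b ? a : b;
}

int main(void)
{
	printf("%llu\n", min_not_zero_ull(0, 4096));	/* -> 4096 */
	printf("%llu\n", min_not_zero_ull(8192, 4096));	/* -> 4096 */
	return 0;
}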
4a76b161 3658static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3659{
4a76b161 3660 struct drbd_conf *mdev;
e658983a 3661 struct p_uuids *p = pi->data;
b411b363 3662 u64 *p_uuid;
62b0da3a 3663 int i, updated_uuids = 0;
b411b363 3664
4a76b161
AG
3665 mdev = vnr_to_mdev(tconn, pi->vnr);
3666 if (!mdev)
3667 return config_unknown_volume(tconn, pi);
3668
b411b363 3669 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
063eacf8
JW
3670 if (!p_uuid) {
3671 dev_err(DEV, "kmalloc of p_uuid failed\n");
3672 return false;
3673 }
b411b363
PR
3674
3675 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3676 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3677
3678 kfree(mdev->p_uuid);
3679 mdev->p_uuid = p_uuid;
3680
3681 if (mdev->state.conn < C_CONNECTED &&
3682 mdev->state.disk < D_INCONSISTENT &&
3683 mdev->state.role == R_PRIMARY &&
3684 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3685 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3686 (unsigned long long)mdev->ed_uuid);
38fa9988 3687 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3688 return -EIO;
b411b363
PR
3689 }
3690
3691 if (get_ldev(mdev)) {
3692 int skip_initial_sync =
3693 mdev->state.conn == C_CONNECTED &&
31890f4a 3694 mdev->tconn->agreed_pro_version >= 90 &&
b411b363
PR
3695 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3696 (p_uuid[UI_FLAGS] & 8);
3697 if (skip_initial_sync) {
3698 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3699 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
20ceb2b2
LE
3700 "clear_n_write from receive_uuids",
3701 BM_LOCKED_TEST_ALLOWED);
b411b363
PR
3702 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3703 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3704 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3705 CS_VERBOSE, NULL);
3706 drbd_md_sync(mdev);
62b0da3a 3707 updated_uuids = 1;
b411b363
PR
3708 }
3709 put_ldev(mdev);
18a50fa2
PR
3710 } else if (mdev->state.disk < D_INCONSISTENT &&
3711 mdev->state.role == R_PRIMARY) {
3712 /* I am a diskless primary, the peer just created a new current UUID
3713 for me. */
62b0da3a 3714 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
b411b363
PR
3715 }
3716
3717 /* Before we test the disk state, we should wait until any ongoing
3718 cluster wide state change has finished. That is important if
3719 we are primary and are detaching from our disk. We need to see the
3720 new disk state... */
8410da8f
PR
3721 mutex_lock(mdev->state_mutex);
3722 mutex_unlock(mdev->state_mutex);
b411b363 3723 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
62b0da3a
LE
3724 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3725
3726 if (updated_uuids)
3727 drbd_print_uuids(mdev, "receiver updated UUIDs to");
b411b363 3728
82bc0194 3729 return 0;
b411b363
PR
3730}
3731
3732/**
3733 * convert_state() - Converts the peer's view of the cluster state to our point of view
3734 * @ps: The state as seen by the peer.
3735 */
3736static union drbd_state convert_state(union drbd_state ps)
3737{
3738 union drbd_state ms;
3739
3740 static enum drbd_conns c_tab[] = {
369bea63 3741 [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
b411b363
PR
3742 [C_CONNECTED] = C_CONNECTED,
3743
3744 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3745 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3746 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3747 [C_VERIFY_S] = C_VERIFY_T,
3748 [C_MASK] = C_MASK,
3749 };
3750
3751 ms.i = ps.i;
3752
3753 ms.conn = c_tab[ps.conn];
3754 ms.peer = ps.role;
3755 ms.role = ps.peer;
3756 ms.pdsk = ps.disk;
3757 ms.disk = ps.pdsk;
3758 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3759
3760 return ms;
3761}
3762
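convert_state() mirrors the peer's report into our own point of view: the peer's role becomes our peer field, its local disk state becomes our pdsk, and directional connection states (StartingSyncS vs. StartingSyncT, and so on) are flipped to their counterpart. A toy sketch of the same mirroring on a made-up four-field state (types and values here are purely illustrative):

/* Illustrative sketch only -- not part of drbd_receiver.c. */
#include <stdio.h>

struct toy_state {
	int role;	/* my role             */
	int peer;	/* role of the peer    */
	int disk;	/* my disk state       */
	int pdsk;	/* disk state of peer  */
};

/* Mirror the peer's view of the cluster into our own point of view. */
static struct toy_state toy_convert_state(struct toy_state ps)
{
	struct toy_state ms = ps;

	ms.role = ps.peer;	/* what the peer calls "peer" is me   */
	ms.peer = ps.role;	/* what the peer calls "role" is them */
	ms.disk = ps.pdsk;
	ms.pdsk = ps.disk;
	return ms;
}

int main(void)
{
	struct toy_state peer_view = { .role = 1, .peer = 2, .disk = 8, .pdsk = 4 };
	struct toy_state my_view = toy_convert_state(peer_view);

	printf("role=%d peer=%d disk=%d pdsk=%d\n",
	       my_view.role, my_view.peer, my_view.disk, my_view.pdsk);
	return 0;
}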
4a76b161 3763static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3764{
4a76b161 3765 struct drbd_conf *mdev;
e658983a 3766 struct p_req_state *p = pi->data;
b411b363 3767 union drbd_state mask, val;
bf885f8a 3768 enum drbd_state_rv rv;
b411b363 3769
4a76b161
AG
3770 mdev = vnr_to_mdev(tconn, pi->vnr);
3771 if (!mdev)
3772 return -EIO;
3773
b411b363
PR
3774 mask.i = be32_to_cpu(p->mask);
3775 val.i = be32_to_cpu(p->val);
3776
427c0434 3777 if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) &&
8410da8f 3778 mutex_is_locked(mdev->state_mutex)) {
b411b363 3779 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
82bc0194 3780 return 0;
b411b363
PR
3781 }
3782
3783 mask = convert_state(mask);
3784 val = convert_state(val);
3785
dfafcc8a
PR
3786 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3787 drbd_send_sr_reply(mdev, rv);
b411b363 3788
b411b363
PR
3789 drbd_md_sync(mdev);
3790
82bc0194 3791 return 0;
b411b363
PR
3792}
3793
e2857216 3794static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3795{
e658983a 3796 struct p_req_state *p = pi->data;
b411b363 3797 union drbd_state mask, val;
bf885f8a 3798 enum drbd_state_rv rv;
b411b363 3799
b411b363
PR
3800 mask.i = be32_to_cpu(p->mask);
3801 val.i = be32_to_cpu(p->val);
3802
427c0434 3803 if (test_bit(RESOLVE_CONFLICTS, &tconn->flags) &&
dfafcc8a
PR
3804 mutex_is_locked(&tconn->cstate_mutex)) {
3805 conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
82bc0194 3806 return 0;
b411b363
PR
3807 }
3808
3809 mask = convert_state(mask);
3810 val = convert_state(val);
3811
778bcf2e 3812 rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
dfafcc8a 3813 conn_send_sr_reply(tconn, rv);
b411b363 3814
82bc0194 3815 return 0;
b411b363
PR
3816}
3817
4a76b161 3818static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3819{
4a76b161 3820 struct drbd_conf *mdev;
e658983a 3821 struct p_state *p = pi->data;
4ac4aada 3822 union drbd_state os, ns, peer_state;
b411b363 3823 enum drbd_disk_state real_peer_disk;
65d922c3 3824 enum chg_state_flags cs_flags;
b411b363
PR
3825 int rv;
3826
4a76b161
AG
3827 mdev = vnr_to_mdev(tconn, pi->vnr);
3828 if (!mdev)
3829 return config_unknown_volume(tconn, pi);
3830
b411b363
PR
3831 peer_state.i = be32_to_cpu(p->state);
3832
3833 real_peer_disk = peer_state.disk;
3834 if (peer_state.disk == D_NEGOTIATING) {
3835 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3836 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3837 }
3838
87eeee41 3839 spin_lock_irq(&mdev->tconn->req_lock);
b411b363 3840 retry:
78bae59b 3841 os = ns = drbd_read_state(mdev);
87eeee41 3842 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 3843
545752d5
LE
3844 /* If some other part of the code (asender thread, timeout)
3845 * already decided to close the connection again,
3846 * we must not "re-establish" it here. */
3847 if (os.conn <= C_TEAR_DOWN)
58ffa580 3848 return -ECONNRESET;
545752d5 3849
40424e4a
LE
3850 /* If this is the "end of sync" confirmation, usually the peer disk
3851 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
3852 * set) resync started in PausedSyncT, or if the timing of pause-/
3853 * unpause-sync events has been "just right", the peer disk may
3854 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
3855 */
3856 if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
3857 real_peer_disk == D_UP_TO_DATE &&
e9ef7bb6
LE
3858 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3859 /* If we are (becoming) SyncSource, but peer is still in sync
3860 * preparation, ignore its uptodate-ness to avoid flapping, it
3861 * will change to inconsistent once the peer reaches active
3862 * syncing states.
3863 * It may have changed syncer-paused flags, however, so we
3864 * cannot ignore this completely. */
3865 if (peer_state.conn > C_CONNECTED &&
3866 peer_state.conn < C_SYNC_SOURCE)
3867 real_peer_disk = D_INCONSISTENT;
3868
3869 /* if peer_state changes to connected at the same time,
3870 * it explicitly notifies us that it finished resync.
3871 * Maybe we should finish it up, too? */
3872 else if (os.conn >= C_SYNC_SOURCE &&
3873 peer_state.conn == C_CONNECTED) {
3874 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3875 drbd_resync_finished(mdev);
82bc0194 3876 return 0;
e9ef7bb6
LE
3877 }
3878 }
3879
02b91b55
LE
3880 /* explicit verify finished notification, stop sector reached. */
3881 if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
3882 peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
58ffa580 3883 ov_out_of_sync_print(mdev);
02b91b55 3884 drbd_resync_finished(mdev);
58ffa580 3885 return 0;
02b91b55
LE
3886 }
3887
e9ef7bb6
LE
3888 /* peer says his disk is inconsistent, while we think it is uptodate,
3889 * and this happens while the peer still thinks we have a sync going on,
3890 * but we think we are already done with the sync.
3891 * We ignore this to avoid flapping pdsk.
3892 * This should not happen, if the peer is a recent version of drbd. */
3893 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3894 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3895 real_peer_disk = D_UP_TO_DATE;
3896
4ac4aada
LE
3897 if (ns.conn == C_WF_REPORT_PARAMS)
3898 ns.conn = C_CONNECTED;
b411b363 3899
67531718
PR
3900 if (peer_state.conn == C_AHEAD)
3901 ns.conn = C_BEHIND;
3902
b411b363
PR
3903 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3904 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3905 int cr; /* consider resync */
3906
3907 /* if we established a new connection */
4ac4aada 3908 cr = (os.conn < C_CONNECTED);
b411b363
PR
3909 /* if we had an established connection
3910 * and one of the nodes newly attaches a disk */
4ac4aada 3911 cr |= (os.conn == C_CONNECTED &&
b411b363 3912 (peer_state.disk == D_NEGOTIATING ||
4ac4aada 3913 os.disk == D_NEGOTIATING));
b411b363
PR
3914 /* if we have both been inconsistent, and the peer has been
3915 * forced to be UpToDate with --overwrite-data */
3916 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3917 /* if we had been plain connected, and the admin requested to
3918 * start a sync by "invalidate" or "invalidate-remote" */
4ac4aada 3919 cr |= (os.conn == C_CONNECTED &&
b411b363
PR
3920 (peer_state.conn >= C_STARTING_SYNC_S &&
3921 peer_state.conn <= C_WF_BITMAP_T));
3922
3923 if (cr)
4ac4aada 3924 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
b411b363
PR
3925
3926 put_ldev(mdev);
4ac4aada
LE
3927 if (ns.conn == C_MASK) {
3928 ns.conn = C_CONNECTED;
b411b363 3929 if (mdev->state.disk == D_NEGOTIATING) {
82f59cc6 3930 drbd_force_state(mdev, NS(disk, D_FAILED));
b411b363
PR
3931 } else if (peer_state.disk == D_NEGOTIATING) {
3932 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3933 peer_state.disk = D_DISKLESS;
580b9767 3934 real_peer_disk = D_DISKLESS;
b411b363 3935 } else {
8169e41b 3936 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
82bc0194 3937 return -EIO;
4ac4aada 3938 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
38fa9988 3939 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3940 return -EIO;
b411b363
PR
3941 }
3942 }
3943 }
3944
87eeee41 3945 spin_lock_irq(&mdev->tconn->req_lock);
78bae59b 3946 if (os.i != drbd_read_state(mdev).i)
b411b363
PR
3947 goto retry;
3948 clear_bit(CONSIDER_RESYNC, &mdev->flags);
b411b363
PR
3949 ns.peer = peer_state.role;
3950 ns.pdsk = real_peer_disk;
3951 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
4ac4aada 3952 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
b411b363 3953 ns.disk = mdev->new_state_tmp.disk;
4ac4aada 3954 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
2aebfabb 3955 if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
481c6f50 3956 test_bit(NEW_CUR_UUID, &mdev->flags)) {
8554df1c 3957 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
481c6f50 3958 for temporary network outages! */
87eeee41 3959 spin_unlock_irq(&mdev->tconn->req_lock);
481c6f50 3960 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
2f5cdd0b 3961 tl_clear(mdev->tconn);
481c6f50
PR
3962 drbd_uuid_new_current(mdev);
3963 clear_bit(NEW_CUR_UUID, &mdev->flags);
38fa9988 3964 conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
82bc0194 3965 return -EIO;
481c6f50 3966 }
65d922c3 3967 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
78bae59b 3968 ns = drbd_read_state(mdev);
87eeee41 3969 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363
PR
3970
3971 if (rv < SS_SUCCESS) {
38fa9988 3972 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3973 return -EIO;
b411b363
PR
3974 }
3975
4ac4aada
LE
3976 if (os.conn > C_WF_REPORT_PARAMS) {
3977 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
b411b363
PR
3978 peer_state.disk != D_NEGOTIATING ) {
3979 /* we want resync, peer has not yet decided to sync... */
3980 /* Nowadays only used when forcing a node into primary role and
3981 setting its disk to UpToDate with that */
3982 drbd_send_uuids(mdev);
f479ea06 3983 drbd_send_current_state(mdev);
b411b363
PR
3984 }
3985 }
3986
08b165ba 3987 clear_bit(DISCARD_MY_DATA, &mdev->flags);
b411b363 3988
cccac985 3989 drbd_md_sync(mdev); /* update connected indicator, la_size_sect, ... */
b411b363 3990
82bc0194 3991 return 0;
b411b363
PR
3992}
3993
4a76b161 3994static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3995{
4a76b161 3996 struct drbd_conf *mdev;
e658983a 3997 struct p_rs_uuid *p = pi->data;
4a76b161
AG
3998
3999 mdev = vnr_to_mdev(tconn, pi->vnr);
4000 if (!mdev)
4001 return -EIO;
b411b363
PR
4002
4003 wait_event(mdev->misc_wait,
4004 mdev->state.conn == C_WF_SYNC_UUID ||
c4752ef1 4005 mdev->state.conn == C_BEHIND ||
b411b363
PR
4006 mdev->state.conn < C_CONNECTED ||
4007 mdev->state.disk < D_NEGOTIATING);
4008
4009 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
4010
b411b363
PR
4011 /* Here the _drbd_uuid_ functions are right, current should
4012 _not_ be rotated into the history */
4013 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
4014 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
4015 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
4016
62b0da3a 4017 drbd_print_uuids(mdev, "updated sync uuid");
b411b363
PR
4018 drbd_start_resync(mdev, C_SYNC_TARGET);
4019
4020 put_ldev(mdev);
4021 } else
4022 dev_err(DEV, "Ignoring SyncUUID packet!\n");
4023
82bc0194 4024 return 0;
b411b363
PR
4025}
4026
2c46407d
AG
4027/**
4028 * receive_bitmap_plain
4029 *
4030 * Return 0 when done, 1 when another iteration is needed, and a negative error
4031 * code upon failure.
4032 */
4033static int
50d0b1ad 4034receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
e658983a 4035 unsigned long *p, struct bm_xfer_ctx *c)
b411b363 4036{
50d0b1ad
AG
4037 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
4038 drbd_header_size(mdev->tconn);
e658983a 4039 unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
50d0b1ad 4040 c->bm_words - c->word_offset);
e658983a 4041 unsigned int want = num_words * sizeof(*p);
2c46407d 4042 int err;
b411b363 4043
50d0b1ad
AG
4044 if (want != size) {
4045 dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size);
2c46407d 4046 return -EIO;
b411b363
PR
4047 }
4048 if (want == 0)
2c46407d 4049 return 0;
e658983a 4050 err = drbd_recv_all(mdev->tconn, p, want);
82bc0194 4051 if (err)
2c46407d 4052 return err;
b411b363 4053
e658983a 4054 drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);
b411b363
PR
4055
4056 c->word_offset += num_words;
4057 c->bit_offset = c->word_offset * BITS_PER_LONG;
4058 if (c->bit_offset > c->bm_bits)
4059 c->bit_offset = c->bm_bits;
4060
2c46407d 4061 return 1;
b411b363
PR
4062}
4063
a02d1240
AG
4064static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
4065{
4066 return (enum drbd_bitmap_code)(p->encoding & 0x0f);
4067}
4068
4069static int dcbp_get_start(struct p_compressed_bm *p)
4070{
4071 return (p->encoding & 0x80) != 0;
4072}
4073
4074static int dcbp_get_pad_bits(struct p_compressed_bm *p)
4075{
4076 return (p->encoding >> 4) & 0x7;
4077}
4078
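The three dcbp helpers above pick apart the single encoding byte of a compressed-bitmap packet: bits 0-3 carry the encoding code, bits 4-6 the number of pad bits in the last byte, and bit 7 the initial run toggle. A small sketch of packing and unpacking such a byte (the packing helper is made up for illustration):

/* Illustrative sketch only -- not part of drbd_receiver.c. */
#include <stdio.h>

static unsigned char pack_encoding(int code, int pad_bits, int start)
{
	return (unsigned char)((code & 0x0f) |
			       ((pad_bits & 0x07) << 4) |
			       (start ? 0x80 : 0x00));
}

int main(void)
{
	unsigned char enc = pack_encoding(2, 5, 1);

	printf("code=%d pad_bits=%d start=%d\n",
	       enc & 0x0f,		/* what dcbp_get_code() extracts     */
	       (enc >> 4) & 0x7,	/* what dcbp_get_pad_bits() extracts */
	       (enc & 0x80) != 0);	/* what dcbp_get_start() extracts    */
	return 0;
}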
2c46407d
AG
4079/**
4080 * recv_bm_rle_bits
4081 *
4082 * Return 0 when done, 1 when another iteration is needed, and a negative error
4083 * code upon failure.
4084 */
4085static int
b411b363
PR
4086recv_bm_rle_bits(struct drbd_conf *mdev,
4087 struct p_compressed_bm *p,
c6d25cfe
PR
4088 struct bm_xfer_ctx *c,
4089 unsigned int len)
b411b363
PR
4090{
4091 struct bitstream bs;
4092 u64 look_ahead;
4093 u64 rl;
4094 u64 tmp;
4095 unsigned long s = c->bit_offset;
4096 unsigned long e;
a02d1240 4097 int toggle = dcbp_get_start(p);
b411b363
PR
4098 int have;
4099 int bits;
4100
a02d1240 4101 bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
b411b363
PR
4102
4103 bits = bitstream_get_bits(&bs, &look_ahead, 64);
4104 if (bits < 0)
2c46407d 4105 return -EIO;
b411b363
PR
4106
4107 for (have = bits; have > 0; s += rl, toggle = !toggle) {
4108 bits = vli_decode_bits(&rl, look_ahead);
4109 if (bits <= 0)
2c46407d 4110 return -EIO;
b411b363
PR
4111
4112 if (toggle) {
4113 e = s + rl -1;
4114 if (e >= c->bm_bits) {
4115 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
2c46407d 4116 return -EIO;
b411b363
PR
4117 }
4118 _drbd_bm_set_bits(mdev, s, e);
4119 }
4120
4121 if (have < bits) {
4122 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
4123 have, bits, look_ahead,
4124 (unsigned int)(bs.cur.b - p->code),
4125 (unsigned int)bs.buf_len);
2c46407d 4126 return -EIO;
b411b363 4127 }
d2da5b0c
LE
4128 /* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */
4129 if (likely(bits < 64))
4130 look_ahead >>= bits;
4131 else
4132 look_ahead = 0;
b411b363
PR
4133 have -= bits;
4134
4135 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
4136 if (bits < 0)
2c46407d 4137 return -EIO;
b411b363
PR
4138 look_ahead |= tmp << have;
4139 have += bits;
4140 }
4141
4142 c->bit_offset = s;
4143 bm_xfer_ctx_bit_to_word_offset(c);
4144
2c46407d 4145 return (s != c->bm_bits);
b411b363
PR
4146}
4147
2c46407d
AG
4148/**
4149 * decode_bitmap_c
4150 *
4151 * Return 0 when done, 1 when another iteration is needed, and a negative error
4152 * code upon failure.
4153 */
4154static int
b411b363
PR
4155decode_bitmap_c(struct drbd_conf *mdev,
4156 struct p_compressed_bm *p,
c6d25cfe
PR
4157 struct bm_xfer_ctx *c,
4158 unsigned int len)
b411b363 4159{
a02d1240 4160 if (dcbp_get_code(p) == RLE_VLI_Bits)
e658983a 4161 return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));
b411b363
PR
4162
4163 /* other variants had been implemented for evaluation,
4164 * but have been dropped as this one turned out to be "best"
4165 * during all our tests. */
4166
4167 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
38fa9988 4168 conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
2c46407d 4169 return -EIO;
b411b363
PR
4170}
4171
4172void INFO_bm_xfer_stats(struct drbd_conf *mdev,
4173 const char *direction, struct bm_xfer_ctx *c)
4174{
4175 /* what would it take to transfer it "plaintext" */
50d0b1ad
AG
4176 unsigned int header_size = drbd_header_size(mdev->tconn);
4177 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
4178 unsigned int plain =
4179 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
4180 c->bm_words * sizeof(unsigned long);
4181 unsigned int total = c->bytes[0] + c->bytes[1];
4182 unsigned int r;
b411b363
PR
4183
4184 /* total cannot be zero, but just in case: */
4185 if (total == 0)
4186 return;
4187
4188 /* don't report if not compressed */
4189 if (total >= plain)
4190 return;
4191
4192 /* total < plain. check for overflow, still */
4193 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
4194 : (1000 * total / plain);
4195
4196 if (r > 1000)
4197 r = 1000;
4198
4199 r = 1000 - r;
4200 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
4201 "total %u; compression: %u.%u%%\n",
4202 direction,
4203 c->bytes[1], c->packets[1],
4204 c->bytes[0], c->packets[0],
4205 total, r/10, r % 10);
4206}
4207
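INFO_bm_xfer_stats() above reports the space saved by the RLE transfer relative to a plain bitmap transfer, in tenths of a percent: r = 1000 - 1000*total/plain, printed as r/10 and r%10. A stand-alone sketch of the same arithmetic with hypothetical numbers:

/* Illustrative sketch only -- not part of drbd_receiver.c. */
#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned int plain = 1048576;	/* hypothetical plain-text transfer size */
	unsigned int total = 65536;	/* hypothetical bytes actually sent      */
	unsigned int r;

	/* same overflow-avoiding ratio as the kernel code above */
	r = (total > UINT_MAX / 1000) ? (total / (plain / 1000))
				      : (1000 * total / plain);
	if (r > 1000)
		r = 1000;
	r = 1000 - r;

	printf("compression: %u.%u%%\n", r / 10, r % 10);	/* -> 93.8% */
	return 0;
}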
4208/* Since we are processing the bitfield from lower addresses to higher,
4209 it does not matter whether we process it in 32 bit chunks or 64 bit
4210 chunks, as long as it is little endian. (Understand it as a byte stream,
4211 beginning with the lowest byte...) If we used big endian,
4212 we would need to process it from the highest address to the lowest,
4213 in order to be agnostic to the 32 vs 64 bit issue.
4214
4215 returns 0 on failure, 1 if we successfully received it. */
4a76b161 4216static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4217{
4a76b161 4218 struct drbd_conf *mdev;
b411b363 4219 struct bm_xfer_ctx c;
2c46407d 4220 int err;
4a76b161
AG
4221
4222 mdev = vnr_to_mdev(tconn, pi->vnr);
4223 if (!mdev)
4224 return -EIO;
b411b363 4225
20ceb2b2
LE
4226 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
4227 /* you are supposed to send additional out-of-sync information
4228 * if you actually set bits during this phase */
b411b363 4229
b411b363
PR
4230 c = (struct bm_xfer_ctx) {
4231 .bm_bits = drbd_bm_bits(mdev),
4232 .bm_words = drbd_bm_words(mdev),
4233 };
4234
2c46407d 4235 for (;;) {
e658983a
AG
4236 if (pi->cmd == P_BITMAP)
4237 err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
4238 else if (pi->cmd == P_COMPRESSED_BITMAP) {
b411b363
PR
4239 /* MAYBE: sanity check that we speak proto >= 90,
4240 * and the feature is enabled! */
e658983a 4241 struct p_compressed_bm *p = pi->data;
b411b363 4242
50d0b1ad 4243 if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
b411b363 4244 dev_err(DEV, "ReportCBitmap packet too large\n");
82bc0194 4245 err = -EIO;
b411b363
PR
4246 goto out;
4247 }
e658983a 4248 if (pi->size <= sizeof(*p)) {
e2857216 4249 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
82bc0194 4250 err = -EIO;
78fcbdae 4251 goto out;
b411b363 4252 }
e658983a
AG
4253 err = drbd_recv_all(mdev->tconn, p, pi->size);
4254 if (err)
4255 goto out;
e2857216 4256 err = decode_bitmap_c(mdev, p, &c, pi->size);
b411b363 4257 } else {
e2857216 4258 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
82bc0194 4259 err = -EIO;
b411b363
PR
4260 goto out;
4261 }
4262
e2857216 4263 c.packets[pi->cmd == P_BITMAP]++;
50d0b1ad 4264 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;
b411b363 4265
2c46407d
AG
4266 if (err <= 0) {
4267 if (err < 0)
4268 goto out;
b411b363 4269 break;
2c46407d 4270 }
e2857216 4271 err = drbd_recv_header(mdev->tconn, pi);
82bc0194 4272 if (err)
b411b363 4273 goto out;
2c46407d 4274 }
b411b363
PR
4275
4276 INFO_bm_xfer_stats(mdev, "receive", &c);
4277
4278 if (mdev->state.conn == C_WF_BITMAP_T) {
de1f8e4a
AG
4279 enum drbd_state_rv rv;
4280
82bc0194
AG
4281 err = drbd_send_bitmap(mdev);
4282 if (err)
b411b363
PR
4283 goto out;
4284 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
de1f8e4a
AG
4285 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
4286 D_ASSERT(rv == SS_SUCCESS);
b411b363
PR
4287 } else if (mdev->state.conn != C_WF_BITMAP_S) {
4288 /* admin may have requested C_DISCONNECTING,
4289 * other threads may have noticed network errors */
4290 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
4291 drbd_conn_str(mdev->state.conn));
4292 }
82bc0194 4293 err = 0;
b411b363 4294
b411b363 4295 out:
20ceb2b2 4296 drbd_bm_unlock(mdev);
82bc0194 4297 if (!err && mdev->state.conn == C_WF_BITMAP_S)
b411b363 4298 drbd_start_resync(mdev, C_SYNC_SOURCE);
82bc0194 4299 return err;
b411b363
PR
4300}
4301
4a76b161 4302static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4303{
4a76b161 4304 conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
e2857216 4305 pi->cmd, pi->size);
b411b363 4306
4a76b161 4307 return ignore_remaining_packet(tconn, pi);
b411b363
PR
4308}
4309
4a76b161 4310static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
0ced55a3 4311{
e7f52dfb
LE
4312 /* Make sure we've acked all the TCP data associated
4313 * with the data requests being unplugged */
4a76b161 4314 drbd_tcp_quickack(tconn->data.socket);
0ced55a3 4315
82bc0194 4316 return 0;
0ced55a3
PR
4317}
4318
4a76b161 4319static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
73a01a18 4320{
4a76b161 4321 struct drbd_conf *mdev;
e658983a 4322 struct p_block_desc *p = pi->data;
4a76b161
AG
4323
4324 mdev = vnr_to_mdev(tconn, pi->vnr);
4325 if (!mdev)
4326 return -EIO;
73a01a18 4327
f735e363
LE
4328 switch (mdev->state.conn) {
4329 case C_WF_SYNC_UUID:
4330 case C_WF_BITMAP_T:
4331 case C_BEHIND:
4332 break;
4333 default:
4334 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
4335 drbd_conn_str(mdev->state.conn));
4336 }
4337
73a01a18
PR
4338 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
4339
82bc0194 4340 return 0;
73a01a18
PR
4341}
4342
02918be2
PR
4343struct data_cmd {
4344 int expect_payload;
4345 size_t pkt_size;
4a76b161 4346 int (*fn)(struct drbd_tconn *, struct packet_info *);
02918be2
PR
4347};
4348
4349static struct data_cmd drbd_cmd_handler[] = {
4350 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
4351 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
4352 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
4353 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
e658983a
AG
4354 [P_BITMAP] = { 1, 0, receive_bitmap } ,
4355 [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
4356 [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote },
02918be2
PR
4357 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4358 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
e658983a
AG
4359 [P_SYNC_PARAM] = { 1, 0, receive_SyncParam },
4360 [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam },
02918be2
PR
4361 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
4362 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
4363 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
4364 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
4365 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
4366 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
4367 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4368 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4369 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4370 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
73a01a18 4371 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
4a76b161 4372 [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
036b17ea 4373 [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
b411b363
PR
4374};
4375
eefc2f7d 4376static void drbdd(struct drbd_tconn *tconn)
b411b363 4377{
77351055 4378 struct packet_info pi;
02918be2 4379 size_t shs; /* sub header size */
82bc0194 4380 int err;
b411b363 4381
eefc2f7d 4382 while (get_t_state(&tconn->receiver) == RUNNING) {
deebe195 4383 struct data_cmd *cmd;
b411b363 4384
eefc2f7d 4385 drbd_thread_current_set_cpu(&tconn->receiver);
69bc7bc3 4386 if (drbd_recv_header(tconn, &pi))
02918be2 4387 goto err_out;
b411b363 4388
deebe195 4389 cmd = &drbd_cmd_handler[pi.cmd];
4a76b161 4390 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
2fcb8f30
AG
4391 conn_err(tconn, "Unexpected data packet %s (0x%04x)",
4392 cmdname(pi.cmd), pi.cmd);
02918be2 4393 goto err_out;
0b33a916 4394 }
b411b363 4395
e658983a
AG
4396 shs = cmd->pkt_size;
4397 if (pi.size > shs && !cmd->expect_payload) {
2fcb8f30
AG
4398 conn_err(tconn, "No payload expected %s l:%d\n",
4399 cmdname(pi.cmd), pi.size);
02918be2 4400 goto err_out;
b411b363 4401 }
b411b363 4402
c13f7e1a 4403 if (shs) {
e658983a 4404 err = drbd_recv_all_warn(tconn, pi.data, shs);
a5c31904 4405 if (err)
c13f7e1a 4406 goto err_out;
e2857216 4407 pi.size -= shs;
c13f7e1a
LE
4408 }
4409
4a76b161
AG
4410 err = cmd->fn(tconn, &pi);
4411 if (err) {
9f5bdc33
AG
4412 conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
4413 cmdname(pi.cmd), err, pi.size);
02918be2 4414 goto err_out;
b411b363
PR
4415 }
4416 }
82bc0194 4417 return;
b411b363 4418
82bc0194
AG
4419 err_out:
4420 conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
b411b363
PR
4421}
4422
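drbdd() above is a classic table-driven dispatcher: each received header is looked up in drbd_cmd_handler[] by command number, the fixed sub-header of pkt_size bytes is read in, and the handler is called with whatever payload size remains. A compact user-space sketch of the same pattern (all names invented for illustration):

/* Illustrative sketch only -- not part of drbd_receiver.c. */
#include <stdio.h>
#include <stddef.h>

struct pkt_info { unsigned int cmd; unsigned int size; void *data; };

struct cmd_entry {
	int expect_payload;
	size_t hdr_size;
	int (*fn)(struct pkt_info *pi);
};

static int handle_ping(struct pkt_info *pi)  { printf("ping, %u byte(s)\n", pi->size); return 0; }
static int handle_state(struct pkt_info *pi) { printf("state, %u byte(s)\n", pi->size); return 0; }

static const struct cmd_entry handlers[] = {
	[0] = { 0, 0, handle_ping },
	[1] = { 0, 8, handle_state },
};

static int dispatch(struct pkt_info *pi)
{
	const struct cmd_entry *e;

	if (pi->cmd >= sizeof(handlers) / sizeof(handlers[0]) || !handlers[pi->cmd].fn)
		return -1;			/* unknown packet: protocol error */
	e = &handlers[pi->cmd];
	if (pi->size > e->hdr_size && !e->expect_payload)
		return -1;			/* payload where none is allowed  */
	return e->fn(pi);
}

int main(void)
{
	struct pkt_info ping = { .cmd = 0, .size = 0 };
	struct pkt_info state = { .cmd = 1, .size = 8 };

	return dispatch(&ping) || dispatch(&state);
}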
0e29d163 4423void conn_flush_workqueue(struct drbd_tconn *tconn)
b411b363
PR
4424{
4425 struct drbd_wq_barrier barr;
4426
4427 barr.w.cb = w_prev_work_done;
0e29d163 4428 barr.w.tconn = tconn;
b411b363 4429 init_completion(&barr.done);
d5b27b01 4430 drbd_queue_work(&tconn->sender_work, &barr.w);
b411b363
PR
4431 wait_for_completion(&barr.done);
4432}
4433
81fa2e67 4434static void conn_disconnect(struct drbd_tconn *tconn)
b411b363 4435{
c141ebda 4436 struct drbd_conf *mdev;
bbeb641c 4437 enum drbd_conns oc;
376694a0 4438 int vnr;
b411b363 4439
bbeb641c 4440 if (tconn->cstate == C_STANDALONE)
b411b363 4441 return;
b411b363 4442
545752d5
LE
4443 /* We are about to start the cleanup after connection loss.
4444 * Make sure drbd_make_request knows about that.
4445 * Usually we should be in some network failure state already,
4446 * but just in case we are not, we fix it up here.
4447 */
b8853dbd 4448 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
545752d5 4449
b411b363 4450 /* asender does not clean up anything. it must not interfere, either */
360cc740
PR
4451 drbd_thread_stop(&tconn->asender);
4452 drbd_free_sock(tconn);
4453
c141ebda
PR
4454 rcu_read_lock();
4455 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
4456 kref_get(&mdev->kref);
4457 rcu_read_unlock();
4458 drbd_disconnected(mdev);
4459 kref_put(&mdev->kref, &drbd_minor_destroy);
4460 rcu_read_lock();
4461 }
4462 rcu_read_unlock();
4463
12038a3a
PR
4464 if (!list_empty(&tconn->current_epoch->list))
4465 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
4466 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
4467 atomic_set(&tconn->current_epoch->epoch_size, 0);
b6dd1a89 4468 tconn->send.seen_any_write_yet = false;
12038a3a 4469
360cc740
PR
4470 conn_info(tconn, "Connection closed\n");
4471
cb703454
PR
4472 if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
4473 conn_try_outdate_peer_async(tconn);
4474
360cc740 4475 spin_lock_irq(&tconn->req_lock);
bbeb641c
PR
4476 oc = tconn->cstate;
4477 if (oc >= C_UNCONNECTED)
376694a0 4478 _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
bbeb641c 4479
360cc740
PR
4480 spin_unlock_irq(&tconn->req_lock);
4481
f3dfa40a 4482 if (oc == C_DISCONNECTING)
d9cc6e23 4483 conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
360cc740
PR
4484}
4485
c141ebda 4486static int drbd_disconnected(struct drbd_conf *mdev)
360cc740 4487{
360cc740 4488 unsigned int i;
b411b363 4489
85719573 4490 /* wait for current activity to cease. */
87eeee41 4491 spin_lock_irq(&mdev->tconn->req_lock);
b411b363
PR
4492 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
4493 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
4494 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
87eeee41 4495 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363
PR
4496
4497 /* We do not have data structures that would allow us to
4498 * get the rs_pending_cnt down to 0 again.
4499 * * On C_SYNC_TARGET we do not have any data structures describing
4500 * the pending RSDataRequest's we have sent.
4501 * * On C_SYNC_SOURCE there is no data structure that tracks
4502 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
4503 * And no, it is not the sum of the reference counts in the
4504 * resync_LRU. The resync_LRU tracks the whole operation including
4505 * the disk-IO, while the rs_pending_cnt only tracks the blocks
4506 * on the fly. */
4507 drbd_rs_cancel_all(mdev);
4508 mdev->rs_total = 0;
4509 mdev->rs_failed = 0;
4510 atomic_set(&mdev->rs_pending_cnt, 0);
4511 wake_up(&mdev->misc_wait);
4512
b411b363 4513 del_timer_sync(&mdev->resync_timer);
b411b363
PR
4514 resync_timer_fn((unsigned long)mdev);
4515
b411b363
PR
4516 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
4517 * w_make_resync_request etc. which may still be on the worker queue
4518 * to be "canceled" */
4519 drbd_flush_workqueue(mdev);
4520
a990be46 4521 drbd_finish_peer_reqs(mdev);
b411b363 4522
d10b4ea3
PR
4523 /* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
4524 might have queued work again. The one before drbd_finish_peer_reqs() is
4525 necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
4526 drbd_flush_workqueue(mdev);
4527
08332d73
LE
4528 /* need to do it again, drbd_finish_peer_reqs() may have populated it
4529 * again via drbd_try_clear_on_disk_bm(). */
4530 drbd_rs_cancel_all(mdev);
b411b363
PR
4531
4532 kfree(mdev->p_uuid);
4533 mdev->p_uuid = NULL;
4534
2aebfabb 4535 if (!drbd_suspended(mdev))
2f5cdd0b 4536 tl_clear(mdev->tconn);
b411b363
PR
4537
4538 drbd_md_sync(mdev);
4539
20ceb2b2
LE
4540 /* serialize with bitmap writeout triggered by the state change,
4541 * if any. */
4542 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
4543
b411b363
PR
4544 /* tcp_close and release of sendpage pages can be deferred. I don't
4545 * want to use SO_LINGER, because apparently it can be deferred for
4546 * more than 20 seconds (longest time I checked).
4547 *
4548 * Actually we don't care for exactly when the network stack does its
4549 * put_page(), but release our reference on these pages right here.
4550 */
7721f567 4551 i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
b411b363
PR
4552 if (i)
4553 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
435f0740
LE
4554 i = atomic_read(&mdev->pp_in_use_by_net);
4555 if (i)
4556 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
b411b363
PR
4557 i = atomic_read(&mdev->pp_in_use);
4558 if (i)
45bb912b 4559 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
b411b363
PR
4560
4561 D_ASSERT(list_empty(&mdev->read_ee));
4562 D_ASSERT(list_empty(&mdev->active_ee));
4563 D_ASSERT(list_empty(&mdev->sync_ee));
4564 D_ASSERT(list_empty(&mdev->done_ee));
4565
360cc740 4566 return 0;
b411b363
PR
4567}
4568
4569/*
4570 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
4571 * we can agree on is stored in agreed_pro_version.
4572 *
4573 * feature flags and the reserved array should be enough room for future
4574 * enhancements of the handshake protocol, and possible plugins...
4575 *
4576 * for now, they are expected to be zero, but ignored.
4577 */
6038178e 4578static int drbd_send_features(struct drbd_tconn *tconn)
b411b363 4579{
9f5bdc33
AG
4580 struct drbd_socket *sock;
4581 struct p_connection_features *p;
b411b363 4582
9f5bdc33
AG
4583 sock = &tconn->data;
4584 p = conn_prepare_command(tconn, sock);
4585 if (!p)
e8d17b01 4586 return -EIO;
b411b363
PR
4587 memset(p, 0, sizeof(*p));
4588 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
4589 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
9f5bdc33 4590 return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
b411b363
PR
4591}
4592
4593/*
4594 * return values:
4595 * 1 yes, we have a valid connection
4596 * 0 oops, did not work out, please try again
4597 * -1 peer talks different language,
4598 * no point in trying again, please go standalone.
4599 */
6038178e 4600static int drbd_do_features(struct drbd_tconn *tconn)
b411b363 4601{
65d11ed6 4602 /* ASSERT current == tconn->receiver ... */
e658983a
AG
4603 struct p_connection_features *p;
4604 const int expect = sizeof(struct p_connection_features);
77351055 4605 struct packet_info pi;
a5c31904 4606 int err;
b411b363 4607
6038178e 4608 err = drbd_send_features(tconn);
e8d17b01 4609 if (err)
b411b363
PR
4610 return 0;
4611
69bc7bc3
AG
4612 err = drbd_recv_header(tconn, &pi);
4613 if (err)
b411b363
PR
4614 return 0;
4615
6038178e
AG
4616 if (pi.cmd != P_CONNECTION_FEATURES) {
4617 conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
2fcb8f30 4618 cmdname(pi.cmd), pi.cmd);
b411b363
PR
4619 return -1;
4620 }
4621
77351055 4622 if (pi.size != expect) {
6038178e 4623 conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
77351055 4624 expect, pi.size);
b411b363
PR
4625 return -1;
4626 }
4627
e658983a
AG
4628 p = pi.data;
4629 err = drbd_recv_all_warn(tconn, p, expect);
a5c31904 4630 if (err)
b411b363 4631 return 0;
b411b363 4632
b411b363
PR
4633 p->protocol_min = be32_to_cpu(p->protocol_min);
4634 p->protocol_max = be32_to_cpu(p->protocol_max);
4635 if (p->protocol_max == 0)
4636 p->protocol_max = p->protocol_min;
4637
4638 if (PRO_VERSION_MAX < p->protocol_min ||
4639 PRO_VERSION_MIN > p->protocol_max)
4640 goto incompat;
4641
65d11ed6 4642 tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
b411b363 4643
65d11ed6
PR
4644 conn_info(tconn, "Handshake successful: "
4645 "Agreed network protocol version %d\n", tconn->agreed_pro_version);
b411b363
PR
4646
4647 return 1;
4648
4649 incompat:
65d11ed6 4650 conn_err(tconn, "incompatible DRBD dialects: "
b411b363
PR
4651 "I support %d-%d, peer supports %d-%d\n",
4652 PRO_VERSION_MIN, PRO_VERSION_MAX,
4653 p->protocol_min, p->protocol_max);
4654 return -1;
4655}
4656
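The feature handshake implemented below only checks that the two advertised version ranges overlap, then settles on the highest protocol version both sides support: agreed = min(PRO_VERSION_MAX, peer's protocol_max). A stand-alone sketch of that negotiation (the version constants are placeholders, not necessarily DRBD's real values):

/* Illustrative sketch only -- not part of drbd_receiver.c. */
#include <stdio.h>

#define MY_VERSION_MIN 86	/* placeholder range for illustration */
#define MY_VERSION_MAX 101

/* Return the agreed protocol version, or -1 if the ranges do not overlap. */
static int negotiate(int peer_min, int peer_max)
{
	if (peer_max == 0)		/* older peers report only one version */
		peer_max = peer_min;
	if (MY_VERSION_MAX < peer_min || MY_VERSION_MIN > peer_max)
		return -1;
	return MY_VERSION_MAX < peer_max ? MY_VERSION_MAX : peer_max;
}

int main(void)
{
	printf("agreed: %d\n", negotiate(86, 100));	/* -> 100 */
	printf("agreed: %d\n", negotiate(102, 110));	/* -> -1  */
	return 0;
}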
4657#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
13e6037d 4658static int drbd_do_auth(struct drbd_tconn *tconn)
b411b363 4659{
ef57f9e6
PR
4660 conn_err(tconn, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
4661 conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
b10d96cb 4662 return -1;
b411b363
PR
4663}
4664#else
4665#define CHALLENGE_LEN 64
b10d96cb
JT
4666
4667/* Return value:
4668 1 - auth succeeded,
4669 0 - failed, try again (network error),
4670 -1 - auth failed, don't try again.
4671*/
4672
13e6037d 4673static int drbd_do_auth(struct drbd_tconn *tconn)
b411b363 4674{
9f5bdc33 4675 struct drbd_socket *sock;
b411b363
PR
4676 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4677 struct scatterlist sg;
4678 char *response = NULL;
4679 char *right_response = NULL;
4680 char *peers_ch = NULL;
44ed167d
PR
4681 unsigned int key_len;
4682 char secret[SHARED_SECRET_MAX]; /* 64 byte */
b411b363
PR
4683 unsigned int resp_size;
4684 struct hash_desc desc;
77351055 4685 struct packet_info pi;
44ed167d 4686 struct net_conf *nc;
69bc7bc3 4687 int err, rv;
b411b363 4688
9f5bdc33 4689 /* FIXME: Put the challenge/response into the preallocated socket buffer. */
b411b363 4690
44ed167d
PR
4691 rcu_read_lock();
4692 nc = rcu_dereference(tconn->net_conf);
4693 key_len = strlen(nc->shared_secret);
4694 memcpy(secret, nc->shared_secret, key_len);
4695 rcu_read_unlock();
4696
13e6037d 4697 desc.tfm = tconn->cram_hmac_tfm;
b411b363
PR
4698 desc.flags = 0;
4699
44ed167d 4700 rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len);
b411b363 4701 if (rv) {
13e6037d 4702 conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
b10d96cb 4703 rv = -1;
b411b363
PR
4704 goto fail;
4705 }
4706
4707 get_random_bytes(my_challenge, CHALLENGE_LEN);
4708
9f5bdc33
AG
4709 sock = &tconn->data;
4710 if (!conn_prepare_command(tconn, sock)) {
4711 rv = 0;
4712 goto fail;
4713 }
e658983a 4714 rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
9f5bdc33 4715 my_challenge, CHALLENGE_LEN);
b411b363
PR
4716 if (!rv)
4717 goto fail;
4718
69bc7bc3
AG
4719 err = drbd_recv_header(tconn, &pi);
4720 if (err) {
4721 rv = 0;
b411b363 4722 goto fail;
69bc7bc3 4723 }
b411b363 4724
77351055 4725 if (pi.cmd != P_AUTH_CHALLENGE) {
13e6037d 4726 conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
2fcb8f30 4727 cmdname(pi.cmd), pi.cmd);
b411b363
PR
4728 rv = 0;
4729 goto fail;
4730 }
4731
77351055 4732 if (pi.size > CHALLENGE_LEN * 2) {
13e6037d 4733 conn_err(tconn, "expected AuthChallenge payload too big.\n");
b10d96cb 4734 rv = -1;
b411b363
PR
4735 goto fail;
4736 }
4737
77351055 4738 peers_ch = kmalloc(pi.size, GFP_NOIO);
b411b363 4739 if (peers_ch == NULL) {
13e6037d 4740 conn_err(tconn, "kmalloc of peers_ch failed\n");
b10d96cb 4741 rv = -1;
b411b363
PR
4742 goto fail;
4743 }
4744
a5c31904
AG
4745 err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
4746 if (err) {
b411b363
PR
4747 rv = 0;
4748 goto fail;
4749 }
4750
13e6037d 4751 resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
b411b363
PR
4752 response = kmalloc(resp_size, GFP_NOIO);
4753 if (response == NULL) {
13e6037d 4754 conn_err(tconn, "kmalloc of response failed\n");
b10d96cb 4755 rv = -1;
b411b363
PR
4756 goto fail;
4757 }
4758
4759 sg_init_table(&sg, 1);
77351055 4760 sg_set_buf(&sg, peers_ch, pi.size);
b411b363
PR
4761
4762 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4763 if (rv) {
13e6037d 4764 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
b10d96cb 4765 rv = -1;
b411b363
PR
4766 goto fail;
4767 }
4768
9f5bdc33
AG
4769 if (!conn_prepare_command(tconn, sock)) {
4770 rv = 0;
b411b363 4771 goto fail;
9f5bdc33 4772 }
e658983a 4773 rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
9f5bdc33 4774 response, resp_size);
b411b363
PR
4775 if (!rv)
4776 goto fail;
4777
69bc7bc3
AG
4778 err = drbd_recv_header(tconn, &pi);
4779 if (err) {
b411b363
PR
4780 rv = 0;
4781 goto fail;
4782 }
4783
77351055 4784 if (pi.cmd != P_AUTH_RESPONSE) {
13e6037d 4785 conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
2fcb8f30 4786 cmdname(pi.cmd), pi.cmd);
b411b363
PR
4787 rv = 0;
4788 goto fail;
4789 }
4790
77351055 4791 if (pi.size != resp_size) {
13e6037d 4792 conn_err(tconn, "expected AuthResponse payload of wrong size\n");
b411b363
PR
4793 rv = 0;
4794 goto fail;
4795 }
b411b363 4796
a5c31904
AG
4797 err = drbd_recv_all_warn(tconn, response, resp_size);
4798 if (err) {
b411b363
PR
4799 rv = 0;
4800 goto fail;
4801 }
4802
4803 right_response = kmalloc(resp_size, GFP_NOIO);
2d1ee87d 4804 if (right_response == NULL) {
13e6037d 4805 conn_err(tconn, "kmalloc of right_response failed\n");
b10d96cb 4806 rv = -1;
b411b363
PR
4807 goto fail;
4808 }
4809
4810 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4811
4812 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4813 if (rv) {
13e6037d 4814 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
b10d96cb 4815 rv = -1;
b411b363
PR
4816 goto fail;
4817 }
4818
4819 rv = !memcmp(response, right_response, resp_size);
4820
4821 if (rv)
44ed167d
PR
4822 conn_info(tconn, "Peer authenticated using %d bytes HMAC\n",
4823 resp_size);
b10d96cb
JT
4824 else
4825 rv = -1;
b411b363
PR
4826
4827 fail:
4828 kfree(peers_ch);
4829 kfree(response);
4830 kfree(right_response);
4831
4832 return rv;
4833}
4834#endif
4835
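drbd_do_auth() above implements a symmetric challenge/response: each side sends a random challenge, and each proves knowledge of the shared secret by returning an HMAC of the peer's challenge, which the peer recomputes ("right_response") and compares. A user-space sketch of one side's verification step, using OpenSSL's HMAC() as a stand-in for the kernel crypto API (OpenSSL is an assumed dependency here, not what the kernel code uses):

/* Illustrative sketch only -- not part of drbd_receiver.c.
 * Build with: cc sketch.c -lcrypto   (assumes OpenSSL is installed) */
#include <stdio.h>
#include <string.h>
#include <openssl/hmac.h>
#include <openssl/rand.h>

#define CHALLENGE_LEN 64

int main(void)
{
	const char *secret = "example-shared-secret";	/* known to both sides */
	unsigned char my_challenge[CHALLENGE_LEN];
	unsigned char expected[EVP_MAX_MD_SIZE], received[EVP_MAX_MD_SIZE];
	unsigned int len = 0;

	/* 1. I send a random challenge to the peer. */
	RAND_bytes(my_challenge, sizeof(my_challenge));

	/* 2. The peer answers with HMAC(secret, my_challenge); here we just
	 *    simulate a correct answer by computing it locally. */
	HMAC(EVP_sha1(), secret, (int)strlen(secret),
	     my_challenge, sizeof(my_challenge), received, &len);

	/* 3. I recompute the HMAC myself and compare it with the answer. */
	HMAC(EVP_sha1(), secret, (int)strlen(secret),
	     my_challenge, sizeof(my_challenge), expected, &len);

	if (memcmp(expected, received, len) == 0)
		printf("peer authenticated using %u byte HMAC\n", len);
	else
		printf("authentication failed\n");
	return 0;
}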
4836int drbdd_init(struct drbd_thread *thi)
4837{
392c8801 4838 struct drbd_tconn *tconn = thi->tconn;
b411b363
PR
4839 int h;
4840
4d641dd7 4841 conn_info(tconn, "receiver (re)started\n");
b411b363
PR
4842
4843 do {
81fa2e67 4844 h = conn_connect(tconn);
b411b363 4845 if (h == 0) {
81fa2e67 4846 conn_disconnect(tconn);
20ee6390 4847 schedule_timeout_interruptible(HZ);
b411b363
PR
4848 }
4849 if (h == -1) {
4d641dd7 4850 conn_warn(tconn, "Discarding network configuration.\n");
bbeb641c 4851 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363
PR
4852 }
4853 } while (h == 0);
4854
91fd4dad
PR
4855 if (h > 0)
4856 drbdd(tconn);
b411b363 4857
81fa2e67 4858 conn_disconnect(tconn);
b411b363 4859
4d641dd7 4860 conn_info(tconn, "receiver terminated\n");
b411b363
PR
4861 return 0;
4862}
4863
4864/* ********* acknowledge sender ******** */
4865
e05e1e59 4866static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4867{
e658983a 4868 struct p_req_state_reply *p = pi->data;
e4f78ede
PR
4869 int retcode = be32_to_cpu(p->retcode);
4870
4871 if (retcode >= SS_SUCCESS) {
4872 set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
4873 } else {
4874 set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
4875 conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
4876 drbd_set_st_err_str(retcode), retcode);
4877 }
4878 wake_up(&tconn->ping_wait);
4879
2735a594 4880 return 0;
e4f78ede 4881}
b411b363 4882
1952e916 4883static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4884{
1952e916 4885 struct drbd_conf *mdev;
e658983a 4886 struct p_req_state_reply *p = pi->data;
b411b363
PR
4887 int retcode = be32_to_cpu(p->retcode);
4888
1952e916
AG
4889 mdev = vnr_to_mdev(tconn, pi->vnr);
4890 if (!mdev)
2735a594 4891 return -EIO;
1952e916 4892
4d0fc3fd
PR
4893 if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) {
4894 D_ASSERT(tconn->agreed_pro_version < 100);
4895 return got_conn_RqSReply(tconn, pi);
4896 }
4897
b411b363 4898 if (retcode >= SS_SUCCESS) {
e4f78ede 4899 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
b411b363 4900 } else {
e4f78ede 4901 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
b411b363 4902 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
e4f78ede 4903 drbd_set_st_err_str(retcode), retcode);
b411b363
PR
4904 }
4905 wake_up(&mdev->state_wait);
4906
2735a594 4907 return 0;
b411b363
PR
4908}
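/*
 * Two flavours of state-change reply: got_conn_RqSReply() handles
 * connection-wide requests (P_CONN_ST_CHG_REPLY) and signals the
 * CONN_WD_ST_CHG_* flags, while got_RqSReply() is per volume.  With a
 * peer older than protocol version 100 (see the D_ASSERT above), a
 * pending connection-wide request is answered via the per-volume packet,
 * hence the delegation when CONN_WD_ST_CHG_REQ is set.
 */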
4909
e05e1e59 4910static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4911{
2735a594 4912 return drbd_send_ping_ack(tconn);
b411b363
PR
4913
4914}
4915
e05e1e59 4916static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363
PR
4917{
4918 /* restore idle timeout */
2a67d8b9
PR
4919 tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
4920 if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
4921 wake_up(&tconn->ping_wait);
b411b363 4922
2735a594 4923 return 0;
b411b363
PR
4924}
4925
1952e916 4926static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4927{
1952e916 4928 struct drbd_conf *mdev;
e658983a 4929 struct p_block_ack *p = pi->data;
b411b363
PR
4930 sector_t sector = be64_to_cpu(p->sector);
4931 int blksize = be32_to_cpu(p->blksize);
4932
1952e916
AG
4933 mdev = vnr_to_mdev(tconn, pi->vnr);
4934 if (!mdev)
2735a594 4935 return -EIO;
1952e916 4936
31890f4a 4937 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
b411b363
PR
4938
4939 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4940
1d53f09e
LE
4941 if (get_ldev(mdev)) {
4942 drbd_rs_complete_io(mdev, sector);
4943 drbd_set_in_sync(mdev, sector, blksize);
4944 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4945 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4946 put_ldev(mdev);
4947 }
b411b363 4948 dec_rs_pending(mdev);
778f271d 4949 atomic_add(blksize >> 9, &mdev->rs_sect_in);
b411b363 4950
2735a594 4951 return 0;
b411b363
PR
4952}
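/*
 * Units in got_IsInSync(): blksize arrives in bytes; "blksize >> 9"
 * converts it to 512-byte sectors for rs_sect_in, while
 * "blksize >> BM_BLOCK_SHIFT" counts bitmap blocks for rs_same_csum
 * (BM_BLOCK_SHIFT is defined elsewhere; typically 12, i.e. 4 KiB
 * bitmap granularity).
 */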
4953
bc9c5c41
AG
4954static int
4955validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
4956 struct rb_root *root, const char *func,
4957 enum drbd_req_event what, bool missing_ok)
b411b363
PR
4958{
4959 struct drbd_request *req;
4960 struct bio_and_error m;
4961
87eeee41 4962 spin_lock_irq(&mdev->tconn->req_lock);
bc9c5c41 4963 req = find_request(mdev, root, id, sector, missing_ok, func);
b411b363 4964 if (unlikely(!req)) {
87eeee41 4965 spin_unlock_irq(&mdev->tconn->req_lock);
85997675 4966 return -EIO;
b411b363
PR
4967 }
4968 __req_mod(req, what, &m);
87eeee41 4969 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363
PR
4970
4971 if (m.bio)
4972 complete_master_bio(mdev, &m);
85997675 4973 return 0;
b411b363
PR
4974}
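/*
 * Common pattern for the ack handlers below: look the request up in the
 * per-device request tree (write_requests or read_requests, an rb_root)
 * under req_lock, apply the state transition with __req_mod(), drop the
 * lock, and only then complete the master bio - presumably so that bio
 * completion never runs with req_lock held.
 */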
4975
1952e916 4976static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4977{
1952e916 4978 struct drbd_conf *mdev;
e658983a 4979 struct p_block_ack *p = pi->data;
b411b363
PR
4980 sector_t sector = be64_to_cpu(p->sector);
4981 int blksize = be32_to_cpu(p->blksize);
4982 enum drbd_req_event what;
4983
1952e916
AG
4984 mdev = vnr_to_mdev(tconn, pi->vnr);
4985 if (!mdev)
2735a594 4986 return -EIO;
1952e916 4987
b411b363
PR
4988 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4989
579b57ed 4990 if (p->block_id == ID_SYNCER) {
b411b363
PR
4991 drbd_set_in_sync(mdev, sector, blksize);
4992 dec_rs_pending(mdev);
2735a594 4993 return 0;
b411b363 4994 }
e05e1e59 4995 switch (pi->cmd) {
b411b363 4996 case P_RS_WRITE_ACK:
8554df1c 4997 what = WRITE_ACKED_BY_PEER_AND_SIS;
b411b363
PR
4998 break;
4999 case P_WRITE_ACK:
8554df1c 5000 what = WRITE_ACKED_BY_PEER;
b411b363
PR
5001 break;
5002 case P_RECV_ACK:
8554df1c 5003 what = RECV_ACKED_BY_PEER;
b411b363 5004 break;
d4dabbe2
LE
5005 case P_SUPERSEDED:
5006 what = CONFLICT_RESOLVED;
b411b363 5007 break;
7be8da07 5008 case P_RETRY_WRITE:
7be8da07 5009 what = POSTPONE_WRITE;
b411b363
PR
5010 break;
5011 default:
2735a594 5012 BUG();
b411b363
PR
5013 }
5014
5015 return validate_req_change_req_state(mdev, p->block_id, sector,
2735a594
AG
5016 &mdev->write_requests, __func__,
5017 what, false);
b411b363
PR
5018}
5019
1952e916 5020static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 5021{
1952e916 5022 struct drbd_conf *mdev;
e658983a 5023 struct p_block_ack *p = pi->data;
b411b363 5024 sector_t sector = be64_to_cpu(p->sector);
2deb8336 5025 int size = be32_to_cpu(p->blksize);
85997675 5026 int err;
b411b363 5027
1952e916
AG
5028 mdev = vnr_to_mdev(tconn, pi->vnr);
5029 if (!mdev)
2735a594 5030 return -EIO;
b411b363
PR
5031
5032 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5033
579b57ed 5034 if (p->block_id == ID_SYNCER) {
b411b363
PR
5035 dec_rs_pending(mdev);
5036 drbd_rs_failed_io(mdev, sector, size);
2735a594 5037 return 0;
b411b363 5038 }
2deb8336 5039
85997675
AG
5040 err = validate_req_change_req_state(mdev, p->block_id, sector,
5041 &mdev->write_requests, __func__,
303d1448 5042 NEG_ACKED, true);
85997675 5043 if (err) {
c3afd8f5
AG
5044 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
5045 The master bio might already be completed, therefore the
 5046 request is no longer in the write_requests tree. */
5047 /* In Protocol B we might already have got a P_RECV_ACK
5048 but then get a P_NEG_ACK afterwards. */
c3afd8f5 5049 drbd_set_out_of_sync(mdev, sector, size);
2deb8336 5050 }
2735a594 5051 return 0;
b411b363
PR
5052}
5053
1952e916 5054static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 5055{
1952e916 5056 struct drbd_conf *mdev;
e658983a 5057 struct p_block_ack *p = pi->data;
b411b363
PR
5058 sector_t sector = be64_to_cpu(p->sector);
5059
1952e916
AG
5060 mdev = vnr_to_mdev(tconn, pi->vnr);
5061 if (!mdev)
2735a594 5062 return -EIO;
1952e916 5063
b411b363 5064 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
7be8da07 5065
380207d0 5066 dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n",
b411b363
PR
5067 (unsigned long long)sector, be32_to_cpu(p->blksize));
5068
5069 return validate_req_change_req_state(mdev, p->block_id, sector,
2735a594
AG
5070 &mdev->read_requests, __func__,
5071 NEG_ACKED, false);
b411b363
PR
5072}
5073
1952e916 5074static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 5075{
1952e916 5076 struct drbd_conf *mdev;
b411b363
PR
5077 sector_t sector;
5078 int size;
e658983a 5079 struct p_block_ack *p = pi->data;
1952e916
AG
5080
5081 mdev = vnr_to_mdev(tconn, pi->vnr);
5082 if (!mdev)
2735a594 5083 return -EIO;
b411b363
PR
5084
5085 sector = be64_to_cpu(p->sector);
5086 size = be32_to_cpu(p->blksize);
b411b363
PR
5087
5088 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5089
5090 dec_rs_pending(mdev);
5091
5092 if (get_ldev_if_state(mdev, D_FAILED)) {
5093 drbd_rs_complete_io(mdev, sector);
e05e1e59 5094 switch (pi->cmd) {
d612d309
PR
5095 case P_NEG_RS_DREPLY:
 5096 drbd_rs_failed_io(mdev, sector, size); /* fall through */
5097 case P_RS_CANCEL:
5098 break;
5099 default:
2735a594 5100 BUG();
d612d309 5101 }
b411b363
PR
5102 put_ldev(mdev);
5103 }
5104
2735a594 5105 return 0;
b411b363
PR
5106}
5107
1952e916 5108static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 5109{
e658983a 5110 struct p_barrier_ack *p = pi->data;
9ed57dcb
LE
5111 struct drbd_conf *mdev;
5112 int vnr;
1952e916 5113
9ed57dcb 5114 tl_release(tconn, p->barrier, be32_to_cpu(p->set_size));
b411b363 5115
9ed57dcb
LE
5116 rcu_read_lock();
5117 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
5118 if (mdev->state.conn == C_AHEAD &&
5119 atomic_read(&mdev->ap_in_flight) == 0 &&
5120 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
5121 mdev->start_resync_timer.expires = jiffies + HZ;
5122 add_timer(&mdev->start_resync_timer);
5123 }
c4752ef1 5124 }
9ed57dcb 5125 rcu_read_unlock();
c4752ef1 5126
2735a594 5127 return 0;
b411b363
PR
5128}
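/*
 * Besides releasing the epoch via tl_release(), the barrier ack doubles
 * as the trigger for leaving Ahead mode: for every volume in C_AHEAD
 * with no application writes in flight, a one-second timer
 * (start_resync_timer) is armed once (AHEAD_TO_SYNC_SOURCE) to begin the
 * transition towards sync source.
 */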
5129
1952e916 5130static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 5131{
1952e916 5132 struct drbd_conf *mdev;
e658983a 5133 struct p_block_ack *p = pi->data;
b411b363
PR
5134 struct drbd_work *w;
5135 sector_t sector;
5136 int size;
5137
1952e916
AG
5138 mdev = vnr_to_mdev(tconn, pi->vnr);
5139 if (!mdev)
2735a594 5140 return -EIO;
1952e916 5141
b411b363
PR
5142 sector = be64_to_cpu(p->sector);
5143 size = be32_to_cpu(p->blksize);
5144
5145 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5146
5147 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
8f7bed77 5148 drbd_ov_out_of_sync_found(mdev, sector, size);
b411b363 5149 else
8f7bed77 5150 ov_out_of_sync_print(mdev);
b411b363 5151
1d53f09e 5152 if (!get_ldev(mdev))
2735a594 5153 return 0;
1d53f09e 5154
b411b363
PR
5155 drbd_rs_complete_io(mdev, sector);
5156 dec_rs_pending(mdev);
5157
ea5442af
LE
5158 --mdev->ov_left;
5159
5160 /* let's advance progress step marks only for every other megabyte */
5161 if ((mdev->ov_left & 0x200) == 0x200)
5162 drbd_advance_rs_marks(mdev, mdev->ov_left);
5163
5164 if (mdev->ov_left == 0) {
b411b363
PR
5165 w = kmalloc(sizeof(*w), GFP_NOIO);
5166 if (w) {
5167 w->cb = w_ov_finished;
a21e9298 5168 w->mdev = mdev;
d5b27b01 5169 drbd_queue_work(&mdev->tconn->sender_work, w);
b411b363
PR
5170 } else {
 5171 dev_err(DEV, "kmalloc(w) failed.\n");
8f7bed77 5172 ov_out_of_sync_print(mdev);
b411b363
PR
5173 drbd_resync_finished(mdev);
5174 }
5175 }
1d53f09e 5176 put_ldev(mdev);
2735a594 5177 return 0;
b411b363
PR
5178}
5179
1952e916 5180static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
0ced55a3 5181{
2735a594 5182 return 0;
b411b363
PR
5183}
5184
a990be46 5185static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
0ced55a3 5186{
082a3439 5187 struct drbd_conf *mdev;
c141ebda 5188 int vnr, not_empty = 0;
32862ec7
PR
5189
5190 do {
5191 clear_bit(SIGNAL_ASENDER, &tconn->flags);
5192 flush_signals(current);
c141ebda
PR
5193
5194 rcu_read_lock();
5195 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
5196 kref_get(&mdev->kref);
5197 rcu_read_unlock();
d3fcb490 5198 if (drbd_finish_peer_reqs(mdev)) {
c141ebda
PR
5199 kref_put(&mdev->kref, &drbd_minor_destroy);
5200 return 1;
d3fcb490 5201 }
c141ebda
PR
5202 kref_put(&mdev->kref, &drbd_minor_destroy);
5203 rcu_read_lock();
082a3439 5204 }
32862ec7 5205 set_bit(SIGNAL_ASENDER, &tconn->flags);
082a3439
PR
5206
5207 spin_lock_irq(&tconn->req_lock);
c141ebda 5208 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
082a3439
PR
5209 not_empty = !list_empty(&mdev->done_ee);
5210 if (not_empty)
5211 break;
5212 }
5213 spin_unlock_irq(&tconn->req_lock);
c141ebda 5214 rcu_read_unlock();
32862ec7
PR
5215 } while (not_empty);
5216
5217 return 0;
0ced55a3
PR
5218}
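/*
 * tconn_finish_peer_reqs() keeps iterating until no volume has entries
 * left on its done_ee list: SIGNAL_ASENDER is cleared while the
 * per-volume drbd_finish_peer_reqs() calls run and set again before the
 * done_ee lists are re-checked under req_lock.  A return of 1 means one
 * of the volumes failed to finish its peer requests.
 */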
5219
b411b363
PR
5220struct asender_cmd {
5221 size_t pkt_size;
1952e916 5222 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
b411b363
PR
5223};
5224
7201b972 5225static struct asender_cmd asender_tbl[] = {
e658983a
AG
5226 [P_PING] = { 0, got_Ping },
5227 [P_PING_ACK] = { 0, got_PingAck },
b411b363
PR
5228 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5229 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5230 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
d4dabbe2 5231 [P_SUPERSEDED] = { sizeof(struct p_block_ack), got_BlockAck },
b411b363
PR
5232 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
5233 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
1952e916 5234 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply },
b411b363
PR
5235 [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
5236 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
5237 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
5238 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
02918be2 5239 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
1952e916
AG
5240 [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply },
5241 [P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), got_conn_RqSReply },
5242 [P_RETRY_WRITE] = { sizeof(struct p_block_ack), got_BlockAck },
7201b972 5243};
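/*
 * Dispatch in drbd_asender() below: pi.cmd indexes asender_tbl[]
 * directly, the expected receive length is the fixed header size plus
 * the entry's pkt_size, and any mismatch with the advertised payload
 * size (pi.size) forces a reconnect before the handler is called.
 */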
b411b363
PR
5244
5245int drbd_asender(struct drbd_thread *thi)
5246{
392c8801 5247 struct drbd_tconn *tconn = thi->tconn;
b411b363 5248 struct asender_cmd *cmd = NULL;
77351055 5249 struct packet_info pi;
257d0af6 5250 int rv;
e658983a 5251 void *buf = tconn->meta.rbuf;
b411b363 5252 int received = 0;
52b061a4
AG
5253 unsigned int header_size = drbd_header_size(tconn);
5254 int expect = header_size;
44ed167d
PR
5255 bool ping_timeout_active = false;
5256 struct net_conf *nc;
bb77d34e 5257 int ping_timeo, tcp_cork, ping_int;
3990e04d 5258 struct sched_param param = { .sched_priority = 2 };
b411b363 5259
3990e04d
PR
5260 rv = sched_setscheduler(current, SCHED_RR, &param);
5261 if (rv < 0)
 5262 conn_err(tconn, "drbd_asender: failed to set SCHED_RR priority, ret=%d\n", rv);
b411b363 5263
e77a0a5c 5264 while (get_t_state(thi) == RUNNING) {
80822284 5265 drbd_thread_current_set_cpu(thi);
b411b363 5266
44ed167d
PR
5267 rcu_read_lock();
5268 nc = rcu_dereference(tconn->net_conf);
5269 ping_timeo = nc->ping_timeo;
bb77d34e 5270 tcp_cork = nc->tcp_cork;
44ed167d
PR
5271 ping_int = nc->ping_int;
5272 rcu_read_unlock();
5273
32862ec7 5274 if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
a17647aa 5275 if (drbd_send_ping(tconn)) {
32862ec7 5276 conn_err(tconn, "drbd_send_ping has failed\n");
b411b363 5277 goto reconnect;
841ce241 5278 }
44ed167d
PR
5279 tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
5280 ping_timeout_active = true;
b411b363
PR
5281 }
5282
32862ec7
PR
5283 /* TODO: conditionally cork; it may hurt latency if we cork without
5284 much to send */
bb77d34e 5285 if (tcp_cork)
32862ec7 5286 drbd_tcp_cork(tconn->meta.socket);
a990be46
AG
5287 if (tconn_finish_peer_reqs(tconn)) {
5288 conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
32862ec7 5289 goto reconnect;
b411b363
PR
5290 }
5291 /* but unconditionally uncork unless disabled */
bb77d34e 5292 if (tcp_cork)
32862ec7 5293 drbd_tcp_uncork(tconn->meta.socket);
b411b363
PR
5294
5295 /* short circuit, recv_msg would return EINTR anyways. */
5296 if (signal_pending(current))
5297 continue;
5298
32862ec7
PR
5299 rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
5300 clear_bit(SIGNAL_ASENDER, &tconn->flags);
b411b363
PR
5301
5302 flush_signals(current);
5303
5304 /* Note:
5305 * -EINTR (on meta) we got a signal
5306 * -EAGAIN (on meta) rcvtimeo expired
5307 * -ECONNRESET other side closed the connection
5308 * -ERESTARTSYS (on data) we got a signal
5309 * rv < 0 other than above: unexpected error!
5310 * rv == expected: full header or command
5311 * rv < expected: "woken" by signal during receive
5312 * rv == 0 : "connection shut down by peer"
5313 */
5314 if (likely(rv > 0)) {
5315 received += rv;
5316 buf += rv;
5317 } else if (rv == 0) {
b66623e3
PR
5318 if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
5319 long t;
5320 rcu_read_lock();
5321 t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
5322 rcu_read_unlock();
5323
5324 t = wait_event_timeout(tconn->ping_wait,
5325 tconn->cstate < C_WF_REPORT_PARAMS,
5326 t);
599377ac
PR
5327 if (t)
5328 break;
5329 }
32862ec7 5330 conn_err(tconn, "meta connection shut down by peer.\n");
b411b363
PR
5331 goto reconnect;
5332 } else if (rv == -EAGAIN) {
cb6518cb
LE
5333 /* If the data socket received something meanwhile,
5334 * that is good enough: peer is still alive. */
32862ec7
PR
5335 if (time_after(tconn->last_received,
5336 jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
cb6518cb 5337 continue;
f36af18c 5338 if (ping_timeout_active) {
32862ec7 5339 conn_err(tconn, "PingAck did not arrive in time.\n");
b411b363
PR
5340 goto reconnect;
5341 }
32862ec7 5342 set_bit(SEND_PING, &tconn->flags);
b411b363
PR
5343 continue;
5344 } else if (rv == -EINTR) {
5345 continue;
5346 } else {
32862ec7 5347 conn_err(tconn, "sock_recvmsg returned %d\n", rv);
b411b363
PR
5348 goto reconnect;
5349 }
5350
5351 if (received == expect && cmd == NULL) {
e658983a 5352 if (decode_header(tconn, tconn->meta.rbuf, &pi))
b411b363 5353 goto reconnect;
7201b972 5354 cmd = &asender_tbl[pi.cmd];
1952e916 5355 if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
2fcb8f30
AG
5356 conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n",
5357 cmdname(pi.cmd), pi.cmd);
b411b363
PR
5358 goto disconnect;
5359 }
e658983a 5360 expect = header_size + cmd->pkt_size;
52b061a4 5361 if (pi.size != expect - header_size) {
32862ec7 5362 conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
77351055 5363 pi.cmd, pi.size);
b411b363 5364 goto reconnect;
257d0af6 5365 }
b411b363
PR
5366 }
5367 if (received == expect) {
2735a594 5368 bool err;
a4fbda8e 5369
2735a594
AG
5370 err = cmd->fn(tconn, &pi);
5371 if (err) {
1952e916 5372 conn_err(tconn, "%pf failed\n", cmd->fn);
b411b363 5373 goto reconnect;
1952e916 5374 }
b411b363 5375
a4fbda8e 5376 tconn->last_received = jiffies;
f36af18c 5377
44ed167d
PR
5378 if (cmd == &asender_tbl[P_PING_ACK]) {
5379 /* restore idle timeout */
5380 tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
5381 ping_timeout_active = false;
5382 }
f36af18c 5383
e658983a 5384 buf = tconn->meta.rbuf;
b411b363 5385 received = 0;
52b061a4 5386 expect = header_size;
b411b363
PR
5387 cmd = NULL;
5388 }
5389 }
5390
5391 if (0) {
5392reconnect:
bbeb641c 5393 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
19fffd7b 5394 conn_md_sync(tconn);
b411b363
PR
5395 }
5396 if (0) {
5397disconnect:
bbeb641c 5398 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363 5399 }
32862ec7 5400 clear_bit(SIGNAL_ASENDER, &tconn->flags);
b411b363 5401
32862ec7 5402 conn_info(tconn, "asender terminated\n");
b411b363
PR
5403
5404 return 0;
5405}