]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/block/drbd/drbd_receiver.c
constify blk_rq_map_user_iov() and friends
[mirror_ubuntu-bionic-kernel.git] / drivers / block / drbd / drbd_receiver.c
CommitLineData
b411b363
PR
1/*
2 drbd_receiver.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
13 any later version.
14
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25
b411b363
PR
26#include <linux/module.h>
27
28#include <asm/uaccess.h>
29#include <net/sock.h>
30
b411b363
PR
31#include <linux/drbd.h>
32#include <linux/fs.h>
33#include <linux/file.h>
34#include <linux/in.h>
35#include <linux/mm.h>
36#include <linux/memcontrol.h>
37#include <linux/mm_inline.h>
38#include <linux/slab.h>
b411b363
PR
39#include <linux/pkt_sched.h>
40#define __KERNEL_SYSCALLS__
41#include <linux/unistd.h>
42#include <linux/vmalloc.h>
43#include <linux/random.h>
b411b363
PR
44#include <linux/string.h>
45#include <linux/scatterlist.h>
46#include "drbd_int.h"
b411b363
PR
47#include "drbd_req.h"
48
49#include "drbd_vli.h"
50
77351055
PR
51struct packet_info {
52 enum drbd_packet cmd;
e2857216
AG
53 unsigned int size;
54 unsigned int vnr;
e658983a 55 void *data;
77351055
PR
56};
57
b411b363
PR
58enum finish_epoch {
59 FE_STILL_LIVE,
60 FE_DESTROYED,
61 FE_RECYCLED,
62};
63
6038178e 64static int drbd_do_features(struct drbd_tconn *tconn);
13e6037d 65static int drbd_do_auth(struct drbd_tconn *tconn);
c141ebda 66static int drbd_disconnected(struct drbd_conf *mdev);
b411b363 67
1e9dd291 68static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event);
99920dc5 69static int e_end_block(struct drbd_work *, int);
b411b363 70
b411b363
PR
71
72#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
73
45bb912b
LE
74/*
75 * some helper functions to deal with single linked page lists,
76 * page->private being our "next" pointer.
77 */
78
79/* If at least n pages are linked at head, get n pages off.
80 * Otherwise, don't modify head, and return NULL.
81 * Locking is the responsibility of the caller.
82 */
83static struct page *page_chain_del(struct page **head, int n)
84{
85 struct page *page;
86 struct page *tmp;
87
88 BUG_ON(!n);
89 BUG_ON(!head);
90
91 page = *head;
23ce4227
PR
92
93 if (!page)
94 return NULL;
95
45bb912b
LE
96 while (page) {
97 tmp = page_chain_next(page);
98 if (--n == 0)
99 break; /* found sufficient pages */
100 if (tmp == NULL)
101 /* insufficient pages, don't use any of them. */
102 return NULL;
103 page = tmp;
104 }
105
106 /* add end of list marker for the returned list */
107 set_page_private(page, 0);
108 /* actual return value, and adjustment of head */
109 page = *head;
110 *head = tmp;
111 return page;
112}
113
114/* may be used outside of locks to find the tail of a (usually short)
115 * "private" page chain, before adding it back to a global chain head
116 * with page_chain_add() under a spinlock. */
117static struct page *page_chain_tail(struct page *page, int *len)
118{
119 struct page *tmp;
120 int i = 1;
121 while ((tmp = page_chain_next(page)))
122 ++i, page = tmp;
123 if (len)
124 *len = i;
125 return page;
126}
127
128static int page_chain_free(struct page *page)
129{
130 struct page *tmp;
131 int i = 0;
132 page_chain_for_each_safe(page, tmp) {
133 put_page(page);
134 ++i;
135 }
136 return i;
137}
138
139static void page_chain_add(struct page **head,
140 struct page *chain_first, struct page *chain_last)
141{
142#if 1
143 struct page *tmp;
144 tmp = page_chain_tail(chain_first, NULL);
145 BUG_ON(tmp != chain_last);
146#endif
147
148 /* add chain to head */
149 set_page_private(chain_last, (unsigned long)*head);
150 *head = chain_first;
151}
152
18c2d522
AG
153static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
154 unsigned int number)
b411b363
PR
155{
156 struct page *page = NULL;
45bb912b 157 struct page *tmp = NULL;
18c2d522 158 unsigned int i = 0;
b411b363
PR
159
160 /* Yes, testing drbd_pp_vacant outside the lock is racy.
161 * So what. It saves a spin_lock. */
45bb912b 162 if (drbd_pp_vacant >= number) {
b411b363 163 spin_lock(&drbd_pp_lock);
45bb912b
LE
164 page = page_chain_del(&drbd_pp_pool, number);
165 if (page)
166 drbd_pp_vacant -= number;
b411b363 167 spin_unlock(&drbd_pp_lock);
45bb912b
LE
168 if (page)
169 return page;
b411b363 170 }
45bb912b 171
b411b363
PR
172 /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
173 * "criss-cross" setup, that might cause write-out on some other DRBD,
174 * which in turn might block on the other node at this very place. */
45bb912b
LE
175 for (i = 0; i < number; i++) {
176 tmp = alloc_page(GFP_TRY);
177 if (!tmp)
178 break;
179 set_page_private(tmp, (unsigned long)page);
180 page = tmp;
181 }
182
183 if (i == number)
184 return page;
185
186 /* Not enough pages immediately available this time.
c37c8ecf 187 * No need to jump around here, drbd_alloc_pages will retry this
45bb912b
LE
188 * function "soon". */
189 if (page) {
190 tmp = page_chain_tail(page, NULL);
191 spin_lock(&drbd_pp_lock);
192 page_chain_add(&drbd_pp_pool, page, tmp);
193 drbd_pp_vacant += i;
194 spin_unlock(&drbd_pp_lock);
195 }
196 return NULL;
b411b363
PR
197}
198
a990be46
AG
199static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
200 struct list_head *to_be_freed)
b411b363 201{
db830c46 202 struct drbd_peer_request *peer_req;
b411b363
PR
203 struct list_head *le, *tle;
204
205 /* The EEs are always appended to the end of the list. Since
206 they are sent in order over the wire, they have to finish
207 in order. As soon as we see the first not finished we can
208 stop to examine the list... */
209
210 list_for_each_safe(le, tle, &mdev->net_ee) {
db830c46 211 peer_req = list_entry(le, struct drbd_peer_request, w.list);
045417f7 212 if (drbd_peer_req_has_active_page(peer_req))
b411b363
PR
213 break;
214 list_move(le, to_be_freed);
215 }
216}
217
218static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
219{
220 LIST_HEAD(reclaimed);
db830c46 221 struct drbd_peer_request *peer_req, *t;
b411b363 222
87eeee41 223 spin_lock_irq(&mdev->tconn->req_lock);
a990be46 224 reclaim_finished_net_peer_reqs(mdev, &reclaimed);
87eeee41 225 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 226
db830c46 227 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
3967deb1 228 drbd_free_net_peer_req(mdev, peer_req);
b411b363
PR
229}
230
231/**
c37c8ecf 232 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
b411b363 233 * @mdev: DRBD device.
45bb912b
LE
234 * @number: number of pages requested
235 * @retry: whether to retry, if not enough pages are available right now
236 *
237 * Tries to allocate number pages, first from our own page pool, then from
238 * the kernel, unless this allocation would exceed the max_buffers setting.
239 * Possibly retry until DRBD frees sufficient pages somewhere else.
b411b363 240 *
45bb912b 241 * Returns a page chain linked via page->private.
b411b363 242 */
c37c8ecf
AG
243struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
244 bool retry)
b411b363
PR
245{
246 struct page *page = NULL;
44ed167d 247 struct net_conf *nc;
b411b363 248 DEFINE_WAIT(wait);
44ed167d 249 int mxb;
b411b363 250
45bb912b
LE
251 /* Yes, we may run up to @number over max_buffers. If we
252 * follow it strictly, the admin will get it wrong anyways. */
44ed167d
PR
253 rcu_read_lock();
254 nc = rcu_dereference(mdev->tconn->net_conf);
255 mxb = nc ? nc->max_buffers : 1000000;
256 rcu_read_unlock();
257
258 if (atomic_read(&mdev->pp_in_use) < mxb)
18c2d522 259 page = __drbd_alloc_pages(mdev, number);
b411b363 260
45bb912b 261 while (page == NULL) {
b411b363
PR
262 prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
263
264 drbd_kick_lo_and_reclaim_net(mdev);
265
44ed167d 266 if (atomic_read(&mdev->pp_in_use) < mxb) {
18c2d522 267 page = __drbd_alloc_pages(mdev, number);
b411b363
PR
268 if (page)
269 break;
270 }
271
272 if (!retry)
273 break;
274
275 if (signal_pending(current)) {
c37c8ecf 276 dev_warn(DEV, "drbd_alloc_pages interrupted!\n");
b411b363
PR
277 break;
278 }
279
280 schedule();
281 }
282 finish_wait(&drbd_pp_wait, &wait);
283
45bb912b
LE
284 if (page)
285 atomic_add(number, &mdev->pp_in_use);
b411b363
PR
286 return page;
287}
288
c37c8ecf 289/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
87eeee41 290 * Is also used from inside an other spin_lock_irq(&mdev->tconn->req_lock);
45bb912b
LE
291 * Either links the page chain back to the global pool,
292 * or returns all pages to the system. */
5cc287e0 293static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
b411b363 294{
435f0740 295 atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
b411b363 296 int i;
435f0740 297
a73ff323
LE
298 if (page == NULL)
299 return;
300
81a5d60e 301 if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
45bb912b
LE
302 i = page_chain_free(page);
303 else {
304 struct page *tmp;
305 tmp = page_chain_tail(page, &i);
306 spin_lock(&drbd_pp_lock);
307 page_chain_add(&drbd_pp_pool, page, tmp);
308 drbd_pp_vacant += i;
309 spin_unlock(&drbd_pp_lock);
b411b363 310 }
435f0740 311 i = atomic_sub_return(i, a);
45bb912b 312 if (i < 0)
435f0740
LE
313 dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
314 is_net ? "pp_in_use_by_net" : "pp_in_use", i);
b411b363
PR
315 wake_up(&drbd_pp_wait);
316}
317
318/*
319You need to hold the req_lock:
320 _drbd_wait_ee_list_empty()
321
322You must not have the req_lock:
3967deb1 323 drbd_free_peer_req()
0db55363 324 drbd_alloc_peer_req()
7721f567 325 drbd_free_peer_reqs()
b411b363 326 drbd_ee_fix_bhs()
a990be46 327 drbd_finish_peer_reqs()
b411b363
PR
328 drbd_clear_done_ee()
329 drbd_wait_ee_list_empty()
330*/
331
f6ffca9f 332struct drbd_peer_request *
0db55363
AG
333drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
334 unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
b411b363 335{
db830c46 336 struct drbd_peer_request *peer_req;
a73ff323 337 struct page *page = NULL;
45bb912b 338 unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
b411b363 339
0cf9d27e 340 if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
b411b363
PR
341 return NULL;
342
db830c46
AG
343 peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
344 if (!peer_req) {
b411b363 345 if (!(gfp_mask & __GFP_NOWARN))
0db55363 346 dev_err(DEV, "%s: allocation failed\n", __func__);
b411b363
PR
347 return NULL;
348 }
349
a73ff323 350 if (data_size) {
81a3537a 351 page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
a73ff323
LE
352 if (!page)
353 goto fail;
354 }
b411b363 355
db830c46
AG
356 drbd_clear_interval(&peer_req->i);
357 peer_req->i.size = data_size;
358 peer_req->i.sector = sector;
359 peer_req->i.local = false;
360 peer_req->i.waiting = false;
361
362 peer_req->epoch = NULL;
a21e9298 363 peer_req->w.mdev = mdev;
db830c46
AG
364 peer_req->pages = page;
365 atomic_set(&peer_req->pending_bios, 0);
366 peer_req->flags = 0;
9a8e7753
AG
367 /*
368 * The block_id is opaque to the receiver. It is not endianness
369 * converted, and sent back to the sender unchanged.
370 */
db830c46 371 peer_req->block_id = id;
b411b363 372
db830c46 373 return peer_req;
b411b363 374
45bb912b 375 fail:
db830c46 376 mempool_free(peer_req, drbd_ee_mempool);
b411b363
PR
377 return NULL;
378}
379
3967deb1 380void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
f6ffca9f 381 int is_net)
b411b363 382{
db830c46
AG
383 if (peer_req->flags & EE_HAS_DIGEST)
384 kfree(peer_req->digest);
5cc287e0 385 drbd_free_pages(mdev, peer_req->pages, is_net);
db830c46
AG
386 D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
387 D_ASSERT(drbd_interval_empty(&peer_req->i));
388 mempool_free(peer_req, drbd_ee_mempool);
b411b363
PR
389}
390
7721f567 391int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
b411b363
PR
392{
393 LIST_HEAD(work_list);
db830c46 394 struct drbd_peer_request *peer_req, *t;
b411b363 395 int count = 0;
435f0740 396 int is_net = list == &mdev->net_ee;
b411b363 397
87eeee41 398 spin_lock_irq(&mdev->tconn->req_lock);
b411b363 399 list_splice_init(list, &work_list);
87eeee41 400 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 401
db830c46 402 list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
3967deb1 403 __drbd_free_peer_req(mdev, peer_req, is_net);
b411b363
PR
404 count++;
405 }
406 return count;
407}
408
b411b363 409/*
a990be46 410 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
b411b363 411 */
a990be46 412static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
b411b363
PR
413{
414 LIST_HEAD(work_list);
415 LIST_HEAD(reclaimed);
db830c46 416 struct drbd_peer_request *peer_req, *t;
e2b3032b 417 int err = 0;
b411b363 418
87eeee41 419 spin_lock_irq(&mdev->tconn->req_lock);
a990be46 420 reclaim_finished_net_peer_reqs(mdev, &reclaimed);
b411b363 421 list_splice_init(&mdev->done_ee, &work_list);
87eeee41 422 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 423
db830c46 424 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
3967deb1 425 drbd_free_net_peer_req(mdev, peer_req);
b411b363
PR
426
427 /* possible callbacks here:
d4dabbe2 428 * e_end_block, and e_end_resync_block, e_send_superseded.
b411b363
PR
429 * all ignore the last argument.
430 */
db830c46 431 list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
e2b3032b
AG
432 int err2;
433
b411b363 434 /* list_del not necessary, next/prev members not touched */
e2b3032b
AG
435 err2 = peer_req->w.cb(&peer_req->w, !!err);
436 if (!err)
437 err = err2;
3967deb1 438 drbd_free_peer_req(mdev, peer_req);
b411b363
PR
439 }
440 wake_up(&mdev->ee_wait);
441
e2b3032b 442 return err;
b411b363
PR
443}
444
d4da1537
AG
445static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
446 struct list_head *head)
b411b363
PR
447{
448 DEFINE_WAIT(wait);
449
450 /* avoids spin_lock/unlock
451 * and calling prepare_to_wait in the fast path */
452 while (!list_empty(head)) {
453 prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
87eeee41 454 spin_unlock_irq(&mdev->tconn->req_lock);
7eaceacc 455 io_schedule();
b411b363 456 finish_wait(&mdev->ee_wait, &wait);
87eeee41 457 spin_lock_irq(&mdev->tconn->req_lock);
b411b363
PR
458 }
459}
460
d4da1537
AG
461static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
462 struct list_head *head)
b411b363 463{
87eeee41 464 spin_lock_irq(&mdev->tconn->req_lock);
b411b363 465 _drbd_wait_ee_list_empty(mdev, head);
87eeee41 466 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363
PR
467}
468
dbd9eea0 469static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
b411b363
PR
470{
471 mm_segment_t oldfs;
472 struct kvec iov = {
473 .iov_base = buf,
474 .iov_len = size,
475 };
476 struct msghdr msg = {
477 .msg_iovlen = 1,
478 .msg_iov = (struct iovec *)&iov,
479 .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
480 };
481 int rv;
482
483 oldfs = get_fs();
484 set_fs(KERNEL_DS);
485 rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
486 set_fs(oldfs);
487
488 return rv;
489}
490
de0ff338 491static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
b411b363 492{
b411b363
PR
493 int rv;
494
1393b59f 495 rv = drbd_recv_short(tconn->data.socket, buf, size, 0);
b411b363 496
dbd0820c
PR
497 if (rv < 0) {
498 if (rv == -ECONNRESET)
155522df 499 conn_info(tconn, "sock was reset by peer\n");
dbd0820c 500 else if (rv != -ERESTARTSYS)
155522df 501 conn_err(tconn, "sock_recvmsg returned %d\n", rv);
dbd0820c 502 } else if (rv == 0) {
b66623e3
PR
503 if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
504 long t;
505 rcu_read_lock();
506 t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
507 rcu_read_unlock();
508
509 t = wait_event_timeout(tconn->ping_wait, tconn->cstate < C_WF_REPORT_PARAMS, t);
510
599377ac
PR
511 if (t)
512 goto out;
513 }
b66623e3 514 conn_info(tconn, "sock was shut down by peer\n");
599377ac
PR
515 }
516
b411b363 517 if (rv != size)
bbeb641c 518 conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
b411b363 519
599377ac 520out:
b411b363
PR
521 return rv;
522}
523
c6967746
AG
524static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
525{
526 int err;
527
528 err = drbd_recv(tconn, buf, size);
529 if (err != size) {
530 if (err >= 0)
531 err = -EIO;
532 } else
533 err = 0;
534 return err;
535}
536
a5c31904
AG
537static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
538{
539 int err;
540
541 err = drbd_recv_all(tconn, buf, size);
542 if (err && !signal_pending(current))
543 conn_warn(tconn, "short read (expected size %d)\n", (int)size);
544 return err;
545}
546
5dbf1673
LE
547/* quoting tcp(7):
548 * On individual connections, the socket buffer size must be set prior to the
549 * listen(2) or connect(2) calls in order to have it take effect.
550 * This is our wrapper to do so.
551 */
552static void drbd_setbufsize(struct socket *sock, unsigned int snd,
553 unsigned int rcv)
554{
555 /* open coded SO_SNDBUF, SO_RCVBUF */
556 if (snd) {
557 sock->sk->sk_sndbuf = snd;
558 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
559 }
560 if (rcv) {
561 sock->sk->sk_rcvbuf = rcv;
562 sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
563 }
564}
565
eac3e990 566static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
b411b363
PR
567{
568 const char *what;
569 struct socket *sock;
570 struct sockaddr_in6 src_in6;
44ed167d
PR
571 struct sockaddr_in6 peer_in6;
572 struct net_conf *nc;
573 int err, peer_addr_len, my_addr_len;
69ef82de 574 int sndbuf_size, rcvbuf_size, connect_int;
b411b363
PR
575 int disconnect_on_error = 1;
576
44ed167d
PR
577 rcu_read_lock();
578 nc = rcu_dereference(tconn->net_conf);
579 if (!nc) {
580 rcu_read_unlock();
b411b363 581 return NULL;
44ed167d 582 }
44ed167d
PR
583 sndbuf_size = nc->sndbuf_size;
584 rcvbuf_size = nc->rcvbuf_size;
69ef82de 585 connect_int = nc->connect_int;
089c075d 586 rcu_read_unlock();
44ed167d 587
089c075d
AG
588 my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6));
589 memcpy(&src_in6, &tconn->my_addr, my_addr_len);
44ed167d 590
089c075d 591 if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6)
44ed167d
PR
592 src_in6.sin6_port = 0;
593 else
594 ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
595
089c075d
AG
596 peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6));
597 memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len);
b411b363
PR
598
599 what = "sock_create_kern";
44ed167d
PR
600 err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
601 SOCK_STREAM, IPPROTO_TCP, &sock);
b411b363
PR
602 if (err < 0) {
603 sock = NULL;
604 goto out;
605 }
606
607 sock->sk->sk_rcvtimeo =
69ef82de 608 sock->sk->sk_sndtimeo = connect_int * HZ;
44ed167d 609 drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);
b411b363
PR
610
611 /* explicitly bind to the configured IP as source IP
612 * for the outgoing connections.
613 * This is needed for multihomed hosts and to be
614 * able to use lo: interfaces for drbd.
615 * Make sure to use 0 as port number, so linux selects
616 * a free one dynamically.
617 */
b411b363 618 what = "bind before connect";
44ed167d 619 err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
b411b363
PR
620 if (err < 0)
621 goto out;
622
623 /* connect may fail, peer not yet available.
624 * stay C_WF_CONNECTION, don't go Disconnecting! */
625 disconnect_on_error = 0;
626 what = "connect";
44ed167d 627 err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);
b411b363
PR
628
629out:
630 if (err < 0) {
631 if (sock) {
632 sock_release(sock);
633 sock = NULL;
634 }
635 switch (-err) {
636 /* timeout, busy, signal pending */
637 case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
638 case EINTR: case ERESTARTSYS:
639 /* peer not (yet) available, network problem */
640 case ECONNREFUSED: case ENETUNREACH:
641 case EHOSTDOWN: case EHOSTUNREACH:
642 disconnect_on_error = 0;
643 break;
644 default:
eac3e990 645 conn_err(tconn, "%s failed, err = %d\n", what, err);
b411b363
PR
646 }
647 if (disconnect_on_error)
bbeb641c 648 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363 649 }
44ed167d 650
b411b363
PR
651 return sock;
652}
653
7a426fd8
PR
654struct accept_wait_data {
655 struct drbd_tconn *tconn;
656 struct socket *s_listen;
657 struct completion door_bell;
658 void (*original_sk_state_change)(struct sock *sk);
659
660};
661
715306f6 662static void drbd_incoming_connection(struct sock *sk)
7a426fd8
PR
663{
664 struct accept_wait_data *ad = sk->sk_user_data;
715306f6 665 void (*state_change)(struct sock *sk);
7a426fd8 666
715306f6
AG
667 state_change = ad->original_sk_state_change;
668 if (sk->sk_state == TCP_ESTABLISHED)
669 complete(&ad->door_bell);
670 state_change(sk);
7a426fd8
PR
671}
672
673static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_data *ad)
b411b363 674{
1f3e509b 675 int err, sndbuf_size, rcvbuf_size, my_addr_len;
44ed167d 676 struct sockaddr_in6 my_addr;
1f3e509b 677 struct socket *s_listen;
44ed167d 678 struct net_conf *nc;
b411b363
PR
679 const char *what;
680
44ed167d
PR
681 rcu_read_lock();
682 nc = rcu_dereference(tconn->net_conf);
683 if (!nc) {
684 rcu_read_unlock();
7a426fd8 685 return -EIO;
44ed167d 686 }
44ed167d
PR
687 sndbuf_size = nc->sndbuf_size;
688 rcvbuf_size = nc->rcvbuf_size;
44ed167d 689 rcu_read_unlock();
b411b363 690
089c075d
AG
691 my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6));
692 memcpy(&my_addr, &tconn->my_addr, my_addr_len);
b411b363
PR
693
694 what = "sock_create_kern";
44ed167d 695 err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
1f3e509b 696 SOCK_STREAM, IPPROTO_TCP, &s_listen);
b411b363
PR
697 if (err) {
698 s_listen = NULL;
699 goto out;
700 }
701
98683650 702 s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
44ed167d 703 drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);
b411b363
PR
704
705 what = "bind before listen";
44ed167d 706 err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
b411b363
PR
707 if (err < 0)
708 goto out;
709
7a426fd8
PR
710 ad->s_listen = s_listen;
711 write_lock_bh(&s_listen->sk->sk_callback_lock);
712 ad->original_sk_state_change = s_listen->sk->sk_state_change;
715306f6 713 s_listen->sk->sk_state_change = drbd_incoming_connection;
7a426fd8
PR
714 s_listen->sk->sk_user_data = ad;
715 write_unlock_bh(&s_listen->sk->sk_callback_lock);
b411b363 716
2820fd39
PR
717 what = "listen";
718 err = s_listen->ops->listen(s_listen, 5);
719 if (err < 0)
720 goto out;
721
7a426fd8 722 return 0;
b411b363
PR
723out:
724 if (s_listen)
725 sock_release(s_listen);
726 if (err < 0) {
727 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
1f3e509b
PR
728 conn_err(tconn, "%s failed, err = %d\n", what, err);
729 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363
PR
730 }
731 }
b411b363 732
7a426fd8 733 return -EIO;
b411b363
PR
734}
735
715306f6 736static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
b411b363 737{
715306f6
AG
738 write_lock_bh(&sk->sk_callback_lock);
739 sk->sk_state_change = ad->original_sk_state_change;
740 sk->sk_user_data = NULL;
741 write_unlock_bh(&sk->sk_callback_lock);
b411b363
PR
742}
743
7a426fd8 744static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct accept_wait_data *ad)
b411b363 745{
1f3e509b
PR
746 int timeo, connect_int, err = 0;
747 struct socket *s_estab = NULL;
1f3e509b
PR
748 struct net_conf *nc;
749
750 rcu_read_lock();
751 nc = rcu_dereference(tconn->net_conf);
752 if (!nc) {
753 rcu_read_unlock();
754 return NULL;
755 }
756 connect_int = nc->connect_int;
757 rcu_read_unlock();
758
759 timeo = connect_int * HZ;
38b682b2
AM
760 /* 28.5% random jitter */
761 timeo += (prandom_u32() & 1) ? timeo / 7 : -timeo / 7;
1f3e509b 762
7a426fd8
PR
763 err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
764 if (err <= 0)
765 return NULL;
b411b363 766
7a426fd8 767 err = kernel_accept(ad->s_listen, &s_estab, 0);
b411b363
PR
768 if (err < 0) {
769 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
1f3e509b 770 conn_err(tconn, "accept failed, err = %d\n", err);
bbeb641c 771 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363
PR
772 }
773 }
b411b363 774
715306f6
AG
775 if (s_estab)
776 unregister_state_change(s_estab->sk, ad);
b411b363 777
b411b363
PR
778 return s_estab;
779}
b411b363 780
e658983a 781static int decode_header(struct drbd_tconn *, void *, struct packet_info *);
b411b363 782
9f5bdc33
AG
783static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
784 enum drbd_packet cmd)
785{
786 if (!conn_prepare_command(tconn, sock))
787 return -EIO;
e658983a 788 return conn_send_command(tconn, sock, cmd, 0, NULL, 0);
b411b363
PR
789}
790
9f5bdc33 791static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)
b411b363 792{
9f5bdc33
AG
793 unsigned int header_size = drbd_header_size(tconn);
794 struct packet_info pi;
795 int err;
b411b363 796
9f5bdc33
AG
797 err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
798 if (err != header_size) {
799 if (err >= 0)
800 err = -EIO;
801 return err;
802 }
803 err = decode_header(tconn, tconn->data.rbuf, &pi);
804 if (err)
805 return err;
806 return pi.cmd;
b411b363
PR
807}
808
809/**
810 * drbd_socket_okay() - Free the socket if its connection is not okay
b411b363
PR
811 * @sock: pointer to the pointer to the socket.
812 */
dbd9eea0 813static int drbd_socket_okay(struct socket **sock)
b411b363
PR
814{
815 int rr;
816 char tb[4];
817
818 if (!*sock)
81e84650 819 return false;
b411b363 820
dbd9eea0 821 rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
b411b363
PR
822
823 if (rr > 0 || rr == -EAGAIN) {
81e84650 824 return true;
b411b363
PR
825 } else {
826 sock_release(*sock);
827 *sock = NULL;
81e84650 828 return false;
b411b363
PR
829 }
830}
2325eb66
PR
831/* Gets called if a connection is established, or if a new minor gets created
832 in a connection */
c141ebda 833int drbd_connected(struct drbd_conf *mdev)
907599e0 834{
0829f5ed 835 int err;
907599e0
PR
836
837 atomic_set(&mdev->packet_seq, 0);
838 mdev->peer_seq = 0;
839
8410da8f
PR
840 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
841 &mdev->tconn->cstate_mutex :
842 &mdev->own_state_mutex;
843
0829f5ed
AG
844 err = drbd_send_sync_param(mdev);
845 if (!err)
846 err = drbd_send_sizes(mdev, 0, 0);
847 if (!err)
848 err = drbd_send_uuids(mdev);
849 if (!err)
43de7c85 850 err = drbd_send_current_state(mdev);
907599e0
PR
851 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
852 clear_bit(RESIZE_PENDING, &mdev->flags);
2d56a974 853 atomic_set(&mdev->ap_in_flight, 0);
8b924f1d 854 mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
0829f5ed 855 return err;
907599e0 856}
b411b363
PR
857
858/*
859 * return values:
860 * 1 yes, we have a valid connection
861 * 0 oops, did not work out, please try again
862 * -1 peer talks different language,
863 * no point in trying again, please go standalone.
864 * -2 We do not have a network config...
865 */
81fa2e67 866static int conn_connect(struct drbd_tconn *tconn)
b411b363 867{
7da35862 868 struct drbd_socket sock, msock;
c141ebda 869 struct drbd_conf *mdev;
44ed167d 870 struct net_conf *nc;
92f14951 871 int vnr, timeout, h, ok;
08b165ba 872 bool discard_my_data;
197296ff 873 enum drbd_state_rv rv;
7a426fd8
PR
874 struct accept_wait_data ad = {
875 .tconn = tconn,
876 .door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
877 };
b411b363 878
b66623e3 879 clear_bit(DISCONNECT_SENT, &tconn->flags);
bbeb641c 880 if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
b411b363
PR
881 return -2;
882
7da35862
PR
883 mutex_init(&sock.mutex);
884 sock.sbuf = tconn->data.sbuf;
885 sock.rbuf = tconn->data.rbuf;
886 sock.socket = NULL;
887 mutex_init(&msock.mutex);
888 msock.sbuf = tconn->meta.sbuf;
889 msock.rbuf = tconn->meta.rbuf;
890 msock.socket = NULL;
891
0916e0e3
AG
892 /* Assume that the peer only understands protocol 80 until we know better. */
893 tconn->agreed_pro_version = 80;
b411b363 894
7a426fd8
PR
895 if (prepare_listen_socket(tconn, &ad))
896 return 0;
b411b363
PR
897
898 do {
2bf89621 899 struct socket *s;
b411b363 900
92f14951 901 s = drbd_try_connect(tconn);
b411b363 902 if (s) {
7da35862
PR
903 if (!sock.socket) {
904 sock.socket = s;
905 send_first_packet(tconn, &sock, P_INITIAL_DATA);
906 } else if (!msock.socket) {
427c0434 907 clear_bit(RESOLVE_CONFLICTS, &tconn->flags);
7da35862
PR
908 msock.socket = s;
909 send_first_packet(tconn, &msock, P_INITIAL_META);
b411b363 910 } else {
81fa2e67 911 conn_err(tconn, "Logic error in conn_connect()\n");
b411b363
PR
912 goto out_release_sockets;
913 }
914 }
915
7da35862
PR
916 if (sock.socket && msock.socket) {
917 rcu_read_lock();
918 nc = rcu_dereference(tconn->net_conf);
919 timeout = nc->ping_timeo * HZ / 10;
920 rcu_read_unlock();
921 schedule_timeout_interruptible(timeout);
922 ok = drbd_socket_okay(&sock.socket);
923 ok = drbd_socket_okay(&msock.socket) && ok;
b411b363
PR
924 if (ok)
925 break;
926 }
927
928retry:
7a426fd8 929 s = drbd_wait_for_connect(tconn, &ad);
b411b363 930 if (s) {
92f14951 931 int fp = receive_first_packet(tconn, s);
7da35862
PR
932 drbd_socket_okay(&sock.socket);
933 drbd_socket_okay(&msock.socket);
92f14951 934 switch (fp) {
e5d6f33a 935 case P_INITIAL_DATA:
7da35862 936 if (sock.socket) {
907599e0 937 conn_warn(tconn, "initial packet S crossed\n");
7da35862 938 sock_release(sock.socket);
80c6eed4
PR
939 sock.socket = s;
940 goto randomize;
b411b363 941 }
7da35862 942 sock.socket = s;
b411b363 943 break;
e5d6f33a 944 case P_INITIAL_META:
427c0434 945 set_bit(RESOLVE_CONFLICTS, &tconn->flags);
7da35862 946 if (msock.socket) {
907599e0 947 conn_warn(tconn, "initial packet M crossed\n");
7da35862 948 sock_release(msock.socket);
80c6eed4
PR
949 msock.socket = s;
950 goto randomize;
b411b363 951 }
7da35862 952 msock.socket = s;
b411b363
PR
953 break;
954 default:
907599e0 955 conn_warn(tconn, "Error receiving initial packet\n");
b411b363 956 sock_release(s);
80c6eed4 957randomize:
38b682b2 958 if (prandom_u32() & 1)
b411b363
PR
959 goto retry;
960 }
961 }
962
bbeb641c 963 if (tconn->cstate <= C_DISCONNECTING)
b411b363
PR
964 goto out_release_sockets;
965 if (signal_pending(current)) {
966 flush_signals(current);
967 smp_rmb();
907599e0 968 if (get_t_state(&tconn->receiver) == EXITING)
b411b363
PR
969 goto out_release_sockets;
970 }
971
b666dbf8
PR
972 ok = drbd_socket_okay(&sock.socket);
973 ok = drbd_socket_okay(&msock.socket) && ok;
974 } while (!ok);
b411b363 975
7a426fd8
PR
976 if (ad.s_listen)
977 sock_release(ad.s_listen);
b411b363 978
98683650
PR
979 sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
980 msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
b411b363 981
7da35862
PR
982 sock.socket->sk->sk_allocation = GFP_NOIO;
983 msock.socket->sk->sk_allocation = GFP_NOIO;
b411b363 984
7da35862
PR
985 sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
986 msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;
b411b363 987
b411b363 988 /* NOT YET ...
7da35862
PR
989 * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
990 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
6038178e 991 * first set it to the P_CONNECTION_FEATURES timeout,
b411b363 992 * which we set to 4x the configured ping_timeout. */
44ed167d
PR
993 rcu_read_lock();
994 nc = rcu_dereference(tconn->net_conf);
995
7da35862
PR
996 sock.socket->sk->sk_sndtimeo =
997 sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
b411b363 998
7da35862 999 msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
44ed167d 1000 timeout = nc->timeout * HZ / 10;
08b165ba 1001 discard_my_data = nc->discard_my_data;
44ed167d 1002 rcu_read_unlock();
b411b363 1003
7da35862 1004 msock.socket->sk->sk_sndtimeo = timeout;
b411b363
PR
1005
1006 /* we don't want delays.
25985edc 1007 * we use TCP_CORK where appropriate, though */
7da35862
PR
1008 drbd_tcp_nodelay(sock.socket);
1009 drbd_tcp_nodelay(msock.socket);
b411b363 1010
7da35862
PR
1011 tconn->data.socket = sock.socket;
1012 tconn->meta.socket = msock.socket;
907599e0 1013 tconn->last_received = jiffies;
b411b363 1014
6038178e 1015 h = drbd_do_features(tconn);
b411b363
PR
1016 if (h <= 0)
1017 return h;
1018
907599e0 1019 if (tconn->cram_hmac_tfm) {
b411b363 1020 /* drbd_request_state(mdev, NS(conn, WFAuth)); */
907599e0 1021 switch (drbd_do_auth(tconn)) {
b10d96cb 1022 case -1:
907599e0 1023 conn_err(tconn, "Authentication of peer failed\n");
b411b363 1024 return -1;
b10d96cb 1025 case 0:
907599e0 1026 conn_err(tconn, "Authentication of peer failed, trying again.\n");
b10d96cb 1027 return 0;
b411b363
PR
1028 }
1029 }
1030
7da35862
PR
1031 tconn->data.socket->sk->sk_sndtimeo = timeout;
1032 tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
b411b363 1033
387eb308 1034 if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
7e2455c1 1035 return -1;
b411b363 1036
a1096a6e
PR
1037 set_bit(STATE_SENT, &tconn->flags);
1038
c141ebda
PR
1039 rcu_read_lock();
1040 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1041 kref_get(&mdev->kref);
26ea8f92
AG
1042 rcu_read_unlock();
1043
13c76aba
PR
1044 /* Prevent a race between resync-handshake and
1045 * being promoted to Primary.
1046 *
1047 * Grab and release the state mutex, so we know that any current
1048 * drbd_set_role() is finished, and any incoming drbd_set_role
1049 * will see the STATE_SENT flag, and wait for it to be cleared.
1050 */
1051 mutex_lock(mdev->state_mutex);
1052 mutex_unlock(mdev->state_mutex);
1053
08b165ba
PR
1054 if (discard_my_data)
1055 set_bit(DISCARD_MY_DATA, &mdev->flags);
1056 else
1057 clear_bit(DISCARD_MY_DATA, &mdev->flags);
1058
c141ebda
PR
1059 drbd_connected(mdev);
1060 kref_put(&mdev->kref, &drbd_minor_destroy);
1061 rcu_read_lock();
1062 }
1063 rcu_read_unlock();
1064
a1096a6e 1065 rv = conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
ed635cb0 1066 if (rv < SS_SUCCESS || tconn->cstate != C_WF_REPORT_PARAMS) {
a1096a6e 1067 clear_bit(STATE_SENT, &tconn->flags);
1e86ac48 1068 return 0;
a1096a6e 1069 }
1e86ac48 1070
823bd832 1071 drbd_thread_start(&tconn->asender);
b411b363 1072
08b165ba
PR
1073 mutex_lock(&tconn->conf_update);
1074 /* The discard_my_data flag is a single-shot modifier to the next
1075 * connection attempt, the handshake of which is now well underway.
1076 * No need for rcu style copying of the whole struct
1077 * just to clear a single value. */
1078 tconn->net_conf->discard_my_data = 0;
1079 mutex_unlock(&tconn->conf_update);
1080
d3fcb490 1081 return h;
b411b363
PR
1082
1083out_release_sockets:
7a426fd8
PR
1084 if (ad.s_listen)
1085 sock_release(ad.s_listen);
7da35862
PR
1086 if (sock.socket)
1087 sock_release(sock.socket);
1088 if (msock.socket)
1089 sock_release(msock.socket);
b411b363
PR
1090 return -1;
1091}
1092
e658983a 1093static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)
b411b363 1094{
e658983a
AG
1095 unsigned int header_size = drbd_header_size(tconn);
1096
0c8e36d9
AG
1097 if (header_size == sizeof(struct p_header100) &&
1098 *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
1099 struct p_header100 *h = header;
1100 if (h->pad != 0) {
1101 conn_err(tconn, "Header padding is not zero\n");
1102 return -EINVAL;
1103 }
1104 pi->vnr = be16_to_cpu(h->volume);
1105 pi->cmd = be16_to_cpu(h->command);
1106 pi->size = be32_to_cpu(h->length);
1107 } else if (header_size == sizeof(struct p_header95) &&
1108 *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
e658983a 1109 struct p_header95 *h = header;
e658983a 1110 pi->cmd = be16_to_cpu(h->command);
b55d84ba
AG
1111 pi->size = be32_to_cpu(h->length);
1112 pi->vnr = 0;
e658983a
AG
1113 } else if (header_size == sizeof(struct p_header80) &&
1114 *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
1115 struct p_header80 *h = header;
1116 pi->cmd = be16_to_cpu(h->command);
1117 pi->size = be16_to_cpu(h->length);
77351055 1118 pi->vnr = 0;
02918be2 1119 } else {
e658983a
AG
1120 conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
1121 be32_to_cpu(*(__be32 *)header),
1122 tconn->agreed_pro_version);
8172f3e9 1123 return -EINVAL;
b411b363 1124 }
e658983a 1125 pi->data = header + header_size;
8172f3e9 1126 return 0;
257d0af6 1127}
b411b363 1128
9ba7aa00 1129static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
257d0af6 1130{
e658983a 1131 void *buffer = tconn->data.rbuf;
69bc7bc3 1132 int err;
257d0af6 1133
e658983a 1134 err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));
a5c31904 1135 if (err)
69bc7bc3 1136 return err;
257d0af6 1137
e658983a 1138 err = decode_header(tconn, buffer, pi);
9ba7aa00 1139 tconn->last_received = jiffies;
b411b363 1140
69bc7bc3 1141 return err;
b411b363
PR
1142}
1143
4b0007c0 1144static void drbd_flush(struct drbd_tconn *tconn)
b411b363
PR
1145{
1146 int rv;
4b0007c0
PR
1147 struct drbd_conf *mdev;
1148 int vnr;
1149
1150 if (tconn->write_ordering >= WO_bdev_flush) {
615e087f 1151 rcu_read_lock();
4b0007c0 1152 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
615e087f
LE
1153 if (!get_ldev(mdev))
1154 continue;
1155 kref_get(&mdev->kref);
1156 rcu_read_unlock();
1157
1158 rv = blkdev_issue_flush(mdev->ldev->backing_bdev,
1159 GFP_NOIO, NULL);
1160 if (rv) {
1161 dev_info(DEV, "local disk flush failed with status %d\n", rv);
1162 /* would rather check on EOPNOTSUPP, but that is not reliable.
1163 * don't try again for ANY return value != 0
1164 * if (rv == -EOPNOTSUPP) */
1165 drbd_bump_write_ordering(tconn, WO_drain_io);
4b0007c0 1166 }
615e087f
LE
1167 put_ldev(mdev);
1168 kref_put(&mdev->kref, &drbd_minor_destroy);
b411b363 1169
615e087f
LE
1170 rcu_read_lock();
1171 if (rv)
1172 break;
b411b363 1173 }
615e087f 1174 rcu_read_unlock();
b411b363 1175 }
b411b363
PR
1176}
1177
1178/**
1179 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
1180 * @mdev: DRBD device.
1181 * @epoch: Epoch object.
1182 * @ev: Epoch event.
1183 */
1e9dd291 1184static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
b411b363
PR
1185 struct drbd_epoch *epoch,
1186 enum epoch_event ev)
1187{
2451fc3b 1188 int epoch_size;
b411b363 1189 struct drbd_epoch *next_epoch;
b411b363
PR
1190 enum finish_epoch rv = FE_STILL_LIVE;
1191
12038a3a 1192 spin_lock(&tconn->epoch_lock);
b411b363
PR
1193 do {
1194 next_epoch = NULL;
b411b363
PR
1195
1196 epoch_size = atomic_read(&epoch->epoch_size);
1197
1198 switch (ev & ~EV_CLEANUP) {
1199 case EV_PUT:
1200 atomic_dec(&epoch->active);
1201 break;
1202 case EV_GOT_BARRIER_NR:
1203 set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
b411b363
PR
1204 break;
1205 case EV_BECAME_LAST:
1206 /* nothing to do*/
1207 break;
1208 }
1209
b411b363
PR
1210 if (epoch_size != 0 &&
1211 atomic_read(&epoch->active) == 0 &&
80f9fd55 1212 (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
b411b363 1213 if (!(ev & EV_CLEANUP)) {
12038a3a 1214 spin_unlock(&tconn->epoch_lock);
9ed57dcb 1215 drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size);
12038a3a 1216 spin_lock(&tconn->epoch_lock);
b411b363 1217 }
9ed57dcb
LE
1218#if 0
1219 /* FIXME: dec unacked on connection, once we have
1220 * something to count pending connection packets in. */
80f9fd55 1221 if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
9ed57dcb
LE
1222 dec_unacked(epoch->tconn);
1223#endif
b411b363 1224
12038a3a 1225 if (tconn->current_epoch != epoch) {
b411b363
PR
1226 next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1227 list_del(&epoch->list);
1228 ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
12038a3a 1229 tconn->epochs--;
b411b363
PR
1230 kfree(epoch);
1231
1232 if (rv == FE_STILL_LIVE)
1233 rv = FE_DESTROYED;
1234 } else {
1235 epoch->flags = 0;
1236 atomic_set(&epoch->epoch_size, 0);
698f9315 1237 /* atomic_set(&epoch->active, 0); is already zero */
b411b363
PR
1238 if (rv == FE_STILL_LIVE)
1239 rv = FE_RECYCLED;
1240 }
1241 }
1242
1243 if (!next_epoch)
1244 break;
1245
1246 epoch = next_epoch;
1247 } while (1);
1248
12038a3a 1249 spin_unlock(&tconn->epoch_lock);
b411b363 1250
b411b363
PR
1251 return rv;
1252}
1253
1254/**
1255 * drbd_bump_write_ordering() - Fall back to an other write ordering method
4b0007c0 1256 * @tconn: DRBD connection.
b411b363
PR
1257 * @wo: Write ordering method to try.
1258 */
4b0007c0 1259void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo)
b411b363 1260{
daeda1cc 1261 struct disk_conf *dc;
4b0007c0 1262 struct drbd_conf *mdev;
b411b363 1263 enum write_ordering_e pwo;
4b0007c0 1264 int vnr;
b411b363
PR
1265 static char *write_ordering_str[] = {
1266 [WO_none] = "none",
1267 [WO_drain_io] = "drain",
1268 [WO_bdev_flush] = "flush",
b411b363
PR
1269 };
1270
4b0007c0 1271 pwo = tconn->write_ordering;
b411b363 1272 wo = min(pwo, wo);
daeda1cc 1273 rcu_read_lock();
4b0007c0 1274 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
27eb13e9 1275 if (!get_ldev_if_state(mdev, D_ATTACHING))
4b0007c0
PR
1276 continue;
1277 dc = rcu_dereference(mdev->ldev->disk_conf);
1278
1279 if (wo == WO_bdev_flush && !dc->disk_flushes)
1280 wo = WO_drain_io;
1281 if (wo == WO_drain_io && !dc->disk_drain)
1282 wo = WO_none;
1283 put_ldev(mdev);
1284 }
daeda1cc 1285 rcu_read_unlock();
4b0007c0
PR
1286 tconn->write_ordering = wo;
1287 if (pwo != tconn->write_ordering || wo == WO_bdev_flush)
1288 conn_info(tconn, "Method to ensure write ordering: %s\n", write_ordering_str[tconn->write_ordering]);
b411b363
PR
1289}
1290
45bb912b 1291/**
fbe29dec 1292 * drbd_submit_peer_request()
45bb912b 1293 * @mdev: DRBD device.
db830c46 1294 * @peer_req: peer request
45bb912b 1295 * @rw: flag field, see bio->bi_rw
10f6d992
LE
1296 *
1297 * May spread the pages to multiple bios,
1298 * depending on bio_add_page restrictions.
1299 *
1300 * Returns 0 if all bios have been submitted,
1301 * -ENOMEM if we could not allocate enough bios,
1302 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
1303 * single page to an empty bio (which should never happen and likely indicates
1304 * that the lower level IO stack is in some way broken). This has been observed
1305 * on certain Xen deployments.
45bb912b
LE
1306 */
1307/* TODO allocate from our own bio_set. */
fbe29dec
AG
1308int drbd_submit_peer_request(struct drbd_conf *mdev,
1309 struct drbd_peer_request *peer_req,
1310 const unsigned rw, const int fault_type)
45bb912b
LE
1311{
1312 struct bio *bios = NULL;
1313 struct bio *bio;
db830c46
AG
1314 struct page *page = peer_req->pages;
1315 sector_t sector = peer_req->i.sector;
1316 unsigned ds = peer_req->i.size;
45bb912b
LE
1317 unsigned n_bios = 0;
1318 unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
10f6d992 1319 int err = -ENOMEM;
45bb912b
LE
1320
1321 /* In most cases, we will only need one bio. But in case the lower
1322 * level restrictions happen to be different at this offset on this
1323 * side than those of the sending peer, we may need to submit the
9476f39d
LE
1324 * request in more than one bio.
1325 *
1326 * Plain bio_alloc is good enough here, this is no DRBD internally
1327 * generated bio, but a bio allocated on behalf of the peer.
1328 */
45bb912b
LE
1329next_bio:
1330 bio = bio_alloc(GFP_NOIO, nr_pages);
1331 if (!bio) {
1332 dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
1333 goto fail;
1334 }
db830c46 1335 /* > peer_req->i.sector, unless this is the first bio */
4f024f37 1336 bio->bi_iter.bi_sector = sector;
45bb912b 1337 bio->bi_bdev = mdev->ldev->backing_bdev;
45bb912b 1338 bio->bi_rw = rw;
db830c46 1339 bio->bi_private = peer_req;
fcefa62e 1340 bio->bi_end_io = drbd_peer_request_endio;
45bb912b
LE
1341
1342 bio->bi_next = bios;
1343 bios = bio;
1344 ++n_bios;
1345
1346 page_chain_for_each(page) {
1347 unsigned len = min_t(unsigned, ds, PAGE_SIZE);
1348 if (!bio_add_page(bio, page, len, 0)) {
10f6d992
LE
1349 /* A single page must always be possible!
1350 * But in case it fails anyways,
1351 * we deal with it, and complain (below). */
1352 if (bio->bi_vcnt == 0) {
1353 dev_err(DEV,
1354 "bio_add_page failed for len=%u, "
1355 "bi_vcnt=0 (bi_sector=%llu)\n",
4f024f37 1356 len, (uint64_t)bio->bi_iter.bi_sector);
10f6d992
LE
1357 err = -ENOSPC;
1358 goto fail;
1359 }
45bb912b
LE
1360 goto next_bio;
1361 }
1362 ds -= len;
1363 sector += len >> 9;
1364 --nr_pages;
1365 }
1366 D_ASSERT(page == NULL);
1367 D_ASSERT(ds == 0);
1368
db830c46 1369 atomic_set(&peer_req->pending_bios, n_bios);
45bb912b
LE
1370 do {
1371 bio = bios;
1372 bios = bios->bi_next;
1373 bio->bi_next = NULL;
1374
45bb912b 1375 drbd_generic_make_request(mdev, fault_type, bio);
45bb912b 1376 } while (bios);
45bb912b
LE
1377 return 0;
1378
1379fail:
1380 while (bios) {
1381 bio = bios;
1382 bios = bios->bi_next;
1383 bio_put(bio);
1384 }
10f6d992 1385 return err;
45bb912b
LE
1386}
1387
53840641 1388static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
db830c46 1389 struct drbd_peer_request *peer_req)
53840641 1390{
db830c46 1391 struct drbd_interval *i = &peer_req->i;
53840641
AG
1392
1393 drbd_remove_interval(&mdev->write_requests, i);
1394 drbd_clear_interval(i);
1395
6c852bec 1396 /* Wake up any processes waiting for this peer request to complete. */
53840641
AG
1397 if (i->waiting)
1398 wake_up(&mdev->misc_wait);
1399}
1400
77fede51
PR
1401void conn_wait_active_ee_empty(struct drbd_tconn *tconn)
1402{
1403 struct drbd_conf *mdev;
1404 int vnr;
1405
1406 rcu_read_lock();
1407 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1408 kref_get(&mdev->kref);
1409 rcu_read_unlock();
1410 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1411 kref_put(&mdev->kref, &drbd_minor_destroy);
1412 rcu_read_lock();
1413 }
1414 rcu_read_unlock();
1415}
1416
4a76b161 1417static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 1418{
2451fc3b 1419 int rv;
e658983a 1420 struct p_barrier *p = pi->data;
b411b363
PR
1421 struct drbd_epoch *epoch;
1422
9ed57dcb
LE
1423 /* FIXME these are unacked on connection,
1424 * not a specific (peer)device.
1425 */
12038a3a 1426 tconn->current_epoch->barrier_nr = p->barrier;
9ed57dcb 1427 tconn->current_epoch->tconn = tconn;
1e9dd291 1428 rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR);
b411b363
PR
1429
1430 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1431 * the activity log, which means it would not be resynced in case the
1432 * R_PRIMARY crashes now.
1433 * Therefore we must send the barrier_ack after the barrier request was
1434 * completed. */
4b0007c0 1435 switch (tconn->write_ordering) {
b411b363
PR
1436 case WO_none:
1437 if (rv == FE_RECYCLED)
82bc0194 1438 return 0;
2451fc3b
PR
1439
1440 /* receiver context, in the writeout path of the other node.
1441 * avoid potential distributed deadlock */
1442 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1443 if (epoch)
1444 break;
1445 else
9ed57dcb 1446 conn_warn(tconn, "Allocation of an epoch failed, slowing down\n");
2451fc3b 1447 /* Fall through */
b411b363
PR
1448
1449 case WO_bdev_flush:
1450 case WO_drain_io:
77fede51 1451 conn_wait_active_ee_empty(tconn);
4b0007c0 1452 drbd_flush(tconn);
2451fc3b 1453
12038a3a 1454 if (atomic_read(&tconn->current_epoch->epoch_size)) {
2451fc3b
PR
1455 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1456 if (epoch)
1457 break;
b411b363
PR
1458 }
1459
82bc0194 1460 return 0;
2451fc3b 1461 default:
9ed57dcb 1462 conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);
82bc0194 1463 return -EIO;
b411b363
PR
1464 }
1465
1466 epoch->flags = 0;
1467 atomic_set(&epoch->epoch_size, 0);
1468 atomic_set(&epoch->active, 0);
1469
12038a3a
PR
1470 spin_lock(&tconn->epoch_lock);
1471 if (atomic_read(&tconn->current_epoch->epoch_size)) {
1472 list_add(&epoch->list, &tconn->current_epoch->list);
1473 tconn->current_epoch = epoch;
1474 tconn->epochs++;
b411b363
PR
1475 } else {
1476 /* The current_epoch got recycled while we allocated this one... */
1477 kfree(epoch);
1478 }
12038a3a 1479 spin_unlock(&tconn->epoch_lock);
b411b363 1480
82bc0194 1481 return 0;
b411b363
PR
1482}
1483
1484/* used from receive_RSDataReply (recv_resync_read)
1485 * and from receive_Data */
f6ffca9f
AG
1486static struct drbd_peer_request *
1487read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
1488 int data_size) __must_hold(local)
b411b363 1489{
6666032a 1490 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
db830c46 1491 struct drbd_peer_request *peer_req;
b411b363 1492 struct page *page;
a5c31904 1493 int dgs, ds, err;
a0638456
PR
1494 void *dig_in = mdev->tconn->int_dig_in;
1495 void *dig_vv = mdev->tconn->int_dig_vv;
6b4388ac 1496 unsigned long *data;
b411b363 1497
88104ca4
AG
1498 dgs = 0;
1499 if (mdev->tconn->peer_integrity_tfm) {
1500 dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
9f5bdc33
AG
1501 /*
1502 * FIXME: Receive the incoming digest into the receive buffer
1503 * here, together with its struct p_data?
1504 */
a5c31904
AG
1505 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1506 if (err)
b411b363 1507 return NULL;
88104ca4 1508 data_size -= dgs;
b411b363
PR
1509 }
1510
841ce241
AG
1511 if (!expect(IS_ALIGNED(data_size, 512)))
1512 return NULL;
1513 if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
1514 return NULL;
b411b363 1515
6666032a
LE
1516 /* even though we trust out peer,
1517 * we sometimes have to double check. */
1518 if (sector + (data_size>>9) > capacity) {
fdda6544
LE
1519 dev_err(DEV, "request from peer beyond end of local disk: "
1520 "capacity: %llus < sector: %llus + size: %u\n",
6666032a
LE
1521 (unsigned long long)capacity,
1522 (unsigned long long)sector, data_size);
1523 return NULL;
1524 }
1525
b411b363
PR
1526 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1527 * "criss-cross" setup, that might cause write-out on some other DRBD,
1528 * which in turn might block on the other node at this very place. */
0db55363 1529 peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);
db830c46 1530 if (!peer_req)
b411b363 1531 return NULL;
45bb912b 1532
a73ff323 1533 if (!data_size)
81a3537a 1534 return peer_req;
a73ff323 1535
b411b363 1536 ds = data_size;
db830c46 1537 page = peer_req->pages;
45bb912b
LE
1538 page_chain_for_each(page) {
1539 unsigned len = min_t(int, ds, PAGE_SIZE);
6b4388ac 1540 data = kmap(page);
a5c31904 1541 err = drbd_recv_all_warn(mdev->tconn, data, len);
0cf9d27e 1542 if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
6b4388ac
PR
1543 dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1544 data[0] = data[0] ^ (unsigned long)-1;
1545 }
b411b363 1546 kunmap(page);
a5c31904 1547 if (err) {
3967deb1 1548 drbd_free_peer_req(mdev, peer_req);
b411b363
PR
1549 return NULL;
1550 }
a5c31904 1551 ds -= len;
b411b363
PR
1552 }
1553
1554 if (dgs) {
5b614abe 1555 drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
b411b363 1556 if (memcmp(dig_in, dig_vv, dgs)) {
470be44a
LE
1557 dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
1558 (unsigned long long)sector, data_size);
3967deb1 1559 drbd_free_peer_req(mdev, peer_req);
b411b363
PR
1560 return NULL;
1561 }
1562 }
1563 mdev->recv_cnt += data_size>>9;
db830c46 1564 return peer_req;
b411b363
PR
1565}
1566
1567/* drbd_drain_block() just takes a data block
1568 * out of the socket input buffer, and discards it.
1569 */
1570static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1571{
1572 struct page *page;
a5c31904 1573 int err = 0;
b411b363
PR
1574 void *data;
1575
c3470cde 1576 if (!data_size)
fc5be839 1577 return 0;
c3470cde 1578
c37c8ecf 1579 page = drbd_alloc_pages(mdev, 1, 1);
b411b363
PR
1580
1581 data = kmap(page);
1582 while (data_size) {
fc5be839
AG
1583 unsigned int len = min_t(int, data_size, PAGE_SIZE);
1584
a5c31904
AG
1585 err = drbd_recv_all_warn(mdev->tconn, data, len);
1586 if (err)
b411b363 1587 break;
a5c31904 1588 data_size -= len;
b411b363
PR
1589 }
1590 kunmap(page);
5cc287e0 1591 drbd_free_pages(mdev, page, 0);
fc5be839 1592 return err;
b411b363
PR
1593}
1594
1595static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1596 sector_t sector, int data_size)
1597{
7988613b
KO
1598 struct bio_vec bvec;
1599 struct bvec_iter iter;
b411b363 1600 struct bio *bio;
7988613b 1601 int dgs, err, expect;
a0638456
PR
1602 void *dig_in = mdev->tconn->int_dig_in;
1603 void *dig_vv = mdev->tconn->int_dig_vv;
b411b363 1604
88104ca4
AG
1605 dgs = 0;
1606 if (mdev->tconn->peer_integrity_tfm) {
1607 dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
a5c31904
AG
1608 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1609 if (err)
1610 return err;
88104ca4 1611 data_size -= dgs;
b411b363
PR
1612 }
1613
b411b363
PR
1614 /* optimistically update recv_cnt. if receiving fails below,
1615 * we disconnect anyways, and counters will be reset. */
1616 mdev->recv_cnt += data_size>>9;
1617
1618 bio = req->master_bio;
4f024f37 1619 D_ASSERT(sector == bio->bi_iter.bi_sector);
b411b363 1620
7988613b
KO
1621 bio_for_each_segment(bvec, bio, iter) {
1622 void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
1623 expect = min_t(int, data_size, bvec.bv_len);
a5c31904 1624 err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
7988613b 1625 kunmap(bvec.bv_page);
a5c31904
AG
1626 if (err)
1627 return err;
1628 data_size -= expect;
b411b363
PR
1629 }
1630
1631 if (dgs) {
5b614abe 1632 drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
b411b363
PR
1633 if (memcmp(dig_in, dig_vv, dgs)) {
1634 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
28284cef 1635 return -EINVAL;
b411b363
PR
1636 }
1637 }
1638
1639 D_ASSERT(data_size == 0);
28284cef 1640 return 0;
b411b363
PR
1641}
1642
a990be46
AG
1643/*
1644 * e_end_resync_block() is called in asender context via
1645 * drbd_finish_peer_reqs().
1646 */
99920dc5 1647static int e_end_resync_block(struct drbd_work *w, int unused)
b411b363 1648{
8050e6d0
AG
1649 struct drbd_peer_request *peer_req =
1650 container_of(w, struct drbd_peer_request, w);
00d56944 1651 struct drbd_conf *mdev = w->mdev;
db830c46 1652 sector_t sector = peer_req->i.sector;
99920dc5 1653 int err;
b411b363 1654
db830c46 1655 D_ASSERT(drbd_interval_empty(&peer_req->i));
b411b363 1656
db830c46
AG
1657 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1658 drbd_set_in_sync(mdev, sector, peer_req->i.size);
99920dc5 1659 err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
b411b363
PR
1660 } else {
1661 /* Record failure to sync */
db830c46 1662 drbd_rs_failed_io(mdev, sector, peer_req->i.size);
b411b363 1663
99920dc5 1664 err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
b411b363
PR
1665 }
1666 dec_unacked(mdev);
1667
99920dc5 1668 return err;
b411b363
PR
1669}
1670
1671static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1672{
db830c46 1673 struct drbd_peer_request *peer_req;
b411b363 1674
db830c46
AG
1675 peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
1676 if (!peer_req)
45bb912b 1677 goto fail;
b411b363
PR
1678
1679 dec_rs_pending(mdev);
1680
b411b363
PR
1681 inc_unacked(mdev);
1682 /* corresponding dec_unacked() in e_end_resync_block()
1683 * respective _drbd_clear_done_ee */
1684
db830c46 1685 peer_req->w.cb = e_end_resync_block;
45bb912b 1686
87eeee41 1687 spin_lock_irq(&mdev->tconn->req_lock);
db830c46 1688 list_add(&peer_req->w.list, &mdev->sync_ee);
87eeee41 1689 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 1690
0f0601f4 1691 atomic_add(data_size >> 9, &mdev->rs_sect_ev);
fbe29dec 1692 if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
e1c1b0fc 1693 return 0;
b411b363 1694
10f6d992
LE
1695 /* don't care for the reason here */
1696 dev_err(DEV, "submit failed, triggering re-connect\n");
87eeee41 1697 spin_lock_irq(&mdev->tconn->req_lock);
db830c46 1698 list_del(&peer_req->w.list);
87eeee41 1699 spin_unlock_irq(&mdev->tconn->req_lock);
22cc37a9 1700
3967deb1 1701 drbd_free_peer_req(mdev, peer_req);
45bb912b
LE
1702fail:
1703 put_ldev(mdev);
e1c1b0fc 1704 return -EIO;
b411b363
PR
1705}
1706
668eebc6 1707static struct drbd_request *
bc9c5c41
AG
1708find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
1709 sector_t sector, bool missing_ok, const char *func)
51624585 1710{
51624585
AG
1711 struct drbd_request *req;
1712
bc9c5c41
AG
1713 /* Request object according to our peer */
1714 req = (struct drbd_request *)(unsigned long)id;
5e472264 1715 if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
668eebc6 1716 return req;
c3afd8f5 1717 if (!missing_ok) {
5af172ed 1718 dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
c3afd8f5
AG
1719 (unsigned long)id, (unsigned long long)sector);
1720 }
51624585 1721 return NULL;
b411b363
PR
1722}
1723
4a76b161 1724static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 1725{
4a76b161 1726 struct drbd_conf *mdev;
b411b363
PR
1727 struct drbd_request *req;
1728 sector_t sector;
82bc0194 1729 int err;
e658983a 1730 struct p_data *p = pi->data;
4a76b161
AG
1731
1732 mdev = vnr_to_mdev(tconn, pi->vnr);
1733 if (!mdev)
1734 return -EIO;
b411b363
PR
1735
1736 sector = be64_to_cpu(p->sector);
1737
87eeee41 1738 spin_lock_irq(&mdev->tconn->req_lock);
bc9c5c41 1739 req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
87eeee41 1740 spin_unlock_irq(&mdev->tconn->req_lock);
c3afd8f5 1741 if (unlikely(!req))
82bc0194 1742 return -EIO;
b411b363 1743
24c4830c 1744 /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
b411b363
PR
1745 * special casing it there for the various failure cases.
1746 * still no race with drbd_fail_pending_reads */
e2857216 1747 err = recv_dless_read(mdev, req, sector, pi->size);
82bc0194 1748 if (!err)
8554df1c 1749 req_mod(req, DATA_RECEIVED);
b411b363
PR
1750 /* else: nothing. handled from drbd_disconnect...
1751 * I don't think we may complete this just yet
1752 * in case we are "on-disconnect: freeze" */
1753
82bc0194 1754 return err;
b411b363
PR
1755}
1756
4a76b161 1757static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 1758{
4a76b161 1759 struct drbd_conf *mdev;
b411b363 1760 sector_t sector;
82bc0194 1761 int err;
e658983a 1762 struct p_data *p = pi->data;
4a76b161
AG
1763
1764 mdev = vnr_to_mdev(tconn, pi->vnr);
1765 if (!mdev)
1766 return -EIO;
b411b363
PR
1767
1768 sector = be64_to_cpu(p->sector);
1769 D_ASSERT(p->block_id == ID_SYNCER);
1770
1771 if (get_ldev(mdev)) {
1772 /* data is submitted to disk within recv_resync_read.
1773 * corresponding put_ldev done below on error,
fcefa62e 1774 * or in drbd_peer_request_endio. */
e2857216 1775 err = recv_resync_read(mdev, sector, pi->size);
b411b363
PR
1776 } else {
1777 if (__ratelimit(&drbd_ratelimit_state))
1778 dev_err(DEV, "Can not write resync data to local disk.\n");
1779
e2857216 1780 err = drbd_drain_block(mdev, pi->size);
b411b363 1781
e2857216 1782 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
b411b363
PR
1783 }
1784
e2857216 1785 atomic_add(pi->size >> 9, &mdev->rs_sect_in);
778f271d 1786
82bc0194 1787 return err;
b411b363
PR
1788}
1789
7be8da07
AG
1790static void restart_conflicting_writes(struct drbd_conf *mdev,
1791 sector_t sector, int size)
b411b363 1792{
7be8da07
AG
1793 struct drbd_interval *i;
1794 struct drbd_request *req;
1795
1796 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
1797 if (!i->local)
1798 continue;
1799 req = container_of(i, struct drbd_request, i);
1800 if (req->rq_state & RQ_LOCAL_PENDING ||
1801 !(req->rq_state & RQ_POSTPONED))
1802 continue;
2312f0b3
LE
1803 /* as it is RQ_POSTPONED, this will cause it to
1804 * be queued on the retry workqueue. */
d4dabbe2 1805 __req_mod(req, CONFLICT_RESOLVED, NULL);
7be8da07
AG
1806 }
1807}
b411b363 1808
a990be46
AG
1809/*
1810 * e_end_block() is called in asender context via drbd_finish_peer_reqs().
b411b363 1811 */
99920dc5 1812static int e_end_block(struct drbd_work *w, int cancel)
b411b363 1813{
8050e6d0
AG
1814 struct drbd_peer_request *peer_req =
1815 container_of(w, struct drbd_peer_request, w);
00d56944 1816 struct drbd_conf *mdev = w->mdev;
db830c46 1817 sector_t sector = peer_req->i.sector;
99920dc5 1818 int err = 0, pcmd;
b411b363 1819
303d1448 1820 if (peer_req->flags & EE_SEND_WRITE_ACK) {
db830c46 1821 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
b411b363
PR
1822 pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1823 mdev->state.conn <= C_PAUSED_SYNC_T &&
db830c46 1824 peer_req->flags & EE_MAY_SET_IN_SYNC) ?
b411b363 1825 P_RS_WRITE_ACK : P_WRITE_ACK;
99920dc5 1826 err = drbd_send_ack(mdev, pcmd, peer_req);
b411b363 1827 if (pcmd == P_RS_WRITE_ACK)
db830c46 1828 drbd_set_in_sync(mdev, sector, peer_req->i.size);
b411b363 1829 } else {
99920dc5 1830 err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
b411b363
PR
1831 /* we expect it to be marked out of sync anyways...
1832 * maybe assert this? */
1833 }
1834 dec_unacked(mdev);
1835 }
1836 /* we delete from the conflict detection hash _after_ we sent out the
1837 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
302bdeae 1838 if (peer_req->flags & EE_IN_INTERVAL_TREE) {
87eeee41 1839 spin_lock_irq(&mdev->tconn->req_lock);
db830c46
AG
1840 D_ASSERT(!drbd_interval_empty(&peer_req->i));
1841 drbd_remove_epoch_entry_interval(mdev, peer_req);
7be8da07
AG
1842 if (peer_req->flags & EE_RESTART_REQUESTS)
1843 restart_conflicting_writes(mdev, sector, peer_req->i.size);
87eeee41 1844 spin_unlock_irq(&mdev->tconn->req_lock);
bb3bfe96 1845 } else
db830c46 1846 D_ASSERT(drbd_interval_empty(&peer_req->i));
b411b363 1847
1e9dd291 1848 drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
b411b363 1849
99920dc5 1850 return err;
b411b363
PR
1851}
1852
7be8da07 1853static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
b411b363 1854{
7be8da07 1855 struct drbd_conf *mdev = w->mdev;
8050e6d0
AG
1856 struct drbd_peer_request *peer_req =
1857 container_of(w, struct drbd_peer_request, w);
99920dc5 1858 int err;
b411b363 1859
99920dc5 1860 err = drbd_send_ack(mdev, ack, peer_req);
b411b363
PR
1861 dec_unacked(mdev);
1862
99920dc5 1863 return err;
b411b363
PR
1864}
1865
d4dabbe2 1866static int e_send_superseded(struct drbd_work *w, int unused)
7be8da07 1867{
d4dabbe2 1868 return e_send_ack(w, P_SUPERSEDED);
7be8da07
AG
1869}
1870
99920dc5 1871static int e_send_retry_write(struct drbd_work *w, int unused)
7be8da07
AG
1872{
1873 struct drbd_tconn *tconn = w->mdev->tconn;
1874
1875 return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
d4dabbe2 1876 P_RETRY_WRITE : P_SUPERSEDED);
7be8da07 1877}
b411b363 1878
3e394da1
AG
1879static bool seq_greater(u32 a, u32 b)
1880{
1881 /*
1882 * We assume 32-bit wrap-around here.
1883 * For 24-bit wrap-around, we would have to shift:
1884 * a <<= 8; b <<= 8;
1885 */
1886 return (s32)a - (s32)b > 0;
1887}
b411b363 1888
3e394da1
AG
1889static u32 seq_max(u32 a, u32 b)
1890{
1891 return seq_greater(a, b) ? a : b;
b411b363
PR
1892}
1893
43ae077d 1894static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
3e394da1 1895{
3c13b680 1896 unsigned int newest_peer_seq;
3e394da1 1897
b874d231 1898 if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)) {
7be8da07 1899 spin_lock(&mdev->peer_seq_lock);
3c13b680
LE
1900 newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
1901 mdev->peer_seq = newest_peer_seq;
7be8da07 1902 spin_unlock(&mdev->peer_seq_lock);
3c13b680
LE
1903 /* wake up only if we actually changed mdev->peer_seq */
1904 if (peer_seq == newest_peer_seq)
7be8da07
AG
1905 wake_up(&mdev->seq_wait);
1906 }
b411b363
PR
1907}
1908
d93f6302 1909static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
b6a370ba 1910{
d93f6302
LE
1911 return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
1912}
b6a370ba 1913
d93f6302 1914/* maybe change sync_ee into interval trees as well? */
3ea35df8 1915static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
d93f6302
LE
1916{
1917 struct drbd_peer_request *rs_req;
b6a370ba
PR
1918 bool rv = 0;
1919
d93f6302
LE
1920 spin_lock_irq(&mdev->tconn->req_lock);
1921 list_for_each_entry(rs_req, &mdev->sync_ee, w.list) {
1922 if (overlaps(peer_req->i.sector, peer_req->i.size,
1923 rs_req->i.sector, rs_req->i.size)) {
b6a370ba
PR
1924 rv = 1;
1925 break;
1926 }
1927 }
d93f6302 1928 spin_unlock_irq(&mdev->tconn->req_lock);
b6a370ba
PR
1929
1930 return rv;
1931}
1932
b411b363
PR
1933/* Called from receive_Data.
1934 * Synchronize packets on sock with packets on msock.
1935 *
1936 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1937 * packet traveling on msock, they are still processed in the order they have
1938 * been sent.
1939 *
1940 * Note: we don't care for Ack packets overtaking P_DATA packets.
1941 *
1942 * In case packet_seq is larger than mdev->peer_seq number, there are
1943 * outstanding packets on the msock. We wait for them to arrive.
1944 * In case we are the logically next packet, we update mdev->peer_seq
1945 * ourselves. Correctly handles 32bit wrap around.
1946 *
1947 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1948 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1949 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1950 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
1951 *
1952 * returns 0 if we may process the packet,
1953 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
7be8da07 1954static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
b411b363
PR
1955{
1956 DEFINE_WAIT(wait);
b411b363 1957 long timeout;
b874d231 1958 int ret = 0, tp;
7be8da07 1959
b874d231 1960 if (!test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags))
7be8da07
AG
1961 return 0;
1962
b411b363
PR
1963 spin_lock(&mdev->peer_seq_lock);
1964 for (;;) {
7be8da07
AG
1965 if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
1966 mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
b411b363 1967 break;
7be8da07 1968 }
b874d231 1969
b411b363
PR
1970 if (signal_pending(current)) {
1971 ret = -ERESTARTSYS;
1972 break;
1973 }
b874d231
PR
1974
1975 rcu_read_lock();
1976 tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
1977 rcu_read_unlock();
1978
1979 if (!tp)
1980 break;
1981
1982 /* Only need to wait if two_primaries is enabled */
7be8da07 1983 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
b411b363 1984 spin_unlock(&mdev->peer_seq_lock);
44ed167d
PR
1985 rcu_read_lock();
1986 timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10;
1987 rcu_read_unlock();
71b1c1eb 1988 timeout = schedule_timeout(timeout);
b411b363 1989 spin_lock(&mdev->peer_seq_lock);
7be8da07 1990 if (!timeout) {
b411b363 1991 ret = -ETIMEDOUT;
71b1c1eb 1992 dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
b411b363
PR
1993 break;
1994 }
1995 }
b411b363 1996 spin_unlock(&mdev->peer_seq_lock);
7be8da07 1997 finish_wait(&mdev->seq_wait, &wait);
b411b363
PR
1998 return ret;
1999}
2000
688593c5
LE
2001/* see also bio_flags_to_wire()
2002 * DRBD_REQ_*, because we need to semantically map the flags to data packet
2003 * flags and back. We may replicate to other kernel versions. */
2004static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
76d2e7ec 2005{
688593c5
LE
2006 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
2007 (dpf & DP_FUA ? REQ_FUA : 0) |
2008 (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
2009 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
76d2e7ec
PR
2010}
2011
7be8da07
AG
2012static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
2013 unsigned int size)
2014{
2015 struct drbd_interval *i;
2016
2017 repeat:
2018 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
2019 struct drbd_request *req;
2020 struct bio_and_error m;
2021
2022 if (!i->local)
2023 continue;
2024 req = container_of(i, struct drbd_request, i);
2025 if (!(req->rq_state & RQ_POSTPONED))
2026 continue;
2027 req->rq_state &= ~RQ_POSTPONED;
2028 __req_mod(req, NEG_ACKED, &m);
2029 spin_unlock_irq(&mdev->tconn->req_lock);
2030 if (m.bio)
2031 complete_master_bio(mdev, &m);
2032 spin_lock_irq(&mdev->tconn->req_lock);
2033 goto repeat;
2034 }
2035}
2036
2037static int handle_write_conflicts(struct drbd_conf *mdev,
2038 struct drbd_peer_request *peer_req)
2039{
2040 struct drbd_tconn *tconn = mdev->tconn;
427c0434 2041 bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &tconn->flags);
7be8da07
AG
2042 sector_t sector = peer_req->i.sector;
2043 const unsigned int size = peer_req->i.size;
2044 struct drbd_interval *i;
2045 bool equal;
2046 int err;
2047
2048 /*
2049 * Inserting the peer request into the write_requests tree will prevent
2050 * new conflicting local requests from being added.
2051 */
2052 drbd_insert_interval(&mdev->write_requests, &peer_req->i);
2053
2054 repeat:
2055 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
2056 if (i == &peer_req->i)
2057 continue;
2058
2059 if (!i->local) {
2060 /*
2061 * Our peer has sent a conflicting remote request; this
2062 * should not happen in a two-node setup. Wait for the
2063 * earlier peer request to complete.
2064 */
2065 err = drbd_wait_misc(mdev, i);
2066 if (err)
2067 goto out;
2068 goto repeat;
2069 }
2070
2071 equal = i->sector == sector && i->size == size;
2072 if (resolve_conflicts) {
2073 /*
2074 * If the peer request is fully contained within the
d4dabbe2
LE
2075 * overlapping request, it can be considered overwritten
2076 * and thus superseded; otherwise, it will be retried
2077 * once all overlapping requests have completed.
7be8da07 2078 */
d4dabbe2 2079 bool superseded = i->sector <= sector && i->sector +
7be8da07
AG
2080 (i->size >> 9) >= sector + (size >> 9);
2081
2082 if (!equal)
2083 dev_alert(DEV, "Concurrent writes detected: "
2084 "local=%llus +%u, remote=%llus +%u, "
2085 "assuming %s came first\n",
2086 (unsigned long long)i->sector, i->size,
2087 (unsigned long long)sector, size,
d4dabbe2 2088 superseded ? "local" : "remote");
7be8da07
AG
2089
2090 inc_unacked(mdev);
d4dabbe2 2091 peer_req->w.cb = superseded ? e_send_superseded :
7be8da07
AG
2092 e_send_retry_write;
2093 list_add_tail(&peer_req->w.list, &mdev->done_ee);
2094 wake_asender(mdev->tconn);
2095
2096 err = -ENOENT;
2097 goto out;
2098 } else {
2099 struct drbd_request *req =
2100 container_of(i, struct drbd_request, i);
2101
2102 if (!equal)
2103 dev_alert(DEV, "Concurrent writes detected: "
2104 "local=%llus +%u, remote=%llus +%u\n",
2105 (unsigned long long)i->sector, i->size,
2106 (unsigned long long)sector, size);
2107
2108 if (req->rq_state & RQ_LOCAL_PENDING ||
2109 !(req->rq_state & RQ_POSTPONED)) {
2110 /*
2111 * Wait for the node with the discard flag to
d4dabbe2
LE
2112 * decide if this request has been superseded
2113 * or needs to be retried.
2114 * Requests that have been superseded will
7be8da07
AG
2115 * disappear from the write_requests tree.
2116 *
2117 * In addition, wait for the conflicting
2118 * request to finish locally before submitting
2119 * the conflicting peer request.
2120 */
2121 err = drbd_wait_misc(mdev, &req->i);
2122 if (err) {
2123 _conn_request_state(mdev->tconn,
2124 NS(conn, C_TIMEOUT),
2125 CS_HARD);
2126 fail_postponed_requests(mdev, sector, size);
2127 goto out;
2128 }
2129 goto repeat;
2130 }
2131 /*
2132 * Remember to restart the conflicting requests after
2133 * the new peer request has completed.
2134 */
2135 peer_req->flags |= EE_RESTART_REQUESTS;
2136 }
2137 }
2138 err = 0;
2139
2140 out:
2141 if (err)
2142 drbd_remove_epoch_entry_interval(mdev, peer_req);
2143 return err;
2144}
2145
b411b363 2146/* mirrored write */
4a76b161 2147static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 2148{
4a76b161 2149 struct drbd_conf *mdev;
b411b363 2150 sector_t sector;
db830c46 2151 struct drbd_peer_request *peer_req;
e658983a 2152 struct p_data *p = pi->data;
7be8da07 2153 u32 peer_seq = be32_to_cpu(p->seq_num);
b411b363
PR
2154 int rw = WRITE;
2155 u32 dp_flags;
302bdeae 2156 int err, tp;
b411b363 2157
4a76b161
AG
2158 mdev = vnr_to_mdev(tconn, pi->vnr);
2159 if (!mdev)
2160 return -EIO;
b411b363 2161
7be8da07 2162 if (!get_ldev(mdev)) {
82bc0194
AG
2163 int err2;
2164
7be8da07 2165 err = wait_for_and_update_peer_seq(mdev, peer_seq);
e2857216 2166 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
12038a3a 2167 atomic_inc(&tconn->current_epoch->epoch_size);
e2857216 2168 err2 = drbd_drain_block(mdev, pi->size);
82bc0194
AG
2169 if (!err)
2170 err = err2;
2171 return err;
b411b363
PR
2172 }
2173
fcefa62e
AG
2174 /*
2175 * Corresponding put_ldev done either below (on various errors), or in
2176 * drbd_peer_request_endio, if we successfully submit the data at the
2177 * end of this function.
2178 */
b411b363
PR
2179
2180 sector = be64_to_cpu(p->sector);
e2857216 2181 peer_req = read_in_block(mdev, p->block_id, sector, pi->size);
db830c46 2182 if (!peer_req) {
b411b363 2183 put_ldev(mdev);
82bc0194 2184 return -EIO;
b411b363
PR
2185 }
2186
db830c46 2187 peer_req->w.cb = e_end_block;
b411b363 2188
688593c5
LE
2189 dp_flags = be32_to_cpu(p->dp_flags);
2190 rw |= wire_flags_to_bio(mdev, dp_flags);
81a3537a
LE
2191 if (peer_req->pages == NULL) {
2192 D_ASSERT(peer_req->i.size == 0);
a73ff323
LE
2193 D_ASSERT(dp_flags & DP_FLUSH);
2194 }
688593c5
LE
2195
2196 if (dp_flags & DP_MAY_SET_IN_SYNC)
db830c46 2197 peer_req->flags |= EE_MAY_SET_IN_SYNC;
688593c5 2198
12038a3a
PR
2199 spin_lock(&tconn->epoch_lock);
2200 peer_req->epoch = tconn->current_epoch;
db830c46
AG
2201 atomic_inc(&peer_req->epoch->epoch_size);
2202 atomic_inc(&peer_req->epoch->active);
12038a3a 2203 spin_unlock(&tconn->epoch_lock);
b411b363 2204
302bdeae
PR
2205 rcu_read_lock();
2206 tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
2207 rcu_read_unlock();
2208 if (tp) {
2209 peer_req->flags |= EE_IN_INTERVAL_TREE;
7be8da07
AG
2210 err = wait_for_and_update_peer_seq(mdev, peer_seq);
2211 if (err)
b411b363 2212 goto out_interrupted;
87eeee41 2213 spin_lock_irq(&mdev->tconn->req_lock);
7be8da07
AG
2214 err = handle_write_conflicts(mdev, peer_req);
2215 if (err) {
2216 spin_unlock_irq(&mdev->tconn->req_lock);
2217 if (err == -ENOENT) {
b411b363 2218 put_ldev(mdev);
82bc0194 2219 return 0;
b411b363 2220 }
7be8da07 2221 goto out_interrupted;
b411b363 2222 }
b874d231
PR
2223 } else {
2224 update_peer_seq(mdev, peer_seq);
7be8da07 2225 spin_lock_irq(&mdev->tconn->req_lock);
b874d231 2226 }
db830c46 2227 list_add(&peer_req->w.list, &mdev->active_ee);
87eeee41 2228 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 2229
d93f6302 2230 if (mdev->state.conn == C_SYNC_TARGET)
3ea35df8 2231 wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, peer_req));
b411b363 2232
303d1448 2233 if (mdev->tconn->agreed_pro_version < 100) {
44ed167d
PR
2234 rcu_read_lock();
2235 switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
303d1448
PR
2236 case DRBD_PROT_C:
2237 dp_flags |= DP_SEND_WRITE_ACK;
2238 break;
2239 case DRBD_PROT_B:
2240 dp_flags |= DP_SEND_RECEIVE_ACK;
2241 break;
b411b363 2242 }
44ed167d 2243 rcu_read_unlock();
b411b363
PR
2244 }
2245
303d1448
PR
2246 if (dp_flags & DP_SEND_WRITE_ACK) {
2247 peer_req->flags |= EE_SEND_WRITE_ACK;
b411b363
PR
2248 inc_unacked(mdev);
2249 /* corresponding dec_unacked() in e_end_block()
2250 * respective _drbd_clear_done_ee */
303d1448
PR
2251 }
2252
2253 if (dp_flags & DP_SEND_RECEIVE_ACK) {
b411b363
PR
2254 /* I really don't like it that the receiver thread
2255 * sends on the msock, but anyways */
db830c46 2256 drbd_send_ack(mdev, P_RECV_ACK, peer_req);
b411b363
PR
2257 }
2258
6719fb03 2259 if (mdev->state.pdsk < D_INCONSISTENT) {
b411b363 2260 /* In case we have the only disk of the cluster, */
db830c46
AG
2261 drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
2262 peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
2263 peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
56392d2f 2264 drbd_al_begin_io(mdev, &peer_req->i, true);
b411b363
PR
2265 }
2266
82bc0194
AG
2267 err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
2268 if (!err)
2269 return 0;
b411b363 2270
10f6d992
LE
2271 /* don't care for the reason here */
2272 dev_err(DEV, "submit failed, triggering re-connect\n");
87eeee41 2273 spin_lock_irq(&mdev->tconn->req_lock);
db830c46
AG
2274 list_del(&peer_req->w.list);
2275 drbd_remove_epoch_entry_interval(mdev, peer_req);
87eeee41 2276 spin_unlock_irq(&mdev->tconn->req_lock);
db830c46 2277 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
181286ad 2278 drbd_al_complete_io(mdev, &peer_req->i);
22cc37a9 2279
b411b363 2280out_interrupted:
1e9dd291 2281 drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP);
b411b363 2282 put_ldev(mdev);
3967deb1 2283 drbd_free_peer_req(mdev, peer_req);
82bc0194 2284 return err;
b411b363
PR
2285}
2286
0f0601f4
LE
2287/* We may throttle resync, if the lower device seems to be busy,
2288 * and current sync rate is above c_min_rate.
2289 *
2290 * To decide whether or not the lower device is busy, we use a scheme similar
2291 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
2292 * (more than 64 sectors) of activity we cannot account for with our own resync
2293 * activity, it obviously is "busy".
2294 *
2295 * The current sync rate used here uses only the most recent two step marks,
2296 * to have a short time average so we can react faster.
2297 */
e3555d85 2298int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
0f0601f4
LE
2299{
2300 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
2301 unsigned long db, dt, dbdt;
e3555d85 2302 struct lc_element *tmp;
0f0601f4
LE
2303 int curr_events;
2304 int throttle = 0;
daeda1cc
PR
2305 unsigned int c_min_rate;
2306
2307 rcu_read_lock();
2308 c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;
2309 rcu_read_unlock();
0f0601f4
LE
2310
2311 /* feature disabled? */
daeda1cc 2312 if (c_min_rate == 0)
0f0601f4
LE
2313 return 0;
2314
e3555d85
PR
2315 spin_lock_irq(&mdev->al_lock);
2316 tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
2317 if (tmp) {
2318 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
2319 if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
2320 spin_unlock_irq(&mdev->al_lock);
2321 return 0;
2322 }
2323 /* Do not slow down if app IO is already waiting for this extent */
2324 }
2325 spin_unlock_irq(&mdev->al_lock);
2326
0f0601f4
LE
2327 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
2328 (int)part_stat_read(&disk->part0, sectors[1]) -
2329 atomic_read(&mdev->rs_sect_ev);
e3555d85 2330
0f0601f4
LE
2331 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
2332 unsigned long rs_left;
2333 int i;
2334
2335 mdev->rs_last_events = curr_events;
2336
2337 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2338 * approx. */
2649f080
LE
2339 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2340
2341 if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
2342 rs_left = mdev->ov_left;
2343 else
2344 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
0f0601f4
LE
2345
2346 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
2347 if (!dt)
2348 dt++;
2349 db = mdev->rs_mark_left[i] - rs_left;
2350 dbdt = Bit2KB(db/dt);
2351
daeda1cc 2352 if (dbdt > c_min_rate)
0f0601f4
LE
2353 throttle = 1;
2354 }
2355 return throttle;
2356}
2357
2358
4a76b161 2359static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 2360{
4a76b161 2361 struct drbd_conf *mdev;
b411b363 2362 sector_t sector;
4a76b161 2363 sector_t capacity;
db830c46 2364 struct drbd_peer_request *peer_req;
b411b363 2365 struct digest_info *di = NULL;
b18b37be 2366 int size, verb;
b411b363 2367 unsigned int fault_type;
e658983a 2368 struct p_block_req *p = pi->data;
4a76b161
AG
2369
2370 mdev = vnr_to_mdev(tconn, pi->vnr);
2371 if (!mdev)
2372 return -EIO;
2373 capacity = drbd_get_capacity(mdev->this_bdev);
b411b363
PR
2374
2375 sector = be64_to_cpu(p->sector);
2376 size = be32_to_cpu(p->blksize);
2377
c670a398 2378 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
b411b363
PR
2379 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2380 (unsigned long long)sector, size);
82bc0194 2381 return -EINVAL;
b411b363
PR
2382 }
2383 if (sector + (size>>9) > capacity) {
2384 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2385 (unsigned long long)sector, size);
82bc0194 2386 return -EINVAL;
b411b363
PR
2387 }
2388
2389 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
b18b37be 2390 verb = 1;
e2857216 2391 switch (pi->cmd) {
b18b37be
PR
2392 case P_DATA_REQUEST:
2393 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
2394 break;
2395 case P_RS_DATA_REQUEST:
2396 case P_CSUM_RS_REQUEST:
2397 case P_OV_REQUEST:
2398 drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
2399 break;
2400 case P_OV_REPLY:
2401 verb = 0;
2402 dec_rs_pending(mdev);
2403 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
2404 break;
2405 default:
49ba9b1b 2406 BUG();
b18b37be
PR
2407 }
2408 if (verb && __ratelimit(&drbd_ratelimit_state))
b411b363
PR
2409 dev_err(DEV, "Can not satisfy peer's read request, "
2410 "no local data.\n");
b18b37be 2411
a821cc4a 2412 /* drain possibly payload */
e2857216 2413 return drbd_drain_block(mdev, pi->size);
b411b363
PR
2414 }
2415
2416 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2417 * "criss-cross" setup, that might cause write-out on some other DRBD,
2418 * which in turn might block on the other node at this very place. */
0db55363 2419 peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);
db830c46 2420 if (!peer_req) {
b411b363 2421 put_ldev(mdev);
82bc0194 2422 return -ENOMEM;
b411b363
PR
2423 }
2424
e2857216 2425 switch (pi->cmd) {
b411b363 2426 case P_DATA_REQUEST:
db830c46 2427 peer_req->w.cb = w_e_end_data_req;
b411b363 2428 fault_type = DRBD_FAULT_DT_RD;
80a40e43
LE
2429 /* application IO, don't drbd_rs_begin_io */
2430 goto submit;
2431
b411b363 2432 case P_RS_DATA_REQUEST:
db830c46 2433 peer_req->w.cb = w_e_end_rsdata_req;
b411b363 2434 fault_type = DRBD_FAULT_RS_RD;
5f9915bb
LE
2435 /* used in the sector offset progress display */
2436 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
b411b363
PR
2437 break;
2438
2439 case P_OV_REPLY:
2440 case P_CSUM_RS_REQUEST:
2441 fault_type = DRBD_FAULT_RS_RD;
e2857216 2442 di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
b411b363
PR
2443 if (!di)
2444 goto out_free_e;
2445
e2857216 2446 di->digest_size = pi->size;
b411b363
PR
2447 di->digest = (((char *)di)+sizeof(struct digest_info));
2448
db830c46
AG
2449 peer_req->digest = di;
2450 peer_req->flags |= EE_HAS_DIGEST;
c36c3ced 2451
e2857216 2452 if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
b411b363
PR
2453 goto out_free_e;
2454
e2857216 2455 if (pi->cmd == P_CSUM_RS_REQUEST) {
31890f4a 2456 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
db830c46 2457 peer_req->w.cb = w_e_end_csum_rs_req;
5f9915bb
LE
2458 /* used in the sector offset progress display */
2459 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
e2857216 2460 } else if (pi->cmd == P_OV_REPLY) {
2649f080
LE
2461 /* track progress, we may need to throttle */
2462 atomic_add(size >> 9, &mdev->rs_sect_in);
db830c46 2463 peer_req->w.cb = w_e_end_ov_reply;
b411b363 2464 dec_rs_pending(mdev);
0f0601f4
LE
2465 /* drbd_rs_begin_io done when we sent this request,
2466 * but accounting still needs to be done. */
2467 goto submit_for_resync;
b411b363
PR
2468 }
2469 break;
2470
2471 case P_OV_REQUEST:
b411b363 2472 if (mdev->ov_start_sector == ~(sector_t)0 &&
31890f4a 2473 mdev->tconn->agreed_pro_version >= 90) {
de228bba
LE
2474 unsigned long now = jiffies;
2475 int i;
b411b363
PR
2476 mdev->ov_start_sector = sector;
2477 mdev->ov_position = sector;
30b743a2
LE
2478 mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2479 mdev->rs_total = mdev->ov_left;
de228bba
LE
2480 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2481 mdev->rs_mark_left[i] = mdev->ov_left;
2482 mdev->rs_mark_time[i] = now;
2483 }
b411b363
PR
2484 dev_info(DEV, "Online Verify start sector: %llu\n",
2485 (unsigned long long)sector);
2486 }
db830c46 2487 peer_req->w.cb = w_e_end_ov_req;
b411b363 2488 fault_type = DRBD_FAULT_RS_RD;
b411b363
PR
2489 break;
2490
b411b363 2491 default:
49ba9b1b 2492 BUG();
b411b363
PR
2493 }
2494
0f0601f4
LE
2495 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2496 * wrt the receiver, but it is not as straightforward as it may seem.
2497 * Various places in the resync start and stop logic assume resync
2498 * requests are processed in order, requeuing this on the worker thread
2499 * introduces a bunch of new code for synchronization between threads.
2500 *
2501 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2502 * "forever", throttling after drbd_rs_begin_io will lock that extent
2503 * for application writes for the same time. For now, just throttle
2504 * here, where the rest of the code expects the receiver to sleep for
2505 * a while, anyways.
2506 */
2507
2508 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2509 * this defers syncer requests for some time, before letting at least
2510 * on request through. The resync controller on the receiving side
2511 * will adapt to the incoming rate accordingly.
2512 *
2513 * We cannot throttle here if remote is Primary/SyncTarget:
2514 * we would also throttle its application reads.
2515 * In that case, throttling is done on the SyncTarget only.
2516 */
e3555d85
PR
2517 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2518 schedule_timeout_uninterruptible(HZ/10);
2519 if (drbd_rs_begin_io(mdev, sector))
80a40e43 2520 goto out_free_e;
b411b363 2521
0f0601f4
LE
2522submit_for_resync:
2523 atomic_add(size >> 9, &mdev->rs_sect_ev);
2524
80a40e43 2525submit:
b411b363 2526 inc_unacked(mdev);
87eeee41 2527 spin_lock_irq(&mdev->tconn->req_lock);
db830c46 2528 list_add_tail(&peer_req->w.list, &mdev->read_ee);
87eeee41 2529 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 2530
fbe29dec 2531 if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
82bc0194 2532 return 0;
b411b363 2533
10f6d992
LE
2534 /* don't care for the reason here */
2535 dev_err(DEV, "submit failed, triggering re-connect\n");
87eeee41 2536 spin_lock_irq(&mdev->tconn->req_lock);
db830c46 2537 list_del(&peer_req->w.list);
87eeee41 2538 spin_unlock_irq(&mdev->tconn->req_lock);
22cc37a9
LE
2539 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2540
b411b363 2541out_free_e:
b411b363 2542 put_ldev(mdev);
3967deb1 2543 drbd_free_peer_req(mdev, peer_req);
82bc0194 2544 return -EIO;
b411b363
PR
2545}
2546
2547static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2548{
2549 int self, peer, rv = -100;
2550 unsigned long ch_self, ch_peer;
44ed167d 2551 enum drbd_after_sb_p after_sb_0p;
b411b363
PR
2552
2553 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2554 peer = mdev->p_uuid[UI_BITMAP] & 1;
2555
2556 ch_peer = mdev->p_uuid[UI_SIZE];
2557 ch_self = mdev->comm_bm_set;
2558
44ed167d
PR
2559 rcu_read_lock();
2560 after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p;
2561 rcu_read_unlock();
2562 switch (after_sb_0p) {
b411b363
PR
2563 case ASB_CONSENSUS:
2564 case ASB_DISCARD_SECONDARY:
2565 case ASB_CALL_HELPER:
44ed167d 2566 case ASB_VIOLENTLY:
b411b363
PR
2567 dev_err(DEV, "Configuration error.\n");
2568 break;
2569 case ASB_DISCONNECT:
2570 break;
2571 case ASB_DISCARD_YOUNGER_PRI:
2572 if (self == 0 && peer == 1) {
2573 rv = -1;
2574 break;
2575 }
2576 if (self == 1 && peer == 0) {
2577 rv = 1;
2578 break;
2579 }
2580 /* Else fall through to one of the other strategies... */
2581 case ASB_DISCARD_OLDER_PRI:
2582 if (self == 0 && peer == 1) {
2583 rv = 1;
2584 break;
2585 }
2586 if (self == 1 && peer == 0) {
2587 rv = -1;
2588 break;
2589 }
2590 /* Else fall through to one of the other strategies... */
ad19bf6e 2591 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
b411b363
PR
2592 "Using discard-least-changes instead\n");
2593 case ASB_DISCARD_ZERO_CHG:
2594 if (ch_peer == 0 && ch_self == 0) {
427c0434 2595 rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
b411b363
PR
2596 ? -1 : 1;
2597 break;
2598 } else {
2599 if (ch_peer == 0) { rv = 1; break; }
2600 if (ch_self == 0) { rv = -1; break; }
2601 }
44ed167d 2602 if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
b411b363
PR
2603 break;
2604 case ASB_DISCARD_LEAST_CHG:
2605 if (ch_self < ch_peer)
2606 rv = -1;
2607 else if (ch_self > ch_peer)
2608 rv = 1;
2609 else /* ( ch_self == ch_peer ) */
2610 /* Well, then use something else. */
427c0434 2611 rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
b411b363
PR
2612 ? -1 : 1;
2613 break;
2614 case ASB_DISCARD_LOCAL:
2615 rv = -1;
2616 break;
2617 case ASB_DISCARD_REMOTE:
2618 rv = 1;
2619 }
2620
2621 return rv;
2622}
2623
2624static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2625{
6184ea21 2626 int hg, rv = -100;
44ed167d 2627 enum drbd_after_sb_p after_sb_1p;
b411b363 2628
44ed167d
PR
2629 rcu_read_lock();
2630 after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p;
2631 rcu_read_unlock();
2632 switch (after_sb_1p) {
b411b363
PR
2633 case ASB_DISCARD_YOUNGER_PRI:
2634 case ASB_DISCARD_OLDER_PRI:
2635 case ASB_DISCARD_LEAST_CHG:
2636 case ASB_DISCARD_LOCAL:
2637 case ASB_DISCARD_REMOTE:
44ed167d 2638 case ASB_DISCARD_ZERO_CHG:
b411b363
PR
2639 dev_err(DEV, "Configuration error.\n");
2640 break;
2641 case ASB_DISCONNECT:
2642 break;
2643 case ASB_CONSENSUS:
2644 hg = drbd_asb_recover_0p(mdev);
2645 if (hg == -1 && mdev->state.role == R_SECONDARY)
2646 rv = hg;
2647 if (hg == 1 && mdev->state.role == R_PRIMARY)
2648 rv = hg;
2649 break;
2650 case ASB_VIOLENTLY:
2651 rv = drbd_asb_recover_0p(mdev);
2652 break;
2653 case ASB_DISCARD_SECONDARY:
2654 return mdev->state.role == R_PRIMARY ? 1 : -1;
2655 case ASB_CALL_HELPER:
2656 hg = drbd_asb_recover_0p(mdev);
2657 if (hg == -1 && mdev->state.role == R_PRIMARY) {
bb437946
AG
2658 enum drbd_state_rv rv2;
2659
b411b363
PR
2660 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2661 * we might be here in C_WF_REPORT_PARAMS which is transient.
2662 * we do not need to wait for the after state change work either. */
bb437946
AG
2663 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2664 if (rv2 != SS_SUCCESS) {
b411b363
PR
2665 drbd_khelper(mdev, "pri-lost-after-sb");
2666 } else {
2667 dev_warn(DEV, "Successfully gave up primary role.\n");
2668 rv = hg;
2669 }
2670 } else
2671 rv = hg;
2672 }
2673
2674 return rv;
2675}
2676
2677static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2678{
6184ea21 2679 int hg, rv = -100;
44ed167d 2680 enum drbd_after_sb_p after_sb_2p;
b411b363 2681
44ed167d
PR
2682 rcu_read_lock();
2683 after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p;
2684 rcu_read_unlock();
2685 switch (after_sb_2p) {
b411b363
PR
2686 case ASB_DISCARD_YOUNGER_PRI:
2687 case ASB_DISCARD_OLDER_PRI:
2688 case ASB_DISCARD_LEAST_CHG:
2689 case ASB_DISCARD_LOCAL:
2690 case ASB_DISCARD_REMOTE:
2691 case ASB_CONSENSUS:
2692 case ASB_DISCARD_SECONDARY:
44ed167d 2693 case ASB_DISCARD_ZERO_CHG:
b411b363
PR
2694 dev_err(DEV, "Configuration error.\n");
2695 break;
2696 case ASB_VIOLENTLY:
2697 rv = drbd_asb_recover_0p(mdev);
2698 break;
2699 case ASB_DISCONNECT:
2700 break;
2701 case ASB_CALL_HELPER:
2702 hg = drbd_asb_recover_0p(mdev);
2703 if (hg == -1) {
bb437946
AG
2704 enum drbd_state_rv rv2;
2705
b411b363
PR
2706 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2707 * we might be here in C_WF_REPORT_PARAMS which is transient.
2708 * we do not need to wait for the after state change work either. */
bb437946
AG
2709 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2710 if (rv2 != SS_SUCCESS) {
b411b363
PR
2711 drbd_khelper(mdev, "pri-lost-after-sb");
2712 } else {
2713 dev_warn(DEV, "Successfully gave up primary role.\n");
2714 rv = hg;
2715 }
2716 } else
2717 rv = hg;
2718 }
2719
2720 return rv;
2721}
2722
2723static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2724 u64 bits, u64 flags)
2725{
2726 if (!uuid) {
2727 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2728 return;
2729 }
2730 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2731 text,
2732 (unsigned long long)uuid[UI_CURRENT],
2733 (unsigned long long)uuid[UI_BITMAP],
2734 (unsigned long long)uuid[UI_HISTORY_START],
2735 (unsigned long long)uuid[UI_HISTORY_END],
2736 (unsigned long long)bits,
2737 (unsigned long long)flags);
2738}
2739
2740/*
2741 100 after split brain try auto recover
2742 2 C_SYNC_SOURCE set BitMap
2743 1 C_SYNC_SOURCE use BitMap
2744 0 no Sync
2745 -1 C_SYNC_TARGET use BitMap
2746 -2 C_SYNC_TARGET set BitMap
2747 -100 after split brain, disconnect
2748-1000 unrelated data
4a23f264
PR
2749-1091 requires proto 91
2750-1096 requires proto 96
b411b363
PR
2751 */
2752static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2753{
2754 u64 self, peer;
2755 int i, j;
2756
2757 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2758 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2759
2760 *rule_nr = 10;
2761 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2762 return 0;
2763
2764 *rule_nr = 20;
2765 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2766 peer != UUID_JUST_CREATED)
2767 return -2;
2768
2769 *rule_nr = 30;
2770 if (self != UUID_JUST_CREATED &&
2771 (peer == UUID_JUST_CREATED || peer == (u64)0))
2772 return 2;
2773
2774 if (self == peer) {
2775 int rct, dc; /* roles at crash time */
2776
2777 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2778
31890f4a 2779 if (mdev->tconn->agreed_pro_version < 91)
4a23f264 2780 return -1091;
b411b363
PR
2781
2782 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2783 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2784 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
9f2247bb
PR
2785 drbd_uuid_move_history(mdev);
2786 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
2787 mdev->ldev->md.uuid[UI_BITMAP] = 0;
b411b363
PR
2788
2789 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2790 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2791 *rule_nr = 34;
2792 } else {
2793 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2794 *rule_nr = 36;
2795 }
2796
2797 return 1;
2798 }
2799
2800 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2801
31890f4a 2802 if (mdev->tconn->agreed_pro_version < 91)
4a23f264 2803 return -1091;
b411b363
PR
2804
2805 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2806 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2807 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2808
2809 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2810 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2811 mdev->p_uuid[UI_BITMAP] = 0UL;
2812
2813 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2814 *rule_nr = 35;
2815 } else {
2816 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2817 *rule_nr = 37;
2818 }
2819
2820 return -1;
2821 }
2822
2823 /* Common power [off|failure] */
2824 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2825 (mdev->p_uuid[UI_FLAGS] & 2);
2826 /* lowest bit is set when we were primary,
2827 * next bit (weight 2) is set when peer was primary */
2828 *rule_nr = 40;
2829
2830 switch (rct) {
2831 case 0: /* !self_pri && !peer_pri */ return 0;
2832 case 1: /* self_pri && !peer_pri */ return 1;
2833 case 2: /* !self_pri && peer_pri */ return -1;
2834 case 3: /* self_pri && peer_pri */
427c0434 2835 dc = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
b411b363
PR
2836 return dc ? -1 : 1;
2837 }
2838 }
2839
2840 *rule_nr = 50;
2841 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2842 if (self == peer)
2843 return -1;
2844
2845 *rule_nr = 51;
2846 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2847 if (self == peer) {
31890f4a 2848 if (mdev->tconn->agreed_pro_version < 96 ?
4a23f264
PR
2849 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2850 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2851 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
b411b363
PR
2852 /* The last P_SYNC_UUID did not get though. Undo the last start of
2853 resync as sync source modifications of the peer's UUIDs. */
2854
31890f4a 2855 if (mdev->tconn->agreed_pro_version < 91)
4a23f264 2856 return -1091;
b411b363
PR
2857
2858 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2859 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
4a23f264 2860
92b4ca29 2861 dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
4a23f264
PR
2862 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2863
b411b363
PR
2864 return -1;
2865 }
2866 }
2867
2868 *rule_nr = 60;
2869 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2870 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2871 peer = mdev->p_uuid[i] & ~((u64)1);
2872 if (self == peer)
2873 return -2;
2874 }
2875
2876 *rule_nr = 70;
2877 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2878 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2879 if (self == peer)
2880 return 1;
2881
2882 *rule_nr = 71;
2883 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2884 if (self == peer) {
31890f4a 2885 if (mdev->tconn->agreed_pro_version < 96 ?
4a23f264
PR
2886 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2887 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2888 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
b411b363
PR
2889 /* The last P_SYNC_UUID did not get though. Undo the last start of
2890 resync as sync source modifications of our UUIDs. */
2891
31890f4a 2892 if (mdev->tconn->agreed_pro_version < 91)
4a23f264 2893 return -1091;
b411b363 2894
9f2247bb
PR
2895 __drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2896 __drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
b411b363 2897
4a23f264 2898 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
b411b363
PR
2899 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2900 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2901
2902 return 1;
2903 }
2904 }
2905
2906
2907 *rule_nr = 80;
d8c2a36b 2908 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
b411b363
PR
2909 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2910 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2911 if (self == peer)
2912 return 2;
2913 }
2914
2915 *rule_nr = 90;
2916 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2917 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2918 if (self == peer && self != ((u64)0))
2919 return 100;
2920
2921 *rule_nr = 100;
2922 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2923 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2924 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2925 peer = mdev->p_uuid[j] & ~((u64)1);
2926 if (self == peer)
2927 return -100;
2928 }
2929 }
2930
2931 return -1000;
2932}
2933
2934/* drbd_sync_handshake() returns the new conn state on success, or
2935 CONN_MASK (-1) on failure.
2936 */
2937static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2938 enum drbd_disk_state peer_disk) __must_hold(local)
2939{
b411b363
PR
2940 enum drbd_conns rv = C_MASK;
2941 enum drbd_disk_state mydisk;
44ed167d 2942 struct net_conf *nc;
6dff2902 2943 int hg, rule_nr, rr_conflict, tentative;
b411b363
PR
2944
2945 mydisk = mdev->state.disk;
2946 if (mydisk == D_NEGOTIATING)
2947 mydisk = mdev->new_state_tmp.disk;
2948
2949 dev_info(DEV, "drbd_sync_handshake:\n");
9f2247bb
PR
2950
2951 spin_lock_irq(&mdev->ldev->md.uuid_lock);
b411b363
PR
2952 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2953 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2954 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2955
2956 hg = drbd_uuid_compare(mdev, &rule_nr);
9f2247bb 2957 spin_unlock_irq(&mdev->ldev->md.uuid_lock);
b411b363
PR
2958
2959 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2960
2961 if (hg == -1000) {
2962 dev_alert(DEV, "Unrelated data, aborting!\n");
2963 return C_MASK;
2964 }
4a23f264
PR
2965 if (hg < -1000) {
2966 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
b411b363
PR
2967 return C_MASK;
2968 }
2969
2970 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2971 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2972 int f = (hg == -100) || abs(hg) == 2;
2973 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2974 if (f)
2975 hg = hg*2;
2976 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2977 hg > 0 ? "source" : "target");
2978 }
2979
3a11a487
AG
2980 if (abs(hg) == 100)
2981 drbd_khelper(mdev, "initial-split-brain");
2982
44ed167d
PR
2983 rcu_read_lock();
2984 nc = rcu_dereference(mdev->tconn->net_conf);
2985
2986 if (hg == 100 || (hg == -100 && nc->always_asbp)) {
b411b363
PR
2987 int pcount = (mdev->state.role == R_PRIMARY)
2988 + (peer_role == R_PRIMARY);
2989 int forced = (hg == -100);
2990
2991 switch (pcount) {
2992 case 0:
2993 hg = drbd_asb_recover_0p(mdev);
2994 break;
2995 case 1:
2996 hg = drbd_asb_recover_1p(mdev);
2997 break;
2998 case 2:
2999 hg = drbd_asb_recover_2p(mdev);
3000 break;
3001 }
3002 if (abs(hg) < 100) {
3003 dev_warn(DEV, "Split-Brain detected, %d primaries, "
3004 "automatically solved. Sync from %s node\n",
3005 pcount, (hg < 0) ? "peer" : "this");
3006 if (forced) {
3007 dev_warn(DEV, "Doing a full sync, since"
3008 " UUIDs where ambiguous.\n");
3009 hg = hg*2;
3010 }
3011 }
3012 }
3013
3014 if (hg == -100) {
08b165ba 3015 if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1))
b411b363 3016 hg = -1;
08b165ba 3017 if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1))
b411b363
PR
3018 hg = 1;
3019
3020 if (abs(hg) < 100)
3021 dev_warn(DEV, "Split-Brain detected, manually solved. "
3022 "Sync from %s node\n",
3023 (hg < 0) ? "peer" : "this");
3024 }
44ed167d 3025 rr_conflict = nc->rr_conflict;
6dff2902 3026 tentative = nc->tentative;
44ed167d 3027 rcu_read_unlock();
b411b363
PR
3028
3029 if (hg == -100) {
580b9767
LE
3030 /* FIXME this log message is not correct if we end up here
3031 * after an attempted attach on a diskless node.
3032 * We just refuse to attach -- well, we drop the "connection"
3033 * to that disk, in a way... */
3a11a487 3034 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
b411b363
PR
3035 drbd_khelper(mdev, "split-brain");
3036 return C_MASK;
3037 }
3038
3039 if (hg > 0 && mydisk <= D_INCONSISTENT) {
3040 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
3041 return C_MASK;
3042 }
3043
3044 if (hg < 0 && /* by intention we do not use mydisk here. */
3045 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
44ed167d 3046 switch (rr_conflict) {
b411b363
PR
3047 case ASB_CALL_HELPER:
3048 drbd_khelper(mdev, "pri-lost");
3049 /* fall through */
3050 case ASB_DISCONNECT:
3051 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
3052 return C_MASK;
3053 case ASB_VIOLENTLY:
3054 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
3055 "assumption\n");
3056 }
3057 }
3058
6dff2902 3059 if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
cf14c2e9
PR
3060 if (hg == 0)
3061 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
3062 else
3063 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
3064 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
3065 abs(hg) >= 2 ? "full" : "bit-map based");
3066 return C_MASK;
3067 }
3068
b411b363
PR
3069 if (abs(hg) >= 2) {
3070 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
20ceb2b2
LE
3071 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
3072 BM_LOCKED_SET_ALLOWED))
b411b363
PR
3073 return C_MASK;
3074 }
3075
3076 if (hg > 0) { /* become sync source. */
3077 rv = C_WF_BITMAP_S;
3078 } else if (hg < 0) { /* become sync target */
3079 rv = C_WF_BITMAP_T;
3080 } else {
3081 rv = C_CONNECTED;
3082 if (drbd_bm_total_weight(mdev)) {
3083 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
3084 drbd_bm_total_weight(mdev));
3085 }
3086 }
3087
3088 return rv;
3089}
3090
f179d76d 3091static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
b411b363
PR
3092{
3093 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
f179d76d
PR
3094 if (peer == ASB_DISCARD_REMOTE)
3095 return ASB_DISCARD_LOCAL;
b411b363
PR
3096
3097 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
f179d76d
PR
3098 if (peer == ASB_DISCARD_LOCAL)
3099 return ASB_DISCARD_REMOTE;
b411b363
PR
3100
3101 /* everything else is valid if they are equal on both sides. */
f179d76d 3102 return peer;
b411b363
PR
3103}
3104
e2857216 3105static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3106{
e658983a 3107 struct p_protocol *p = pi->data;
036b17ea
PR
3108 enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
3109 int p_proto, p_discard_my_data, p_two_primaries, cf;
3110 struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
3111 char integrity_alg[SHARED_SECRET_MAX] = "";
accdbcc5 3112 struct crypto_hash *peer_integrity_tfm = NULL;
7aca6c75 3113 void *int_dig_in = NULL, *int_dig_vv = NULL;
b411b363 3114
b411b363
PR
3115 p_proto = be32_to_cpu(p->protocol);
3116 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
3117 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
3118 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
b411b363 3119 p_two_primaries = be32_to_cpu(p->two_primaries);
cf14c2e9 3120 cf = be32_to_cpu(p->conn_flags);
6139f60d 3121 p_discard_my_data = cf & CF_DISCARD_MY_DATA;
cf14c2e9 3122
86db0618
AG
3123 if (tconn->agreed_pro_version >= 87) {
3124 int err;
cf14c2e9 3125
88104ca4 3126 if (pi->size > sizeof(integrity_alg))
86db0618 3127 return -EIO;
88104ca4 3128 err = drbd_recv_all(tconn, integrity_alg, pi->size);
86db0618
AG
3129 if (err)
3130 return err;
036b17ea 3131 integrity_alg[SHARED_SECRET_MAX - 1] = 0;
b411b363
PR
3132 }
3133
7d4c782c 3134 if (pi->cmd != P_PROTOCOL_UPDATE) {
fbc12f45 3135 clear_bit(CONN_DRY_RUN, &tconn->flags);
b411b363 3136
fbc12f45
AG
3137 if (cf & CF_DRY_RUN)
3138 set_bit(CONN_DRY_RUN, &tconn->flags);
b411b363 3139
fbc12f45
AG
3140 rcu_read_lock();
3141 nc = rcu_dereference(tconn->net_conf);
b411b363 3142
fbc12f45 3143 if (p_proto != nc->wire_protocol) {
d505d9be 3144 conn_err(tconn, "incompatible %s settings\n", "protocol");
fbc12f45
AG
3145 goto disconnect_rcu_unlock;
3146 }
b411b363 3147
fbc12f45 3148 if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
d505d9be 3149 conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri");
fbc12f45
AG
3150 goto disconnect_rcu_unlock;
3151 }
b411b363 3152
fbc12f45 3153 if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
d505d9be 3154 conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri");
fbc12f45
AG
3155 goto disconnect_rcu_unlock;
3156 }
b411b363 3157
fbc12f45 3158 if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
d505d9be 3159 conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri");
fbc12f45
AG
3160 goto disconnect_rcu_unlock;
3161 }
b411b363 3162
fbc12f45 3163 if (p_discard_my_data && nc->discard_my_data) {
d505d9be 3164 conn_err(tconn, "incompatible %s settings\n", "discard-my-data");
fbc12f45
AG
3165 goto disconnect_rcu_unlock;
3166 }
b411b363 3167
fbc12f45 3168 if (p_two_primaries != nc->two_primaries) {
d505d9be 3169 conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries");
fbc12f45
AG
3170 goto disconnect_rcu_unlock;
3171 }
b411b363 3172
fbc12f45 3173 if (strcmp(integrity_alg, nc->integrity_alg)) {
d505d9be 3174 conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg");
fbc12f45
AG
3175 goto disconnect_rcu_unlock;
3176 }
b411b363 3177
fbc12f45 3178 rcu_read_unlock();
b411b363
PR
3179 }
3180
7d4c782c
AG
3181 if (integrity_alg[0]) {
3182 int hash_size;
3183
3184 /*
3185 * We can only change the peer data integrity algorithm
3186 * here. Changing our own data integrity algorithm
3187 * requires that we send a P_PROTOCOL_UPDATE packet at
3188 * the same time; otherwise, the peer has no way to
3189 * tell between which packets the algorithm should
3190 * change.
3191 */
b411b363 3192
7d4c782c
AG
3193 peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
3194 if (!peer_integrity_tfm) {
3195 conn_err(tconn, "peer data-integrity-alg %s not supported\n",
3196 integrity_alg);
3197 goto disconnect;
3198 }
b411b363 3199
7d4c782c
AG
3200 hash_size = crypto_hash_digestsize(peer_integrity_tfm);
3201 int_dig_in = kmalloc(hash_size, GFP_KERNEL);
3202 int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
3203 if (!(int_dig_in && int_dig_vv)) {
3204 conn_err(tconn, "Allocation of buffers for data integrity checking failed\n");
b411b363
PR
3205 goto disconnect;
3206 }
b411b363
PR
3207 }
3208
7d4c782c
AG
3209 new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
3210 if (!new_net_conf) {
3211 conn_err(tconn, "Allocation of new net_conf failed\n");
3212 goto disconnect;
3213 }
3214
3215 mutex_lock(&tconn->data.mutex);
3216 mutex_lock(&tconn->conf_update);
3217 old_net_conf = tconn->net_conf;
3218 *new_net_conf = *old_net_conf;
3219
3220 new_net_conf->wire_protocol = p_proto;
3221 new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
3222 new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
3223 new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
3224 new_net_conf->two_primaries = p_two_primaries;
3225
3226 rcu_assign_pointer(tconn->net_conf, new_net_conf);
3227 mutex_unlock(&tconn->conf_update);
3228 mutex_unlock(&tconn->data.mutex);
3229
3230 crypto_free_hash(tconn->peer_integrity_tfm);
3231 kfree(tconn->int_dig_in);
3232 kfree(tconn->int_dig_vv);
3233 tconn->peer_integrity_tfm = peer_integrity_tfm;
3234 tconn->int_dig_in = int_dig_in;
3235 tconn->int_dig_vv = int_dig_vv;
3236
3237 if (strcmp(old_net_conf->integrity_alg, integrity_alg))
3238 conn_info(tconn, "peer data-integrity-alg: %s\n",
3239 integrity_alg[0] ? integrity_alg : "(none)");
3240
3241 synchronize_rcu();
3242 kfree(old_net_conf);
82bc0194 3243 return 0;
b411b363 3244
44ed167d
PR
3245disconnect_rcu_unlock:
3246 rcu_read_unlock();
b411b363 3247disconnect:
b792c35c 3248 crypto_free_hash(peer_integrity_tfm);
036b17ea
PR
3249 kfree(int_dig_in);
3250 kfree(int_dig_vv);
7204624c 3251 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3252 return -EIO;
b411b363
PR
3253}
3254
3255/* helper function
3256 * input: alg name, feature name
3257 * return: NULL (alg name was "")
3258 * ERR_PTR(error) if something goes wrong
3259 * or the crypto hash ptr, if it worked out ok. */
3260struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
3261 const char *alg, const char *name)
3262{
3263 struct crypto_hash *tfm;
3264
3265 if (!alg[0])
3266 return NULL;
3267
3268 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
3269 if (IS_ERR(tfm)) {
3270 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
3271 alg, name, PTR_ERR(tfm));
3272 return tfm;
3273 }
b411b363
PR
3274 return tfm;
3275}
3276
4a76b161
AG
3277static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
3278{
3279 void *buffer = tconn->data.rbuf;
3280 int size = pi->size;
3281
3282 while (size) {
3283 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
3284 s = drbd_recv(tconn, buffer, s);
3285 if (s <= 0) {
3286 if (s < 0)
3287 return s;
3288 break;
3289 }
3290 size -= s;
3291 }
3292 if (size)
3293 return -EIO;
3294 return 0;
3295}
3296
3297/*
3298 * config_unknown_volume - device configuration command for unknown volume
3299 *
3300 * When a device is added to an existing connection, the node on which the
3301 * device is added first will send configuration commands to its peer but the
3302 * peer will not know about the device yet. It will warn and ignore these
3303 * commands. Once the device is added on the second node, the second node will
3304 * send the same device configuration commands, but in the other direction.
3305 *
3306 * (We can also end up here if drbd is misconfigured.)
3307 */
3308static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
3309{
2fcb8f30
AG
3310 conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n",
3311 cmdname(pi->cmd), pi->vnr);
4a76b161
AG
3312 return ignore_remaining_packet(tconn, pi);
3313}
3314
3315static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3316{
4a76b161 3317 struct drbd_conf *mdev;
e658983a 3318 struct p_rs_param_95 *p;
b411b363
PR
3319 unsigned int header_size, data_size, exp_max_sz;
3320 struct crypto_hash *verify_tfm = NULL;
3321 struct crypto_hash *csums_tfm = NULL;
2ec91e0e 3322 struct net_conf *old_net_conf, *new_net_conf = NULL;
813472ce 3323 struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
4a76b161 3324 const int apv = tconn->agreed_pro_version;
813472ce 3325 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
778f271d 3326 int fifo_size = 0;
82bc0194 3327 int err;
b411b363 3328
4a76b161
AG
3329 mdev = vnr_to_mdev(tconn, pi->vnr);
3330 if (!mdev)
3331 return config_unknown_volume(tconn, pi);
b411b363
PR
3332
3333 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
3334 : apv == 88 ? sizeof(struct p_rs_param)
3335 + SHARED_SECRET_MAX
8e26f9cc
PR
3336 : apv <= 94 ? sizeof(struct p_rs_param_89)
3337 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
b411b363 3338
e2857216 3339 if (pi->size > exp_max_sz) {
b411b363 3340 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
e2857216 3341 pi->size, exp_max_sz);
82bc0194 3342 return -EIO;
b411b363
PR
3343 }
3344
3345 if (apv <= 88) {
e658983a 3346 header_size = sizeof(struct p_rs_param);
e2857216 3347 data_size = pi->size - header_size;
8e26f9cc 3348 } else if (apv <= 94) {
e658983a 3349 header_size = sizeof(struct p_rs_param_89);
e2857216 3350 data_size = pi->size - header_size;
b411b363 3351 D_ASSERT(data_size == 0);
8e26f9cc 3352 } else {
e658983a 3353 header_size = sizeof(struct p_rs_param_95);
e2857216 3354 data_size = pi->size - header_size;
b411b363
PR
3355 D_ASSERT(data_size == 0);
3356 }
3357
3358 /* initialize verify_alg and csums_alg */
e658983a 3359 p = pi->data;
b411b363
PR
3360 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3361
e658983a 3362 err = drbd_recv_all(mdev->tconn, p, header_size);
82bc0194
AG
3363 if (err)
3364 return err;
b411b363 3365
daeda1cc
PR
3366 mutex_lock(&mdev->tconn->conf_update);
3367 old_net_conf = mdev->tconn->net_conf;
813472ce
PR
3368 if (get_ldev(mdev)) {
3369 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3370 if (!new_disk_conf) {
3371 put_ldev(mdev);
3372 mutex_unlock(&mdev->tconn->conf_update);
3373 dev_err(DEV, "Allocation of new disk_conf failed\n");
3374 return -ENOMEM;
3375 }
daeda1cc 3376
813472ce
PR
3377 old_disk_conf = mdev->ldev->disk_conf;
3378 *new_disk_conf = *old_disk_conf;
b411b363 3379
6394b935 3380 new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
813472ce 3381 }
b411b363
PR
3382
3383 if (apv >= 88) {
3384 if (apv == 88) {
5de73827
PR
3385 if (data_size > SHARED_SECRET_MAX || data_size == 0) {
3386 dev_err(DEV, "verify-alg of wrong size, "
3387 "peer wants %u, accepting only up to %u byte\n",
3388 data_size, SHARED_SECRET_MAX);
813472ce
PR
3389 err = -EIO;
3390 goto reconnect;
b411b363
PR
3391 }
3392
82bc0194 3393 err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
813472ce
PR
3394 if (err)
3395 goto reconnect;
b411b363
PR
3396 /* we expect NUL terminated string */
3397 /* but just in case someone tries to be evil */
3398 D_ASSERT(p->verify_alg[data_size-1] == 0);
3399 p->verify_alg[data_size-1] = 0;
3400
3401 } else /* apv >= 89 */ {
3402 /* we still expect NUL terminated strings */
3403 /* but just in case someone tries to be evil */
3404 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3405 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
3406 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3407 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3408 }
3409
2ec91e0e 3410 if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
b411b363
PR
3411 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3412 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2ec91e0e 3413 old_net_conf->verify_alg, p->verify_alg);
b411b363
PR
3414 goto disconnect;
3415 }
3416 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
3417 p->verify_alg, "verify-alg");
3418 if (IS_ERR(verify_tfm)) {
3419 verify_tfm = NULL;
3420 goto disconnect;
3421 }
3422 }
3423
2ec91e0e 3424 if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
b411b363
PR
3425 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3426 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2ec91e0e 3427 old_net_conf->csums_alg, p->csums_alg);
b411b363
PR
3428 goto disconnect;
3429 }
3430 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
3431 p->csums_alg, "csums-alg");
3432 if (IS_ERR(csums_tfm)) {
3433 csums_tfm = NULL;
3434 goto disconnect;
3435 }
3436 }
3437
813472ce 3438 if (apv > 94 && new_disk_conf) {
daeda1cc
PR
3439 new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3440 new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
3441 new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
3442 new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
778f271d 3443
daeda1cc 3444 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
9958c857 3445 if (fifo_size != mdev->rs_plan_s->size) {
813472ce
PR
3446 new_plan = fifo_alloc(fifo_size);
3447 if (!new_plan) {
778f271d 3448 dev_err(DEV, "kmalloc of fifo_buffer failed");
f399002e 3449 put_ldev(mdev);
778f271d
PR
3450 goto disconnect;
3451 }
3452 }
8e26f9cc 3453 }
b411b363 3454
91fd4dad 3455 if (verify_tfm || csums_tfm) {
2ec91e0e
PR
3456 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
3457 if (!new_net_conf) {
91fd4dad
PR
3458 dev_err(DEV, "Allocation of new net_conf failed\n");
3459 goto disconnect;
3460 }
3461
2ec91e0e 3462 *new_net_conf = *old_net_conf;
91fd4dad
PR
3463
3464 if (verify_tfm) {
2ec91e0e
PR
3465 strcpy(new_net_conf->verify_alg, p->verify_alg);
3466 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
91fd4dad
PR
3467 crypto_free_hash(mdev->tconn->verify_tfm);
3468 mdev->tconn->verify_tfm = verify_tfm;
3469 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
3470 }
3471 if (csums_tfm) {
2ec91e0e
PR
3472 strcpy(new_net_conf->csums_alg, p->csums_alg);
3473 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
91fd4dad
PR
3474 crypto_free_hash(mdev->tconn->csums_tfm);
3475 mdev->tconn->csums_tfm = csums_tfm;
3476 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
3477 }
2ec91e0e 3478 rcu_assign_pointer(tconn->net_conf, new_net_conf);
778f271d 3479 }
b411b363
PR
3480 }
3481
813472ce
PR
3482 if (new_disk_conf) {
3483 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3484 put_ldev(mdev);
3485 }
3486
3487 if (new_plan) {
3488 old_plan = mdev->rs_plan_s;
3489 rcu_assign_pointer(mdev->rs_plan_s, new_plan);
b411b363 3490 }
daeda1cc
PR
3491
3492 mutex_unlock(&mdev->tconn->conf_update);
3493 synchronize_rcu();
3494 if (new_net_conf)
3495 kfree(old_net_conf);
3496 kfree(old_disk_conf);
813472ce 3497 kfree(old_plan);
daeda1cc 3498
82bc0194 3499 return 0;
b411b363 3500
813472ce
PR
3501reconnect:
3502 if (new_disk_conf) {
3503 put_ldev(mdev);
3504 kfree(new_disk_conf);
3505 }
3506 mutex_unlock(&mdev->tconn->conf_update);
3507 return -EIO;
3508
b411b363 3509disconnect:
813472ce
PR
3510 kfree(new_plan);
3511 if (new_disk_conf) {
3512 put_ldev(mdev);
3513 kfree(new_disk_conf);
3514 }
a0095508 3515 mutex_unlock(&mdev->tconn->conf_update);
b411b363
PR
3516 /* just for completeness: actually not needed,
3517 * as this is not reached if csums_tfm was ok. */
3518 crypto_free_hash(csums_tfm);
3519 /* but free the verify_tfm again, if csums_tfm did not work out */
3520 crypto_free_hash(verify_tfm);
38fa9988 3521 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3522 return -EIO;
b411b363
PR
3523}
3524
b411b363
PR
3525/* warn if the arguments differ by more than 12.5% */
3526static void warn_if_differ_considerably(struct drbd_conf *mdev,
3527 const char *s, sector_t a, sector_t b)
3528{
3529 sector_t d;
3530 if (a == 0 || b == 0)
3531 return;
3532 d = (a > b) ? (a - b) : (b - a);
3533 if (d > (a>>3) || d > (b>>3))
3534 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3535 (unsigned long long)a, (unsigned long long)b);
3536}
3537
4a76b161 3538static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3539{
4a76b161 3540 struct drbd_conf *mdev;
e658983a 3541 struct p_sizes *p = pi->data;
e96c9633 3542 enum determine_dev_size dd = DS_UNCHANGED;
b411b363
PR
3543 sector_t p_size, p_usize, my_usize;
3544 int ldsc = 0; /* local disk size changed */
e89b591c 3545 enum dds_flags ddsf;
b411b363 3546
4a76b161
AG
3547 mdev = vnr_to_mdev(tconn, pi->vnr);
3548 if (!mdev)
3549 return config_unknown_volume(tconn, pi);
3550
b411b363
PR
3551 p_size = be64_to_cpu(p->d_size);
3552 p_usize = be64_to_cpu(p->u_size);
3553
b411b363
PR
3554 /* just store the peer's disk size for now.
3555 * we still need to figure out whether we accept that. */
3556 mdev->p_size = p_size;
3557
b411b363 3558 if (get_ldev(mdev)) {
daeda1cc
PR
3559 rcu_read_lock();
3560 my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
3561 rcu_read_unlock();
3562
b411b363
PR
3563 warn_if_differ_considerably(mdev, "lower level device sizes",
3564 p_size, drbd_get_max_capacity(mdev->ldev));
3565 warn_if_differ_considerably(mdev, "user requested size",
daeda1cc 3566 p_usize, my_usize);
b411b363
PR
3567
3568 /* if this is the first connect, or an otherwise expected
3569 * param exchange, choose the minimum */
3570 if (mdev->state.conn == C_WF_REPORT_PARAMS)
daeda1cc 3571 p_usize = min_not_zero(my_usize, p_usize);
b411b363
PR
3572
3573 /* Never shrink a device with usable data during connect.
3574 But allow online shrinking if we are connected. */
ef5e44a6 3575 if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
daeda1cc
PR
3576 drbd_get_capacity(mdev->this_bdev) &&
3577 mdev->state.disk >= D_OUTDATED &&
3578 mdev->state.conn < C_CONNECTED) {
b411b363 3579 dev_err(DEV, "The peer's disk size is too small!\n");
38fa9988 3580 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363 3581 put_ldev(mdev);
82bc0194 3582 return -EIO;
b411b363 3583 }
daeda1cc
PR
3584
3585 if (my_usize != p_usize) {
3586 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
3587
3588 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3589 if (!new_disk_conf) {
3590 dev_err(DEV, "Allocation of new disk_conf failed\n");
3591 put_ldev(mdev);
3592 return -ENOMEM;
3593 }
3594
3595 mutex_lock(&mdev->tconn->conf_update);
3596 old_disk_conf = mdev->ldev->disk_conf;
3597 *new_disk_conf = *old_disk_conf;
3598 new_disk_conf->disk_size = p_usize;
3599
3600 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3601 mutex_unlock(&mdev->tconn->conf_update);
3602 synchronize_rcu();
3603 kfree(old_disk_conf);
3604
3605 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3606 (unsigned long)my_usize);
b411b363 3607 }
daeda1cc 3608
b411b363
PR
3609 put_ldev(mdev);
3610 }
b411b363 3611
e89b591c 3612 ddsf = be16_to_cpu(p->dds_flags);
b411b363 3613 if (get_ldev(mdev)) {
d752b269 3614 dd = drbd_determine_dev_size(mdev, ddsf, NULL);
b411b363 3615 put_ldev(mdev);
e96c9633 3616 if (dd == DS_ERROR)
82bc0194 3617 return -EIO;
b411b363
PR
3618 drbd_md_sync(mdev);
3619 } else {
3620 /* I am diskless, need to accept the peer's size. */
3621 drbd_set_my_capacity(mdev, p_size);
3622 }
3623
99432fcc
PR
3624 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3625 drbd_reconsider_max_bio_size(mdev);
3626
b411b363
PR
3627 if (get_ldev(mdev)) {
3628 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3629 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3630 ldsc = 1;
3631 }
3632
b411b363
PR
3633 put_ldev(mdev);
3634 }
3635
3636 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3637 if (be64_to_cpu(p->c_size) !=
3638 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3639 /* we have different sizes, probably peer
3640 * needs to know my new size... */
e89b591c 3641 drbd_send_sizes(mdev, 0, ddsf);
b411b363
PR
3642 }
3643 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
e96c9633 3644 (dd == DS_GREW && mdev->state.conn == C_CONNECTED)) {
b411b363 3645 if (mdev->state.pdsk >= D_INCONSISTENT &&
e89b591c
PR
3646 mdev->state.disk >= D_INCONSISTENT) {
3647 if (ddsf & DDSF_NO_RESYNC)
3648 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3649 else
3650 resync_after_online_grow(mdev);
3651 } else
b411b363
PR
3652 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3653 }
3654 }
3655
82bc0194 3656 return 0;
b411b363
PR
3657}
3658
4a76b161 3659static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3660{
4a76b161 3661 struct drbd_conf *mdev;
e658983a 3662 struct p_uuids *p = pi->data;
b411b363 3663 u64 *p_uuid;
62b0da3a 3664 int i, updated_uuids = 0;
b411b363 3665
4a76b161
AG
3666 mdev = vnr_to_mdev(tconn, pi->vnr);
3667 if (!mdev)
3668 return config_unknown_volume(tconn, pi);
3669
b411b363 3670 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
063eacf8
JW
3671 if (!p_uuid) {
3672 dev_err(DEV, "kmalloc of p_uuid failed\n");
3673 return false;
3674 }
b411b363
PR
3675
3676 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3677 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3678
3679 kfree(mdev->p_uuid);
3680 mdev->p_uuid = p_uuid;
3681
3682 if (mdev->state.conn < C_CONNECTED &&
3683 mdev->state.disk < D_INCONSISTENT &&
3684 mdev->state.role == R_PRIMARY &&
3685 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3686 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3687 (unsigned long long)mdev->ed_uuid);
38fa9988 3688 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3689 return -EIO;
b411b363
PR
3690 }
3691
3692 if (get_ldev(mdev)) {
3693 int skip_initial_sync =
3694 mdev->state.conn == C_CONNECTED &&
31890f4a 3695 mdev->tconn->agreed_pro_version >= 90 &&
b411b363
PR
3696 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3697 (p_uuid[UI_FLAGS] & 8);
3698 if (skip_initial_sync) {
3699 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3700 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
20ceb2b2
LE
3701 "clear_n_write from receive_uuids",
3702 BM_LOCKED_TEST_ALLOWED);
b411b363
PR
3703 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3704 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3705 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3706 CS_VERBOSE, NULL);
3707 drbd_md_sync(mdev);
62b0da3a 3708 updated_uuids = 1;
b411b363
PR
3709 }
3710 put_ldev(mdev);
18a50fa2
PR
3711 } else if (mdev->state.disk < D_INCONSISTENT &&
3712 mdev->state.role == R_PRIMARY) {
3713 /* I am a diskless primary, the peer just created a new current UUID
3714 for me. */
62b0da3a 3715 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
b411b363
PR
3716 }
3717
3718 /* Before we test for the disk state, we should wait until an eventually
3719 ongoing cluster wide state change is finished. That is important if
3720 we are primary and are detaching from our disk. We need to see the
3721 new disk state... */
8410da8f
PR
3722 mutex_lock(mdev->state_mutex);
3723 mutex_unlock(mdev->state_mutex);
b411b363 3724 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
62b0da3a
LE
3725 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3726
3727 if (updated_uuids)
3728 drbd_print_uuids(mdev, "receiver updated UUIDs to");
b411b363 3729
82bc0194 3730 return 0;
b411b363
PR
3731}
3732
3733/**
3734 * convert_state() - Converts the peer's view of the cluster state to our point of view
3735 * @ps: The state as seen by the peer.
3736 */
3737static union drbd_state convert_state(union drbd_state ps)
3738{
3739 union drbd_state ms;
3740
3741 static enum drbd_conns c_tab[] = {
369bea63 3742 [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
b411b363
PR
3743 [C_CONNECTED] = C_CONNECTED,
3744
3745 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3746 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3747 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3748 [C_VERIFY_S] = C_VERIFY_T,
3749 [C_MASK] = C_MASK,
3750 };
3751
3752 ms.i = ps.i;
3753
3754 ms.conn = c_tab[ps.conn];
3755 ms.peer = ps.role;
3756 ms.role = ps.peer;
3757 ms.pdsk = ps.disk;
3758 ms.disk = ps.pdsk;
3759 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3760
3761 return ms;
3762}
3763
4a76b161 3764static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3765{
4a76b161 3766 struct drbd_conf *mdev;
e658983a 3767 struct p_req_state *p = pi->data;
b411b363 3768 union drbd_state mask, val;
bf885f8a 3769 enum drbd_state_rv rv;
b411b363 3770
4a76b161
AG
3771 mdev = vnr_to_mdev(tconn, pi->vnr);
3772 if (!mdev)
3773 return -EIO;
3774
b411b363
PR
3775 mask.i = be32_to_cpu(p->mask);
3776 val.i = be32_to_cpu(p->val);
3777
427c0434 3778 if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) &&
8410da8f 3779 mutex_is_locked(mdev->state_mutex)) {
b411b363 3780 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
82bc0194 3781 return 0;
b411b363
PR
3782 }
3783
3784 mask = convert_state(mask);
3785 val = convert_state(val);
3786
dfafcc8a
PR
3787 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3788 drbd_send_sr_reply(mdev, rv);
b411b363 3789
b411b363
PR
3790 drbd_md_sync(mdev);
3791
82bc0194 3792 return 0;
b411b363
PR
3793}
3794
e2857216 3795static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3796{
e658983a 3797 struct p_req_state *p = pi->data;
b411b363 3798 union drbd_state mask, val;
bf885f8a 3799 enum drbd_state_rv rv;
b411b363 3800
b411b363
PR
3801 mask.i = be32_to_cpu(p->mask);
3802 val.i = be32_to_cpu(p->val);
3803
427c0434 3804 if (test_bit(RESOLVE_CONFLICTS, &tconn->flags) &&
dfafcc8a
PR
3805 mutex_is_locked(&tconn->cstate_mutex)) {
3806 conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
82bc0194 3807 return 0;
b411b363
PR
3808 }
3809
3810 mask = convert_state(mask);
3811 val = convert_state(val);
3812
778bcf2e 3813 rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
dfafcc8a 3814 conn_send_sr_reply(tconn, rv);
b411b363 3815
82bc0194 3816 return 0;
b411b363
PR
3817}
3818
4a76b161 3819static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3820{
4a76b161 3821 struct drbd_conf *mdev;
e658983a 3822 struct p_state *p = pi->data;
4ac4aada 3823 union drbd_state os, ns, peer_state;
b411b363 3824 enum drbd_disk_state real_peer_disk;
65d922c3 3825 enum chg_state_flags cs_flags;
b411b363
PR
3826 int rv;
3827
4a76b161
AG
3828 mdev = vnr_to_mdev(tconn, pi->vnr);
3829 if (!mdev)
3830 return config_unknown_volume(tconn, pi);
3831
b411b363
PR
3832 peer_state.i = be32_to_cpu(p->state);
3833
3834 real_peer_disk = peer_state.disk;
3835 if (peer_state.disk == D_NEGOTIATING) {
3836 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3837 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3838 }
3839
87eeee41 3840 spin_lock_irq(&mdev->tconn->req_lock);
b411b363 3841 retry:
78bae59b 3842 os = ns = drbd_read_state(mdev);
87eeee41 3843 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 3844
545752d5
LE
3845 /* If some other part of the code (asender thread, timeout)
3846 * already decided to close the connection again,
3847 * we must not "re-establish" it here. */
3848 if (os.conn <= C_TEAR_DOWN)
58ffa580 3849 return -ECONNRESET;
545752d5 3850
40424e4a
LE
3851 /* If this is the "end of sync" confirmation, usually the peer disk
3852 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
3853 * set) resync started in PausedSyncT, or if the timing of pause-/
3854 * unpause-sync events has been "just right", the peer disk may
3855 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
3856 */
3857 if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
3858 real_peer_disk == D_UP_TO_DATE &&
e9ef7bb6
LE
3859 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3860 /* If we are (becoming) SyncSource, but peer is still in sync
3861 * preparation, ignore its uptodate-ness to avoid flapping, it
3862 * will change to inconsistent once the peer reaches active
3863 * syncing states.
3864 * It may have changed syncer-paused flags, however, so we
3865 * cannot ignore this completely. */
3866 if (peer_state.conn > C_CONNECTED &&
3867 peer_state.conn < C_SYNC_SOURCE)
3868 real_peer_disk = D_INCONSISTENT;
3869
3870 /* if peer_state changes to connected at the same time,
3871 * it explicitly notifies us that it finished resync.
3872 * Maybe we should finish it up, too? */
3873 else if (os.conn >= C_SYNC_SOURCE &&
3874 peer_state.conn == C_CONNECTED) {
3875 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3876 drbd_resync_finished(mdev);
82bc0194 3877 return 0;
e9ef7bb6
LE
3878 }
3879 }
3880
02b91b55
LE
3881 /* explicit verify finished notification, stop sector reached. */
3882 if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
3883 peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
58ffa580 3884 ov_out_of_sync_print(mdev);
02b91b55 3885 drbd_resync_finished(mdev);
58ffa580 3886 return 0;
02b91b55
LE
3887 }
3888
e9ef7bb6
LE
3889 /* peer says his disk is inconsistent, while we think it is uptodate,
3890 * and this happens while the peer still thinks we have a sync going on,
3891 * but we think we are already done with the sync.
3892 * We ignore this to avoid flapping pdsk.
3893 * This should not happen, if the peer is a recent version of drbd. */
3894 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3895 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3896 real_peer_disk = D_UP_TO_DATE;
3897
4ac4aada
LE
3898 if (ns.conn == C_WF_REPORT_PARAMS)
3899 ns.conn = C_CONNECTED;
b411b363 3900
67531718
PR
3901 if (peer_state.conn == C_AHEAD)
3902 ns.conn = C_BEHIND;
3903
b411b363
PR
3904 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3905 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3906 int cr; /* consider resync */
3907
3908 /* if we established a new connection */
4ac4aada 3909 cr = (os.conn < C_CONNECTED);
b411b363
PR
3910 /* if we had an established connection
3911 * and one of the nodes newly attaches a disk */
4ac4aada 3912 cr |= (os.conn == C_CONNECTED &&
b411b363 3913 (peer_state.disk == D_NEGOTIATING ||
4ac4aada 3914 os.disk == D_NEGOTIATING));
b411b363
PR
3915 /* if we have both been inconsistent, and the peer has been
3916 * forced to be UpToDate with --overwrite-data */
3917 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3918 /* if we had been plain connected, and the admin requested to
3919 * start a sync by "invalidate" or "invalidate-remote" */
4ac4aada 3920 cr |= (os.conn == C_CONNECTED &&
b411b363
PR
3921 (peer_state.conn >= C_STARTING_SYNC_S &&
3922 peer_state.conn <= C_WF_BITMAP_T));
3923
3924 if (cr)
4ac4aada 3925 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
b411b363
PR
3926
3927 put_ldev(mdev);
4ac4aada
LE
3928 if (ns.conn == C_MASK) {
3929 ns.conn = C_CONNECTED;
b411b363 3930 if (mdev->state.disk == D_NEGOTIATING) {
82f59cc6 3931 drbd_force_state(mdev, NS(disk, D_FAILED));
b411b363
PR
3932 } else if (peer_state.disk == D_NEGOTIATING) {
3933 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3934 peer_state.disk = D_DISKLESS;
580b9767 3935 real_peer_disk = D_DISKLESS;
b411b363 3936 } else {
8169e41b 3937 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
82bc0194 3938 return -EIO;
4ac4aada 3939 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
38fa9988 3940 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3941 return -EIO;
b411b363
PR
3942 }
3943 }
3944 }
3945
87eeee41 3946 spin_lock_irq(&mdev->tconn->req_lock);
78bae59b 3947 if (os.i != drbd_read_state(mdev).i)
b411b363
PR
3948 goto retry;
3949 clear_bit(CONSIDER_RESYNC, &mdev->flags);
b411b363
PR
3950 ns.peer = peer_state.role;
3951 ns.pdsk = real_peer_disk;
3952 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
4ac4aada 3953 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
b411b363 3954 ns.disk = mdev->new_state_tmp.disk;
4ac4aada 3955 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
2aebfabb 3956 if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
481c6f50 3957 test_bit(NEW_CUR_UUID, &mdev->flags)) {
8554df1c 3958 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
481c6f50 3959 for temporal network outages! */
87eeee41 3960 spin_unlock_irq(&mdev->tconn->req_lock);
481c6f50 3961 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
2f5cdd0b 3962 tl_clear(mdev->tconn);
481c6f50
PR
3963 drbd_uuid_new_current(mdev);
3964 clear_bit(NEW_CUR_UUID, &mdev->flags);
38fa9988 3965 conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
82bc0194 3966 return -EIO;
481c6f50 3967 }
65d922c3 3968 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
78bae59b 3969 ns = drbd_read_state(mdev);
87eeee41 3970 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363
PR
3971
3972 if (rv < SS_SUCCESS) {
38fa9988 3973 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3974 return -EIO;
b411b363
PR
3975 }
3976
4ac4aada
LE
3977 if (os.conn > C_WF_REPORT_PARAMS) {
3978 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
b411b363
PR
3979 peer_state.disk != D_NEGOTIATING ) {
3980 /* we want resync, peer has not yet decided to sync... */
3981 /* Nowadays only used when forcing a node into primary role and
3982 setting its disk to UpToDate with that */
3983 drbd_send_uuids(mdev);
f479ea06 3984 drbd_send_current_state(mdev);
b411b363
PR
3985 }
3986 }
3987
08b165ba 3988 clear_bit(DISCARD_MY_DATA, &mdev->flags);
b411b363 3989
cccac985 3990 drbd_md_sync(mdev); /* update connected indicator, la_size_sect, ... */
b411b363 3991
82bc0194 3992 return 0;
b411b363
PR
3993}
3994
4a76b161 3995static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3996{
4a76b161 3997 struct drbd_conf *mdev;
e658983a 3998 struct p_rs_uuid *p = pi->data;
4a76b161
AG
3999
4000 mdev = vnr_to_mdev(tconn, pi->vnr);
4001 if (!mdev)
4002 return -EIO;
b411b363
PR
4003
4004 wait_event(mdev->misc_wait,
4005 mdev->state.conn == C_WF_SYNC_UUID ||
c4752ef1 4006 mdev->state.conn == C_BEHIND ||
b411b363
PR
4007 mdev->state.conn < C_CONNECTED ||
4008 mdev->state.disk < D_NEGOTIATING);
4009
4010 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
4011
b411b363
PR
4012 /* Here the _drbd_uuid_ functions are right, current should
4013 _not_ be rotated into the history */
4014 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
4015 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
4016 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
4017
62b0da3a 4018 drbd_print_uuids(mdev, "updated sync uuid");
b411b363
PR
4019 drbd_start_resync(mdev, C_SYNC_TARGET);
4020
4021 put_ldev(mdev);
4022 } else
4023 dev_err(DEV, "Ignoring SyncUUID packet!\n");
4024
82bc0194 4025 return 0;
b411b363
PR
4026}
4027
2c46407d
AG
4028/**
4029 * receive_bitmap_plain
4030 *
4031 * Return 0 when done, 1 when another iteration is needed, and a negative error
4032 * code upon failure.
4033 */
4034static int
50d0b1ad 4035receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
e658983a 4036 unsigned long *p, struct bm_xfer_ctx *c)
b411b363 4037{
50d0b1ad
AG
4038 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
4039 drbd_header_size(mdev->tconn);
e658983a 4040 unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
50d0b1ad 4041 c->bm_words - c->word_offset);
e658983a 4042 unsigned int want = num_words * sizeof(*p);
2c46407d 4043 int err;
b411b363 4044
50d0b1ad
AG
4045 if (want != size) {
4046 dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size);
2c46407d 4047 return -EIO;
b411b363
PR
4048 }
4049 if (want == 0)
2c46407d 4050 return 0;
e658983a 4051 err = drbd_recv_all(mdev->tconn, p, want);
82bc0194 4052 if (err)
2c46407d 4053 return err;
b411b363 4054
e658983a 4055 drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);
b411b363
PR
4056
4057 c->word_offset += num_words;
4058 c->bit_offset = c->word_offset * BITS_PER_LONG;
4059 if (c->bit_offset > c->bm_bits)
4060 c->bit_offset = c->bm_bits;
4061
2c46407d 4062 return 1;
b411b363
PR
4063}
4064
a02d1240
AG
4065static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
4066{
4067 return (enum drbd_bitmap_code)(p->encoding & 0x0f);
4068}
4069
4070static int dcbp_get_start(struct p_compressed_bm *p)
4071{
4072 return (p->encoding & 0x80) != 0;
4073}
4074
4075static int dcbp_get_pad_bits(struct p_compressed_bm *p)
4076{
4077 return (p->encoding >> 4) & 0x7;
4078}
4079
2c46407d
AG
4080/**
4081 * recv_bm_rle_bits
4082 *
4083 * Return 0 when done, 1 when another iteration is needed, and a negative error
4084 * code upon failure.
4085 */
4086static int
b411b363
PR
4087recv_bm_rle_bits(struct drbd_conf *mdev,
4088 struct p_compressed_bm *p,
c6d25cfe
PR
4089 struct bm_xfer_ctx *c,
4090 unsigned int len)
b411b363
PR
4091{
4092 struct bitstream bs;
4093 u64 look_ahead;
4094 u64 rl;
4095 u64 tmp;
4096 unsigned long s = c->bit_offset;
4097 unsigned long e;
a02d1240 4098 int toggle = dcbp_get_start(p);
b411b363
PR
4099 int have;
4100 int bits;
4101
a02d1240 4102 bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
b411b363
PR
4103
4104 bits = bitstream_get_bits(&bs, &look_ahead, 64);
4105 if (bits < 0)
2c46407d 4106 return -EIO;
b411b363
PR
4107
4108 for (have = bits; have > 0; s += rl, toggle = !toggle) {
4109 bits = vli_decode_bits(&rl, look_ahead);
4110 if (bits <= 0)
2c46407d 4111 return -EIO;
b411b363
PR
4112
4113 if (toggle) {
4114 e = s + rl -1;
4115 if (e >= c->bm_bits) {
4116 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
2c46407d 4117 return -EIO;
b411b363
PR
4118 }
4119 _drbd_bm_set_bits(mdev, s, e);
4120 }
4121
4122 if (have < bits) {
4123 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
4124 have, bits, look_ahead,
4125 (unsigned int)(bs.cur.b - p->code),
4126 (unsigned int)bs.buf_len);
2c46407d 4127 return -EIO;
b411b363 4128 }
d2da5b0c
LE
4129 /* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */
4130 if (likely(bits < 64))
4131 look_ahead >>= bits;
4132 else
4133 look_ahead = 0;
b411b363
PR
4134 have -= bits;
4135
4136 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
4137 if (bits < 0)
2c46407d 4138 return -EIO;
b411b363
PR
4139 look_ahead |= tmp << have;
4140 have += bits;
4141 }
4142
4143 c->bit_offset = s;
4144 bm_xfer_ctx_bit_to_word_offset(c);
4145
2c46407d 4146 return (s != c->bm_bits);
b411b363
PR
4147}
4148
2c46407d
AG
4149/**
4150 * decode_bitmap_c
4151 *
4152 * Return 0 when done, 1 when another iteration is needed, and a negative error
4153 * code upon failure.
4154 */
4155static int
b411b363
PR
4156decode_bitmap_c(struct drbd_conf *mdev,
4157 struct p_compressed_bm *p,
c6d25cfe
PR
4158 struct bm_xfer_ctx *c,
4159 unsigned int len)
b411b363 4160{
a02d1240 4161 if (dcbp_get_code(p) == RLE_VLI_Bits)
e658983a 4162 return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));
b411b363
PR
4163
4164 /* other variants had been implemented for evaluation,
4165 * but have been dropped as this one turned out to be "best"
4166 * during all our tests. */
4167
4168 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
38fa9988 4169 conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
2c46407d 4170 return -EIO;
b411b363
PR
4171}
4172
4173void INFO_bm_xfer_stats(struct drbd_conf *mdev,
4174 const char *direction, struct bm_xfer_ctx *c)
4175{
4176 /* what would it take to transfer it "plaintext" */
50d0b1ad
AG
4177 unsigned int header_size = drbd_header_size(mdev->tconn);
4178 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
4179 unsigned int plain =
4180 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
4181 c->bm_words * sizeof(unsigned long);
4182 unsigned int total = c->bytes[0] + c->bytes[1];
4183 unsigned int r;
b411b363
PR
4184
4185 /* total can not be zero. but just in case: */
4186 if (total == 0)
4187 return;
4188
4189 /* don't report if not compressed */
4190 if (total >= plain)
4191 return;
4192
4193 /* total < plain. check for overflow, still */
4194 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
4195 : (1000 * total / plain);
4196
4197 if (r > 1000)
4198 r = 1000;
4199
4200 r = 1000 - r;
4201 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
4202 "total %u; compression: %u.%u%%\n",
4203 direction,
4204 c->bytes[1], c->packets[1],
4205 c->bytes[0], c->packets[0],
4206 total, r/10, r % 10);
4207}
4208
4209/* Since we are processing the bitfield from lower addresses to higher,
4210 it does not matter if the process it in 32 bit chunks or 64 bit
4211 chunks as long as it is little endian. (Understand it as byte stream,
4212 beginning with the lowest byte...) If we would use big endian
4213 we would need to process it from the highest address to the lowest,
4214 in order to be agnostic to the 32 vs 64 bits issue.
4215
4216 returns 0 on failure, 1 if we successfully received it. */
4a76b161 4217static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4218{
4a76b161 4219 struct drbd_conf *mdev;
b411b363 4220 struct bm_xfer_ctx c;
2c46407d 4221 int err;
4a76b161
AG
4222
4223 mdev = vnr_to_mdev(tconn, pi->vnr);
4224 if (!mdev)
4225 return -EIO;
b411b363 4226
20ceb2b2
LE
4227 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
4228 /* you are supposed to send additional out-of-sync information
4229 * if you actually set bits during this phase */
b411b363 4230
b411b363
PR
4231 c = (struct bm_xfer_ctx) {
4232 .bm_bits = drbd_bm_bits(mdev),
4233 .bm_words = drbd_bm_words(mdev),
4234 };
4235
2c46407d 4236 for(;;) {
e658983a
AG
4237 if (pi->cmd == P_BITMAP)
4238 err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
4239 else if (pi->cmd == P_COMPRESSED_BITMAP) {
b411b363
PR
4240 /* MAYBE: sanity check that we speak proto >= 90,
4241 * and the feature is enabled! */
e658983a 4242 struct p_compressed_bm *p = pi->data;
b411b363 4243
50d0b1ad 4244 if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
b411b363 4245 dev_err(DEV, "ReportCBitmap packet too large\n");
82bc0194 4246 err = -EIO;
b411b363
PR
4247 goto out;
4248 }
e658983a 4249 if (pi->size <= sizeof(*p)) {
e2857216 4250 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
82bc0194 4251 err = -EIO;
78fcbdae 4252 goto out;
b411b363 4253 }
e658983a
AG
4254 err = drbd_recv_all(mdev->tconn, p, pi->size);
4255 if (err)
4256 goto out;
e2857216 4257 err = decode_bitmap_c(mdev, p, &c, pi->size);
b411b363 4258 } else {
e2857216 4259 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
82bc0194 4260 err = -EIO;
b411b363
PR
4261 goto out;
4262 }
4263
e2857216 4264 c.packets[pi->cmd == P_BITMAP]++;
50d0b1ad 4265 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;
b411b363 4266
2c46407d
AG
4267 if (err <= 0) {
4268 if (err < 0)
4269 goto out;
b411b363 4270 break;
2c46407d 4271 }
e2857216 4272 err = drbd_recv_header(mdev->tconn, pi);
82bc0194 4273 if (err)
b411b363 4274 goto out;
2c46407d 4275 }
b411b363
PR
4276
4277 INFO_bm_xfer_stats(mdev, "receive", &c);
4278
4279 if (mdev->state.conn == C_WF_BITMAP_T) {
de1f8e4a
AG
4280 enum drbd_state_rv rv;
4281
82bc0194
AG
4282 err = drbd_send_bitmap(mdev);
4283 if (err)
b411b363
PR
4284 goto out;
4285 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
de1f8e4a
AG
4286 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
4287 D_ASSERT(rv == SS_SUCCESS);
b411b363
PR
4288 } else if (mdev->state.conn != C_WF_BITMAP_S) {
4289 /* admin may have requested C_DISCONNECTING,
4290 * other threads may have noticed network errors */
4291 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
4292 drbd_conn_str(mdev->state.conn));
4293 }
82bc0194 4294 err = 0;
b411b363 4295
b411b363 4296 out:
20ceb2b2 4297 drbd_bm_unlock(mdev);
82bc0194 4298 if (!err && mdev->state.conn == C_WF_BITMAP_S)
b411b363 4299 drbd_start_resync(mdev, C_SYNC_SOURCE);
82bc0194 4300 return err;
b411b363
PR
4301}
4302
4a76b161 4303static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4304{
4a76b161 4305 conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
e2857216 4306 pi->cmd, pi->size);
b411b363 4307
4a76b161 4308 return ignore_remaining_packet(tconn, pi);
b411b363
PR
4309}
4310
4a76b161 4311static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
0ced55a3 4312{
e7f52dfb
LE
4313 /* Make sure we've acked all the TCP data associated
4314 * with the data requests being unplugged */
4a76b161 4315 drbd_tcp_quickack(tconn->data.socket);
0ced55a3 4316
82bc0194 4317 return 0;
0ced55a3
PR
4318}
4319
4a76b161 4320static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
73a01a18 4321{
4a76b161 4322 struct drbd_conf *mdev;
e658983a 4323 struct p_block_desc *p = pi->data;
4a76b161
AG
4324
4325 mdev = vnr_to_mdev(tconn, pi->vnr);
4326 if (!mdev)
4327 return -EIO;
73a01a18 4328
f735e363
LE
4329 switch (mdev->state.conn) {
4330 case C_WF_SYNC_UUID:
4331 case C_WF_BITMAP_T:
4332 case C_BEHIND:
4333 break;
4334 default:
4335 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
4336 drbd_conn_str(mdev->state.conn));
4337 }
4338
73a01a18
PR
4339 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
4340
82bc0194 4341 return 0;
73a01a18
PR
4342}
4343
02918be2
PR
4344struct data_cmd {
4345 int expect_payload;
4346 size_t pkt_size;
4a76b161 4347 int (*fn)(struct drbd_tconn *, struct packet_info *);
02918be2
PR
4348};
4349
4350static struct data_cmd drbd_cmd_handler[] = {
4351 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
4352 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
4353 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
4354 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
e658983a
AG
4355 [P_BITMAP] = { 1, 0, receive_bitmap } ,
4356 [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
4357 [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote },
02918be2
PR
4358 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4359 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
e658983a
AG
4360 [P_SYNC_PARAM] = { 1, 0, receive_SyncParam },
4361 [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam },
02918be2
PR
4362 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
4363 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
4364 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
4365 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
4366 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
4367 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
4368 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4369 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4370 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4371 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
73a01a18 4372 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
4a76b161 4373 [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
036b17ea 4374 [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
b411b363
PR
4375};
4376
eefc2f7d 4377static void drbdd(struct drbd_tconn *tconn)
b411b363 4378{
77351055 4379 struct packet_info pi;
02918be2 4380 size_t shs; /* sub header size */
82bc0194 4381 int err;
b411b363 4382
eefc2f7d 4383 while (get_t_state(&tconn->receiver) == RUNNING) {
deebe195 4384 struct data_cmd *cmd;
b411b363 4385
eefc2f7d 4386 drbd_thread_current_set_cpu(&tconn->receiver);
69bc7bc3 4387 if (drbd_recv_header(tconn, &pi))
02918be2 4388 goto err_out;
b411b363 4389
deebe195 4390 cmd = &drbd_cmd_handler[pi.cmd];
4a76b161 4391 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
2fcb8f30
AG
4392 conn_err(tconn, "Unexpected data packet %s (0x%04x)",
4393 cmdname(pi.cmd), pi.cmd);
02918be2 4394 goto err_out;
0b33a916 4395 }
b411b363 4396
e658983a
AG
4397 shs = cmd->pkt_size;
4398 if (pi.size > shs && !cmd->expect_payload) {
2fcb8f30
AG
4399 conn_err(tconn, "No payload expected %s l:%d\n",
4400 cmdname(pi.cmd), pi.size);
02918be2 4401 goto err_out;
b411b363 4402 }
b411b363 4403
c13f7e1a 4404 if (shs) {
e658983a 4405 err = drbd_recv_all_warn(tconn, pi.data, shs);
a5c31904 4406 if (err)
c13f7e1a 4407 goto err_out;
e2857216 4408 pi.size -= shs;
c13f7e1a
LE
4409 }
4410
4a76b161
AG
4411 err = cmd->fn(tconn, &pi);
4412 if (err) {
9f5bdc33
AG
4413 conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
4414 cmdname(pi.cmd), err, pi.size);
02918be2 4415 goto err_out;
b411b363
PR
4416 }
4417 }
82bc0194 4418 return;
b411b363 4419
82bc0194
AG
4420 err_out:
4421 conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
b411b363
PR
4422}
4423
0e29d163 4424void conn_flush_workqueue(struct drbd_tconn *tconn)
b411b363
PR
4425{
4426 struct drbd_wq_barrier barr;
4427
4428 barr.w.cb = w_prev_work_done;
0e29d163 4429 barr.w.tconn = tconn;
b411b363 4430 init_completion(&barr.done);
d5b27b01 4431 drbd_queue_work(&tconn->sender_work, &barr.w);
b411b363
PR
4432 wait_for_completion(&barr.done);
4433}
4434
81fa2e67 4435static void conn_disconnect(struct drbd_tconn *tconn)
b411b363 4436{
c141ebda 4437 struct drbd_conf *mdev;
bbeb641c 4438 enum drbd_conns oc;
376694a0 4439 int vnr;
b411b363 4440
bbeb641c 4441 if (tconn->cstate == C_STANDALONE)
b411b363 4442 return;
b411b363 4443
545752d5
LE
4444 /* We are about to start the cleanup after connection loss.
4445 * Make sure drbd_make_request knows about that.
4446 * Usually we should be in some network failure state already,
4447 * but just in case we are not, we fix it up here.
4448 */
b8853dbd 4449 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
545752d5 4450
b411b363 4451 /* asender does not clean up anything. it must not interfere, either */
360cc740
PR
4452 drbd_thread_stop(&tconn->asender);
4453 drbd_free_sock(tconn);
4454
c141ebda
PR
4455 rcu_read_lock();
4456 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
4457 kref_get(&mdev->kref);
4458 rcu_read_unlock();
4459 drbd_disconnected(mdev);
4460 kref_put(&mdev->kref, &drbd_minor_destroy);
4461 rcu_read_lock();
4462 }
4463 rcu_read_unlock();
4464
12038a3a
PR
4465 if (!list_empty(&tconn->current_epoch->list))
4466 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
4467 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
4468 atomic_set(&tconn->current_epoch->epoch_size, 0);
b6dd1a89 4469 tconn->send.seen_any_write_yet = false;
12038a3a 4470
360cc740
PR
4471 conn_info(tconn, "Connection closed\n");
4472
cb703454
PR
4473 if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
4474 conn_try_outdate_peer_async(tconn);
4475
360cc740 4476 spin_lock_irq(&tconn->req_lock);
bbeb641c
PR
4477 oc = tconn->cstate;
4478 if (oc >= C_UNCONNECTED)
376694a0 4479 _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
bbeb641c 4480
360cc740
PR
4481 spin_unlock_irq(&tconn->req_lock);
4482
f3dfa40a 4483 if (oc == C_DISCONNECTING)
d9cc6e23 4484 conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
360cc740
PR
4485}
4486
c141ebda 4487static int drbd_disconnected(struct drbd_conf *mdev)
360cc740 4488{
360cc740 4489 unsigned int i;
b411b363 4490
85719573 4491 /* wait for current activity to cease. */
87eeee41 4492 spin_lock_irq(&mdev->tconn->req_lock);
b411b363
PR
4493 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
4494 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
4495 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
87eeee41 4496 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363
PR
4497
4498 /* We do not have data structures that would allow us to
4499 * get the rs_pending_cnt down to 0 again.
4500 * * On C_SYNC_TARGET we do not have any data structures describing
4501 * the pending RSDataRequest's we have sent.
4502 * * On C_SYNC_SOURCE there is no data structure that tracks
4503 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
4504 * And no, it is not the sum of the reference counts in the
4505 * resync_LRU. The resync_LRU tracks the whole operation including
4506 * the disk-IO, while the rs_pending_cnt only tracks the blocks
4507 * on the fly. */
4508 drbd_rs_cancel_all(mdev);
4509 mdev->rs_total = 0;
4510 mdev->rs_failed = 0;
4511 atomic_set(&mdev->rs_pending_cnt, 0);
4512 wake_up(&mdev->misc_wait);
4513
b411b363 4514 del_timer_sync(&mdev->resync_timer);
b411b363
PR
4515 resync_timer_fn((unsigned long)mdev);
4516
b411b363
PR
4517 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
4518 * w_make_resync_request etc. which may still be on the worker queue
4519 * to be "canceled" */
4520 drbd_flush_workqueue(mdev);
4521
a990be46 4522 drbd_finish_peer_reqs(mdev);
b411b363 4523
d10b4ea3
PR
4524 /* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
4525 might have issued a work again. The one before drbd_finish_peer_reqs() is
4526 necessary to reclain net_ee in drbd_finish_peer_reqs(). */
4527 drbd_flush_workqueue(mdev);
4528
08332d73
LE
4529 /* need to do it again, drbd_finish_peer_reqs() may have populated it
4530 * again via drbd_try_clear_on_disk_bm(). */
4531 drbd_rs_cancel_all(mdev);
b411b363
PR
4532
4533 kfree(mdev->p_uuid);
4534 mdev->p_uuid = NULL;
4535
2aebfabb 4536 if (!drbd_suspended(mdev))
2f5cdd0b 4537 tl_clear(mdev->tconn);
b411b363
PR
4538
4539 drbd_md_sync(mdev);
4540
20ceb2b2
LE
4541 /* serialize with bitmap writeout triggered by the state change,
4542 * if any. */
4543 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
4544
b411b363
PR
4545 /* tcp_close and release of sendpage pages can be deferred. I don't
4546 * want to use SO_LINGER, because apparently it can be deferred for
4547 * more than 20 seconds (longest time I checked).
4548 *
4549 * Actually we don't care for exactly when the network stack does its
4550 * put_page(), but release our reference on these pages right here.
4551 */
7721f567 4552 i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
b411b363
PR
4553 if (i)
4554 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
435f0740
LE
4555 i = atomic_read(&mdev->pp_in_use_by_net);
4556 if (i)
4557 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
b411b363
PR
4558 i = atomic_read(&mdev->pp_in_use);
4559 if (i)
45bb912b 4560 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
b411b363
PR
4561
4562 D_ASSERT(list_empty(&mdev->read_ee));
4563 D_ASSERT(list_empty(&mdev->active_ee));
4564 D_ASSERT(list_empty(&mdev->sync_ee));
4565 D_ASSERT(list_empty(&mdev->done_ee));
4566
360cc740 4567 return 0;
b411b363
PR
4568}
4569
4570/*
4571 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
4572 * we can agree on is stored in agreed_pro_version.
4573 *
4574 * feature flags and the reserved array should be enough room for future
4575 * enhancements of the handshake protocol, and possible plugins...
4576 *
4577 * for now, they are expected to be zero, but ignored.
4578 */
6038178e 4579static int drbd_send_features(struct drbd_tconn *tconn)
b411b363 4580{
9f5bdc33
AG
4581 struct drbd_socket *sock;
4582 struct p_connection_features *p;
b411b363 4583
9f5bdc33
AG
4584 sock = &tconn->data;
4585 p = conn_prepare_command(tconn, sock);
4586 if (!p)
e8d17b01 4587 return -EIO;
b411b363
PR
4588 memset(p, 0, sizeof(*p));
4589 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
4590 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
9f5bdc33 4591 return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
b411b363
PR
4592}
4593
4594/*
4595 * return values:
4596 * 1 yes, we have a valid connection
4597 * 0 oops, did not work out, please try again
4598 * -1 peer talks different language,
4599 * no point in trying again, please go standalone.
4600 */
6038178e 4601static int drbd_do_features(struct drbd_tconn *tconn)
b411b363 4602{
65d11ed6 4603 /* ASSERT current == tconn->receiver ... */
e658983a
AG
4604 struct p_connection_features *p;
4605 const int expect = sizeof(struct p_connection_features);
77351055 4606 struct packet_info pi;
a5c31904 4607 int err;
b411b363 4608
6038178e 4609 err = drbd_send_features(tconn);
e8d17b01 4610 if (err)
b411b363
PR
4611 return 0;
4612
69bc7bc3
AG
4613 err = drbd_recv_header(tconn, &pi);
4614 if (err)
b411b363
PR
4615 return 0;
4616
6038178e
AG
4617 if (pi.cmd != P_CONNECTION_FEATURES) {
4618 conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
2fcb8f30 4619 cmdname(pi.cmd), pi.cmd);
b411b363
PR
4620 return -1;
4621 }
4622
77351055 4623 if (pi.size != expect) {
6038178e 4624 conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
77351055 4625 expect, pi.size);
b411b363
PR
4626 return -1;
4627 }
4628
e658983a
AG
4629 p = pi.data;
4630 err = drbd_recv_all_warn(tconn, p, expect);
a5c31904 4631 if (err)
b411b363 4632 return 0;
b411b363 4633
b411b363
PR
4634 p->protocol_min = be32_to_cpu(p->protocol_min);
4635 p->protocol_max = be32_to_cpu(p->protocol_max);
4636 if (p->protocol_max == 0)
4637 p->protocol_max = p->protocol_min;
4638
4639 if (PRO_VERSION_MAX < p->protocol_min ||
4640 PRO_VERSION_MIN > p->protocol_max)
4641 goto incompat;
4642
65d11ed6 4643 tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
b411b363 4644
65d11ed6
PR
4645 conn_info(tconn, "Handshake successful: "
4646 "Agreed network protocol version %d\n", tconn->agreed_pro_version);
b411b363
PR
4647
4648 return 1;
4649
4650 incompat:
65d11ed6 4651 conn_err(tconn, "incompatible DRBD dialects: "
b411b363
PR
4652 "I support %d-%d, peer supports %d-%d\n",
4653 PRO_VERSION_MIN, PRO_VERSION_MAX,
4654 p->protocol_min, p->protocol_max);
4655 return -1;
4656}
4657
4658#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
13e6037d 4659static int drbd_do_auth(struct drbd_tconn *tconn)
b411b363 4660{
ef57f9e6
PR
4661 conn_err(tconn, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
4662 conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
b10d96cb 4663 return -1;
b411b363
PR
4664}
4665#else
4666#define CHALLENGE_LEN 64
b10d96cb
JT
4667
4668/* Return value:
4669 1 - auth succeeded,
4670 0 - failed, try again (network error),
4671 -1 - auth failed, don't try again.
4672*/
4673
13e6037d 4674static int drbd_do_auth(struct drbd_tconn *tconn)
b411b363 4675{
9f5bdc33 4676 struct drbd_socket *sock;
b411b363
PR
4677 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4678 struct scatterlist sg;
4679 char *response = NULL;
4680 char *right_response = NULL;
4681 char *peers_ch = NULL;
44ed167d
PR
4682 unsigned int key_len;
4683 char secret[SHARED_SECRET_MAX]; /* 64 byte */
b411b363
PR
4684 unsigned int resp_size;
4685 struct hash_desc desc;
77351055 4686 struct packet_info pi;
44ed167d 4687 struct net_conf *nc;
69bc7bc3 4688 int err, rv;
b411b363 4689
9f5bdc33 4690 /* FIXME: Put the challenge/response into the preallocated socket buffer. */
b411b363 4691
44ed167d
PR
4692 rcu_read_lock();
4693 nc = rcu_dereference(tconn->net_conf);
4694 key_len = strlen(nc->shared_secret);
4695 memcpy(secret, nc->shared_secret, key_len);
4696 rcu_read_unlock();
4697
13e6037d 4698 desc.tfm = tconn->cram_hmac_tfm;
b411b363
PR
4699 desc.flags = 0;
4700
44ed167d 4701 rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len);
b411b363 4702 if (rv) {
13e6037d 4703 conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
b10d96cb 4704 rv = -1;
b411b363
PR
4705 goto fail;
4706 }
4707
4708 get_random_bytes(my_challenge, CHALLENGE_LEN);
4709
9f5bdc33
AG
4710 sock = &tconn->data;
4711 if (!conn_prepare_command(tconn, sock)) {
4712 rv = 0;
4713 goto fail;
4714 }
e658983a 4715 rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
9f5bdc33 4716 my_challenge, CHALLENGE_LEN);
b411b363
PR
4717 if (!rv)
4718 goto fail;
4719
69bc7bc3
AG
4720 err = drbd_recv_header(tconn, &pi);
4721 if (err) {
4722 rv = 0;
b411b363 4723 goto fail;
69bc7bc3 4724 }
b411b363 4725
77351055 4726 if (pi.cmd != P_AUTH_CHALLENGE) {
13e6037d 4727 conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
2fcb8f30 4728 cmdname(pi.cmd), pi.cmd);
b411b363
PR
4729 rv = 0;
4730 goto fail;
4731 }
4732
77351055 4733 if (pi.size > CHALLENGE_LEN * 2) {
13e6037d 4734 conn_err(tconn, "expected AuthChallenge payload too big.\n");
b10d96cb 4735 rv = -1;
b411b363
PR
4736 goto fail;
4737 }
4738
77351055 4739 peers_ch = kmalloc(pi.size, GFP_NOIO);
b411b363 4740 if (peers_ch == NULL) {
13e6037d 4741 conn_err(tconn, "kmalloc of peers_ch failed\n");
b10d96cb 4742 rv = -1;
b411b363
PR
4743 goto fail;
4744 }
4745
a5c31904
AG
4746 err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
4747 if (err) {
b411b363
PR
4748 rv = 0;
4749 goto fail;
4750 }
4751
13e6037d 4752 resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
b411b363
PR
4753 response = kmalloc(resp_size, GFP_NOIO);
4754 if (response == NULL) {
13e6037d 4755 conn_err(tconn, "kmalloc of response failed\n");
b10d96cb 4756 rv = -1;
b411b363
PR
4757 goto fail;
4758 }
4759
4760 sg_init_table(&sg, 1);
77351055 4761 sg_set_buf(&sg, peers_ch, pi.size);
b411b363
PR
4762
4763 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4764 if (rv) {
13e6037d 4765 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
b10d96cb 4766 rv = -1;
b411b363
PR
4767 goto fail;
4768 }
4769
9f5bdc33
AG
4770 if (!conn_prepare_command(tconn, sock)) {
4771 rv = 0;
b411b363 4772 goto fail;
9f5bdc33 4773 }
e658983a 4774 rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
9f5bdc33 4775 response, resp_size);
b411b363
PR
4776 if (!rv)
4777 goto fail;
4778
69bc7bc3
AG
4779 err = drbd_recv_header(tconn, &pi);
4780 if (err) {
b411b363
PR
4781 rv = 0;
4782 goto fail;
4783 }
4784
77351055 4785 if (pi.cmd != P_AUTH_RESPONSE) {
13e6037d 4786 conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
2fcb8f30 4787 cmdname(pi.cmd), pi.cmd);
b411b363
PR
4788 rv = 0;
4789 goto fail;
4790 }
4791
77351055 4792 if (pi.size != resp_size) {
13e6037d 4793 conn_err(tconn, "expected AuthResponse payload of wrong size\n");
b411b363
PR
4794 rv = 0;
4795 goto fail;
4796 }
b411b363 4797
a5c31904
AG
4798 err = drbd_recv_all_warn(tconn, response , resp_size);
4799 if (err) {
b411b363
PR
4800 rv = 0;
4801 goto fail;
4802 }
4803
4804 right_response = kmalloc(resp_size, GFP_NOIO);
2d1ee87d 4805 if (right_response == NULL) {
13e6037d 4806 conn_err(tconn, "kmalloc of right_response failed\n");
b10d96cb 4807 rv = -1;
b411b363
PR
4808 goto fail;
4809 }
4810
4811 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4812
4813 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4814 if (rv) {
13e6037d 4815 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
b10d96cb 4816 rv = -1;
b411b363
PR
4817 goto fail;
4818 }
4819
4820 rv = !memcmp(response, right_response, resp_size);
4821
4822 if (rv)
44ed167d
PR
4823 conn_info(tconn, "Peer authenticated using %d bytes HMAC\n",
4824 resp_size);
b10d96cb
JT
4825 else
4826 rv = -1;
b411b363
PR
4827
4828 fail:
4829 kfree(peers_ch);
4830 kfree(response);
4831 kfree(right_response);
4832
4833 return rv;
4834}
4835#endif
4836
4837int drbdd_init(struct drbd_thread *thi)
4838{
392c8801 4839 struct drbd_tconn *tconn = thi->tconn;
b411b363
PR
4840 int h;
4841
4d641dd7 4842 conn_info(tconn, "receiver (re)started\n");
b411b363
PR
4843
4844 do {
81fa2e67 4845 h = conn_connect(tconn);
b411b363 4846 if (h == 0) {
81fa2e67 4847 conn_disconnect(tconn);
20ee6390 4848 schedule_timeout_interruptible(HZ);
b411b363
PR
4849 }
4850 if (h == -1) {
4d641dd7 4851 conn_warn(tconn, "Discarding network configuration.\n");
bbeb641c 4852 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363
PR
4853 }
4854 } while (h == 0);
4855
91fd4dad
PR
4856 if (h > 0)
4857 drbdd(tconn);
b411b363 4858
81fa2e67 4859 conn_disconnect(tconn);
b411b363 4860
4d641dd7 4861 conn_info(tconn, "receiver terminated\n");
b411b363
PR
4862 return 0;
4863}
4864
4865/* ********* acknowledge sender ******** */
4866
e05e1e59 4867static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4868{
e658983a 4869 struct p_req_state_reply *p = pi->data;
e4f78ede
PR
4870 int retcode = be32_to_cpu(p->retcode);
4871
4872 if (retcode >= SS_SUCCESS) {
4873 set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
4874 } else {
4875 set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
4876 conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
4877 drbd_set_st_err_str(retcode), retcode);
4878 }
4879 wake_up(&tconn->ping_wait);
4880
2735a594 4881 return 0;
e4f78ede 4882}
b411b363 4883
1952e916 4884static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4885{
1952e916 4886 struct drbd_conf *mdev;
e658983a 4887 struct p_req_state_reply *p = pi->data;
b411b363
PR
4888 int retcode = be32_to_cpu(p->retcode);
4889
1952e916
AG
4890 mdev = vnr_to_mdev(tconn, pi->vnr);
4891 if (!mdev)
2735a594 4892 return -EIO;
1952e916 4893
4d0fc3fd
PR
4894 if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) {
4895 D_ASSERT(tconn->agreed_pro_version < 100);
4896 return got_conn_RqSReply(tconn, pi);
4897 }
4898
b411b363 4899 if (retcode >= SS_SUCCESS) {
e4f78ede 4900 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
b411b363 4901 } else {
e4f78ede 4902 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
b411b363 4903 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
e4f78ede 4904 drbd_set_st_err_str(retcode), retcode);
b411b363
PR
4905 }
4906 wake_up(&mdev->state_wait);
4907
2735a594 4908 return 0;
b411b363
PR
4909}
4910
e05e1e59 4911static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4912{
2735a594 4913 return drbd_send_ping_ack(tconn);
b411b363
PR
4914
4915}
4916
e05e1e59 4917static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363
PR
4918{
4919 /* restore idle timeout */
2a67d8b9
PR
4920 tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
4921 if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
4922 wake_up(&tconn->ping_wait);
b411b363 4923
2735a594 4924 return 0;
b411b363
PR
4925}
4926
1952e916 4927static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4928{
1952e916 4929 struct drbd_conf *mdev;
e658983a 4930 struct p_block_ack *p = pi->data;
b411b363
PR
4931 sector_t sector = be64_to_cpu(p->sector);
4932 int blksize = be32_to_cpu(p->blksize);
4933
1952e916
AG
4934 mdev = vnr_to_mdev(tconn, pi->vnr);
4935 if (!mdev)
2735a594 4936 return -EIO;
1952e916 4937
31890f4a 4938 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
b411b363
PR
4939
4940 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4941
1d53f09e
LE
4942 if (get_ldev(mdev)) {
4943 drbd_rs_complete_io(mdev, sector);
4944 drbd_set_in_sync(mdev, sector, blksize);
4945 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4946 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4947 put_ldev(mdev);
4948 }
b411b363 4949 dec_rs_pending(mdev);
778f271d 4950 atomic_add(blksize >> 9, &mdev->rs_sect_in);
b411b363 4951
2735a594 4952 return 0;
b411b363
PR
4953}
4954
bc9c5c41
AG
4955static int
4956validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
4957 struct rb_root *root, const char *func,
4958 enum drbd_req_event what, bool missing_ok)
b411b363
PR
4959{
4960 struct drbd_request *req;
4961 struct bio_and_error m;
4962
87eeee41 4963 spin_lock_irq(&mdev->tconn->req_lock);
bc9c5c41 4964 req = find_request(mdev, root, id, sector, missing_ok, func);
b411b363 4965 if (unlikely(!req)) {
87eeee41 4966 spin_unlock_irq(&mdev->tconn->req_lock);
85997675 4967 return -EIO;
b411b363
PR
4968 }
4969 __req_mod(req, what, &m);
87eeee41 4970 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363
PR
4971
4972 if (m.bio)
4973 complete_master_bio(mdev, &m);
85997675 4974 return 0;
b411b363
PR
4975}
4976
1952e916 4977static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4978{
1952e916 4979 struct drbd_conf *mdev;
e658983a 4980 struct p_block_ack *p = pi->data;
b411b363
PR
4981 sector_t sector = be64_to_cpu(p->sector);
4982 int blksize = be32_to_cpu(p->blksize);
4983 enum drbd_req_event what;
4984
1952e916
AG
4985 mdev = vnr_to_mdev(tconn, pi->vnr);
4986 if (!mdev)
2735a594 4987 return -EIO;
1952e916 4988
b411b363
PR
4989 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4990
579b57ed 4991 if (p->block_id == ID_SYNCER) {
b411b363
PR
4992 drbd_set_in_sync(mdev, sector, blksize);
4993 dec_rs_pending(mdev);
2735a594 4994 return 0;
b411b363 4995 }
e05e1e59 4996 switch (pi->cmd) {
b411b363 4997 case P_RS_WRITE_ACK:
8554df1c 4998 what = WRITE_ACKED_BY_PEER_AND_SIS;
b411b363
PR
4999 break;
5000 case P_WRITE_ACK:
8554df1c 5001 what = WRITE_ACKED_BY_PEER;
b411b363
PR
5002 break;
5003 case P_RECV_ACK:
8554df1c 5004 what = RECV_ACKED_BY_PEER;
b411b363 5005 break;
d4dabbe2
LE
5006 case P_SUPERSEDED:
5007 what = CONFLICT_RESOLVED;
b411b363 5008 break;
7be8da07 5009 case P_RETRY_WRITE:
7be8da07 5010 what = POSTPONE_WRITE;
b411b363
PR
5011 break;
5012 default:
2735a594 5013 BUG();
b411b363
PR
5014 }
5015
5016 return validate_req_change_req_state(mdev, p->block_id, sector,
2735a594
AG
5017 &mdev->write_requests, __func__,
5018 what, false);
b411b363
PR
5019}
5020
1952e916 5021static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 5022{
1952e916 5023 struct drbd_conf *mdev;
e658983a 5024 struct p_block_ack *p = pi->data;
b411b363 5025 sector_t sector = be64_to_cpu(p->sector);
2deb8336 5026 int size = be32_to_cpu(p->blksize);
85997675 5027 int err;
b411b363 5028
1952e916
AG
5029 mdev = vnr_to_mdev(tconn, pi->vnr);
5030 if (!mdev)
2735a594 5031 return -EIO;
b411b363
PR
5032
5033 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5034
579b57ed 5035 if (p->block_id == ID_SYNCER) {
b411b363
PR
5036 dec_rs_pending(mdev);
5037 drbd_rs_failed_io(mdev, sector, size);
2735a594 5038 return 0;
b411b363 5039 }
2deb8336 5040
85997675
AG
5041 err = validate_req_change_req_state(mdev, p->block_id, sector,
5042 &mdev->write_requests, __func__,
303d1448 5043 NEG_ACKED, true);
85997675 5044 if (err) {
c3afd8f5
AG
5045 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
5046 The master bio might already be completed, therefore the
5047 request is no longer in the collision hash. */
5048 /* In Protocol B we might already have got a P_RECV_ACK
5049 but then get a P_NEG_ACK afterwards. */
c3afd8f5 5050 drbd_set_out_of_sync(mdev, sector, size);
2deb8336 5051 }
2735a594 5052 return 0;
b411b363
PR
5053}
5054
1952e916 5055static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 5056{
1952e916 5057 struct drbd_conf *mdev;
e658983a 5058 struct p_block_ack *p = pi->data;
b411b363
PR
5059 sector_t sector = be64_to_cpu(p->sector);
5060
1952e916
AG
5061 mdev = vnr_to_mdev(tconn, pi->vnr);
5062 if (!mdev)
2735a594 5063 return -EIO;
1952e916 5064
b411b363 5065 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
7be8da07 5066
380207d0 5067 dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n",
b411b363
PR
5068 (unsigned long long)sector, be32_to_cpu(p->blksize));
5069
5070 return validate_req_change_req_state(mdev, p->block_id, sector,
2735a594
AG
5071 &mdev->read_requests, __func__,
5072 NEG_ACKED, false);
b411b363
PR
5073}
5074
1952e916 5075static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 5076{
1952e916 5077 struct drbd_conf *mdev;
b411b363
PR
5078 sector_t sector;
5079 int size;
e658983a 5080 struct p_block_ack *p = pi->data;
1952e916
AG
5081
5082 mdev = vnr_to_mdev(tconn, pi->vnr);
5083 if (!mdev)
2735a594 5084 return -EIO;
b411b363
PR
5085
5086 sector = be64_to_cpu(p->sector);
5087 size = be32_to_cpu(p->blksize);
b411b363
PR
5088
5089 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5090
5091 dec_rs_pending(mdev);
5092
5093 if (get_ldev_if_state(mdev, D_FAILED)) {
5094 drbd_rs_complete_io(mdev, sector);
e05e1e59 5095 switch (pi->cmd) {
d612d309
PR
5096 case P_NEG_RS_DREPLY:
5097 drbd_rs_failed_io(mdev, sector, size);
5098 case P_RS_CANCEL:
5099 break;
5100 default:
2735a594 5101 BUG();
d612d309 5102 }
b411b363
PR
5103 put_ldev(mdev);
5104 }
5105
2735a594 5106 return 0;
b411b363
PR
5107}
5108
1952e916 5109static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 5110{
e658983a 5111 struct p_barrier_ack *p = pi->data;
9ed57dcb
LE
5112 struct drbd_conf *mdev;
5113 int vnr;
1952e916 5114
9ed57dcb 5115 tl_release(tconn, p->barrier, be32_to_cpu(p->set_size));
b411b363 5116
9ed57dcb
LE
5117 rcu_read_lock();
5118 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
5119 if (mdev->state.conn == C_AHEAD &&
5120 atomic_read(&mdev->ap_in_flight) == 0 &&
5121 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
5122 mdev->start_resync_timer.expires = jiffies + HZ;
5123 add_timer(&mdev->start_resync_timer);
5124 }
c4752ef1 5125 }
9ed57dcb 5126 rcu_read_unlock();
c4752ef1 5127
2735a594 5128 return 0;
b411b363
PR
5129}
5130
1952e916 5131static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 5132{
1952e916 5133 struct drbd_conf *mdev;
e658983a 5134 struct p_block_ack *p = pi->data;
b411b363
PR
5135 struct drbd_work *w;
5136 sector_t sector;
5137 int size;
5138
1952e916
AG
5139 mdev = vnr_to_mdev(tconn, pi->vnr);
5140 if (!mdev)
2735a594 5141 return -EIO;
1952e916 5142
b411b363
PR
5143 sector = be64_to_cpu(p->sector);
5144 size = be32_to_cpu(p->blksize);
5145
5146 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5147
5148 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
8f7bed77 5149 drbd_ov_out_of_sync_found(mdev, sector, size);
b411b363 5150 else
8f7bed77 5151 ov_out_of_sync_print(mdev);
b411b363 5152
1d53f09e 5153 if (!get_ldev(mdev))
2735a594 5154 return 0;
1d53f09e 5155
b411b363
PR
5156 drbd_rs_complete_io(mdev, sector);
5157 dec_rs_pending(mdev);
5158
ea5442af
LE
5159 --mdev->ov_left;
5160
5161 /* let's advance progress step marks only for every other megabyte */
5162 if ((mdev->ov_left & 0x200) == 0x200)
5163 drbd_advance_rs_marks(mdev, mdev->ov_left);
5164
5165 if (mdev->ov_left == 0) {
b411b363
PR
5166 w = kmalloc(sizeof(*w), GFP_NOIO);
5167 if (w) {
5168 w->cb = w_ov_finished;
a21e9298 5169 w->mdev = mdev;
d5b27b01 5170 drbd_queue_work(&mdev->tconn->sender_work, w);
b411b363
PR
5171 } else {
5172 dev_err(DEV, "kmalloc(w) failed.");
8f7bed77 5173 ov_out_of_sync_print(mdev);
b411b363
PR
5174 drbd_resync_finished(mdev);
5175 }
5176 }
1d53f09e 5177 put_ldev(mdev);
2735a594 5178 return 0;
b411b363
PR
5179}
5180
1952e916 5181static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
0ced55a3 5182{
2735a594 5183 return 0;
b411b363
PR
5184}
5185
a990be46 5186static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
0ced55a3 5187{
082a3439 5188 struct drbd_conf *mdev;
c141ebda 5189 int vnr, not_empty = 0;
32862ec7
PR
5190
5191 do {
5192 clear_bit(SIGNAL_ASENDER, &tconn->flags);
5193 flush_signals(current);
c141ebda
PR
5194
5195 rcu_read_lock();
5196 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
5197 kref_get(&mdev->kref);
5198 rcu_read_unlock();
d3fcb490 5199 if (drbd_finish_peer_reqs(mdev)) {
c141ebda
PR
5200 kref_put(&mdev->kref, &drbd_minor_destroy);
5201 return 1;
d3fcb490 5202 }
c141ebda
PR
5203 kref_put(&mdev->kref, &drbd_minor_destroy);
5204 rcu_read_lock();
082a3439 5205 }
32862ec7 5206 set_bit(SIGNAL_ASENDER, &tconn->flags);
082a3439
PR
5207
5208 spin_lock_irq(&tconn->req_lock);
c141ebda 5209 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
082a3439
PR
5210 not_empty = !list_empty(&mdev->done_ee);
5211 if (not_empty)
5212 break;
5213 }
5214 spin_unlock_irq(&tconn->req_lock);
c141ebda 5215 rcu_read_unlock();
32862ec7
PR
5216 } while (not_empty);
5217
5218 return 0;
0ced55a3
PR
5219}
5220
b411b363
PR
5221struct asender_cmd {
5222 size_t pkt_size;
1952e916 5223 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
b411b363
PR
5224};
5225
7201b972 5226static struct asender_cmd asender_tbl[] = {
e658983a
AG
5227 [P_PING] = { 0, got_Ping },
5228 [P_PING_ACK] = { 0, got_PingAck },
b411b363
PR
5229 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5230 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5231 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
d4dabbe2 5232 [P_SUPERSEDED] = { sizeof(struct p_block_ack), got_BlockAck },
b411b363
PR
5233 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
5234 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
1952e916 5235 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply },
b411b363
PR
5236 [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
5237 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
5238 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
5239 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
02918be2 5240 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
1952e916
AG
5241 [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply },
5242 [P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), got_conn_RqSReply },
5243 [P_RETRY_WRITE] = { sizeof(struct p_block_ack), got_BlockAck },
7201b972 5244};
b411b363
PR
5245
5246int drbd_asender(struct drbd_thread *thi)
5247{
392c8801 5248 struct drbd_tconn *tconn = thi->tconn;
b411b363 5249 struct asender_cmd *cmd = NULL;
77351055 5250 struct packet_info pi;
257d0af6 5251 int rv;
e658983a 5252 void *buf = tconn->meta.rbuf;
b411b363 5253 int received = 0;
52b061a4
AG
5254 unsigned int header_size = drbd_header_size(tconn);
5255 int expect = header_size;
44ed167d
PR
5256 bool ping_timeout_active = false;
5257 struct net_conf *nc;
bb77d34e 5258 int ping_timeo, tcp_cork, ping_int;
3990e04d 5259 struct sched_param param = { .sched_priority = 2 };
b411b363 5260
3990e04d
PR
5261 rv = sched_setscheduler(current, SCHED_RR, &param);
5262 if (rv < 0)
5263 conn_err(tconn, "drbd_asender: ERROR set priority, ret=%d\n", rv);
b411b363 5264
e77a0a5c 5265 while (get_t_state(thi) == RUNNING) {
80822284 5266 drbd_thread_current_set_cpu(thi);
b411b363 5267
44ed167d
PR
5268 rcu_read_lock();
5269 nc = rcu_dereference(tconn->net_conf);
5270 ping_timeo = nc->ping_timeo;
bb77d34e 5271 tcp_cork = nc->tcp_cork;
44ed167d
PR
5272 ping_int = nc->ping_int;
5273 rcu_read_unlock();
5274
32862ec7 5275 if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
a17647aa 5276 if (drbd_send_ping(tconn)) {
32862ec7 5277 conn_err(tconn, "drbd_send_ping has failed\n");
b411b363 5278 goto reconnect;
841ce241 5279 }
44ed167d
PR
5280 tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
5281 ping_timeout_active = true;
b411b363
PR
5282 }
5283
32862ec7
PR
5284 /* TODO: conditionally cork; it may hurt latency if we cork without
5285 much to send */
bb77d34e 5286 if (tcp_cork)
32862ec7 5287 drbd_tcp_cork(tconn->meta.socket);
a990be46
AG
5288 if (tconn_finish_peer_reqs(tconn)) {
5289 conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
32862ec7 5290 goto reconnect;
b411b363
PR
5291 }
5292 /* but unconditionally uncork unless disabled */
bb77d34e 5293 if (tcp_cork)
32862ec7 5294 drbd_tcp_uncork(tconn->meta.socket);
b411b363
PR
5295
5296 /* short circuit, recv_msg would return EINTR anyways. */
5297 if (signal_pending(current))
5298 continue;
5299
32862ec7
PR
5300 rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
5301 clear_bit(SIGNAL_ASENDER, &tconn->flags);
b411b363
PR
5302
5303 flush_signals(current);
5304
5305 /* Note:
5306 * -EINTR (on meta) we got a signal
5307 * -EAGAIN (on meta) rcvtimeo expired
5308 * -ECONNRESET other side closed the connection
5309 * -ERESTARTSYS (on data) we got a signal
5310 * rv < 0 other than above: unexpected error!
5311 * rv == expected: full header or command
5312 * rv < expected: "woken" by signal during receive
5313 * rv == 0 : "connection shut down by peer"
5314 */
5315 if (likely(rv > 0)) {
5316 received += rv;
5317 buf += rv;
5318 } else if (rv == 0) {
b66623e3
PR
5319 if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
5320 long t;
5321 rcu_read_lock();
5322 t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
5323 rcu_read_unlock();
5324
5325 t = wait_event_timeout(tconn->ping_wait,
5326 tconn->cstate < C_WF_REPORT_PARAMS,
5327 t);
599377ac
PR
5328 if (t)
5329 break;
5330 }
32862ec7 5331 conn_err(tconn, "meta connection shut down by peer.\n");
b411b363
PR
5332 goto reconnect;
5333 } else if (rv == -EAGAIN) {
cb6518cb
LE
5334 /* If the data socket received something meanwhile,
5335 * that is good enough: peer is still alive. */
32862ec7
PR
5336 if (time_after(tconn->last_received,
5337 jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
cb6518cb 5338 continue;
f36af18c 5339 if (ping_timeout_active) {
32862ec7 5340 conn_err(tconn, "PingAck did not arrive in time.\n");
b411b363
PR
5341 goto reconnect;
5342 }
32862ec7 5343 set_bit(SEND_PING, &tconn->flags);
b411b363
PR
5344 continue;
5345 } else if (rv == -EINTR) {
5346 continue;
5347 } else {
32862ec7 5348 conn_err(tconn, "sock_recvmsg returned %d\n", rv);
b411b363
PR
5349 goto reconnect;
5350 }
5351
5352 if (received == expect && cmd == NULL) {
e658983a 5353 if (decode_header(tconn, tconn->meta.rbuf, &pi))
b411b363 5354 goto reconnect;
7201b972 5355 cmd = &asender_tbl[pi.cmd];
1952e916 5356 if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
2fcb8f30
AG
5357 conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n",
5358 cmdname(pi.cmd), pi.cmd);
b411b363
PR
5359 goto disconnect;
5360 }
e658983a 5361 expect = header_size + cmd->pkt_size;
52b061a4 5362 if (pi.size != expect - header_size) {
32862ec7 5363 conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
77351055 5364 pi.cmd, pi.size);
b411b363 5365 goto reconnect;
257d0af6 5366 }
b411b363
PR
5367 }
5368 if (received == expect) {
2735a594 5369 bool err;
a4fbda8e 5370
2735a594
AG
5371 err = cmd->fn(tconn, &pi);
5372 if (err) {
1952e916 5373 conn_err(tconn, "%pf failed\n", cmd->fn);
b411b363 5374 goto reconnect;
1952e916 5375 }
b411b363 5376
a4fbda8e 5377 tconn->last_received = jiffies;
f36af18c 5378
44ed167d
PR
5379 if (cmd == &asender_tbl[P_PING_ACK]) {
5380 /* restore idle timeout */
5381 tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
5382 ping_timeout_active = false;
5383 }
f36af18c 5384
e658983a 5385 buf = tconn->meta.rbuf;
b411b363 5386 received = 0;
52b061a4 5387 expect = header_size;
b411b363
PR
5388 cmd = NULL;
5389 }
5390 }
5391
5392 if (0) {
5393reconnect:
bbeb641c 5394 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
19fffd7b 5395 conn_md_sync(tconn);
b411b363
PR
5396 }
5397 if (0) {
5398disconnect:
bbeb641c 5399 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363 5400 }
32862ec7 5401 clear_bit(SIGNAL_ASENDER, &tconn->flags);
b411b363 5402
32862ec7 5403 conn_info(tconn, "asender terminated\n");
b411b363
PR
5404
5405 return 0;
5406}