]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/block/drbd/drbd_receiver.c
drbd: respect no-md-barriers setting also when changed online via disk-options
[mirror_ubuntu-bionic-kernel.git] / drivers / block / drbd / drbd_receiver.c
CommitLineData
b411b363
PR
1/*
2 drbd_receiver.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
13 any later version.
14
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25
b411b363
PR
26#include <linux/module.h>
27
28#include <asm/uaccess.h>
29#include <net/sock.h>
30
b411b363
PR
31#include <linux/drbd.h>
32#include <linux/fs.h>
33#include <linux/file.h>
34#include <linux/in.h>
35#include <linux/mm.h>
36#include <linux/memcontrol.h>
37#include <linux/mm_inline.h>
38#include <linux/slab.h>
b411b363
PR
39#include <linux/pkt_sched.h>
40#define __KERNEL_SYSCALLS__
41#include <linux/unistd.h>
42#include <linux/vmalloc.h>
43#include <linux/random.h>
b411b363
PR
44#include <linux/string.h>
45#include <linux/scatterlist.h>
46#include "drbd_int.h"
b411b363
PR
47#include "drbd_req.h"
48
49#include "drbd_vli.h"
50
77351055
PR
51struct packet_info {
52 enum drbd_packet cmd;
e2857216
AG
53 unsigned int size;
54 unsigned int vnr;
e658983a 55 void *data;
77351055
PR
56};
57
b411b363
PR
58enum finish_epoch {
59 FE_STILL_LIVE,
60 FE_DESTROYED,
61 FE_RECYCLED,
62};
63
6038178e 64static int drbd_do_features(struct drbd_tconn *tconn);
13e6037d 65static int drbd_do_auth(struct drbd_tconn *tconn);
c141ebda 66static int drbd_disconnected(struct drbd_conf *mdev);
b411b363 67
1e9dd291 68static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event);
99920dc5 69static int e_end_block(struct drbd_work *, int);
b411b363 70
b411b363
PR
71
72#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
73
45bb912b
LE
74/*
75 * some helper functions to deal with single linked page lists,
76 * page->private being our "next" pointer.
77 */
78
79/* If at least n pages are linked at head, get n pages off.
80 * Otherwise, don't modify head, and return NULL.
81 * Locking is the responsibility of the caller.
82 */
83static struct page *page_chain_del(struct page **head, int n)
84{
85 struct page *page;
86 struct page *tmp;
87
88 BUG_ON(!n);
89 BUG_ON(!head);
90
91 page = *head;
23ce4227
PR
92
93 if (!page)
94 return NULL;
95
45bb912b
LE
96 while (page) {
97 tmp = page_chain_next(page);
98 if (--n == 0)
99 break; /* found sufficient pages */
100 if (tmp == NULL)
101 /* insufficient pages, don't use any of them. */
102 return NULL;
103 page = tmp;
104 }
105
106 /* add end of list marker for the returned list */
107 set_page_private(page, 0);
108 /* actual return value, and adjustment of head */
109 page = *head;
110 *head = tmp;
111 return page;
112}
113
114/* may be used outside of locks to find the tail of a (usually short)
115 * "private" page chain, before adding it back to a global chain head
116 * with page_chain_add() under a spinlock. */
117static struct page *page_chain_tail(struct page *page, int *len)
118{
119 struct page *tmp;
120 int i = 1;
121 while ((tmp = page_chain_next(page)))
122 ++i, page = tmp;
123 if (len)
124 *len = i;
125 return page;
126}
127
128static int page_chain_free(struct page *page)
129{
130 struct page *tmp;
131 int i = 0;
132 page_chain_for_each_safe(page, tmp) {
133 put_page(page);
134 ++i;
135 }
136 return i;
137}
138
139static void page_chain_add(struct page **head,
140 struct page *chain_first, struct page *chain_last)
141{
142#if 1
143 struct page *tmp;
144 tmp = page_chain_tail(chain_first, NULL);
145 BUG_ON(tmp != chain_last);
146#endif
147
148 /* add chain to head */
149 set_page_private(chain_last, (unsigned long)*head);
150 *head = chain_first;
151}
152
18c2d522
AG
153static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
154 unsigned int number)
b411b363
PR
155{
156 struct page *page = NULL;
45bb912b 157 struct page *tmp = NULL;
18c2d522 158 unsigned int i = 0;
b411b363
PR
159
160 /* Yes, testing drbd_pp_vacant outside the lock is racy.
161 * So what. It saves a spin_lock. */
45bb912b 162 if (drbd_pp_vacant >= number) {
b411b363 163 spin_lock(&drbd_pp_lock);
45bb912b
LE
164 page = page_chain_del(&drbd_pp_pool, number);
165 if (page)
166 drbd_pp_vacant -= number;
b411b363 167 spin_unlock(&drbd_pp_lock);
45bb912b
LE
168 if (page)
169 return page;
b411b363 170 }
45bb912b 171
b411b363
PR
172 /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
173 * "criss-cross" setup, that might cause write-out on some other DRBD,
174 * which in turn might block on the other node at this very place. */
45bb912b
LE
175 for (i = 0; i < number; i++) {
176 tmp = alloc_page(GFP_TRY);
177 if (!tmp)
178 break;
179 set_page_private(tmp, (unsigned long)page);
180 page = tmp;
181 }
182
183 if (i == number)
184 return page;
185
186 /* Not enough pages immediately available this time.
c37c8ecf 187 * No need to jump around here, drbd_alloc_pages will retry this
45bb912b
LE
188 * function "soon". */
189 if (page) {
190 tmp = page_chain_tail(page, NULL);
191 spin_lock(&drbd_pp_lock);
192 page_chain_add(&drbd_pp_pool, page, tmp);
193 drbd_pp_vacant += i;
194 spin_unlock(&drbd_pp_lock);
195 }
196 return NULL;
b411b363
PR
197}
198
a990be46
AG
199static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
200 struct list_head *to_be_freed)
b411b363 201{
db830c46 202 struct drbd_peer_request *peer_req;
b411b363
PR
203 struct list_head *le, *tle;
204
205 /* The EEs are always appended to the end of the list. Since
206 they are sent in order over the wire, they have to finish
207 in order. As soon as we see the first not finished we can
208 stop to examine the list... */
209
210 list_for_each_safe(le, tle, &mdev->net_ee) {
db830c46 211 peer_req = list_entry(le, struct drbd_peer_request, w.list);
045417f7 212 if (drbd_peer_req_has_active_page(peer_req))
b411b363
PR
213 break;
214 list_move(le, to_be_freed);
215 }
216}
217
218static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
219{
220 LIST_HEAD(reclaimed);
db830c46 221 struct drbd_peer_request *peer_req, *t;
b411b363 222
87eeee41 223 spin_lock_irq(&mdev->tconn->req_lock);
a990be46 224 reclaim_finished_net_peer_reqs(mdev, &reclaimed);
87eeee41 225 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 226
db830c46 227 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
3967deb1 228 drbd_free_net_peer_req(mdev, peer_req);
b411b363
PR
229}
230
231/**
c37c8ecf 232 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
b411b363 233 * @mdev: DRBD device.
45bb912b
LE
234 * @number: number of pages requested
235 * @retry: whether to retry, if not enough pages are available right now
236 *
237 * Tries to allocate number pages, first from our own page pool, then from
238 * the kernel, unless this allocation would exceed the max_buffers setting.
239 * Possibly retry until DRBD frees sufficient pages somewhere else.
b411b363 240 *
45bb912b 241 * Returns a page chain linked via page->private.
b411b363 242 */
c37c8ecf
AG
243struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
244 bool retry)
b411b363
PR
245{
246 struct page *page = NULL;
44ed167d 247 struct net_conf *nc;
b411b363 248 DEFINE_WAIT(wait);
44ed167d 249 int mxb;
b411b363 250
45bb912b
LE
251 /* Yes, we may run up to @number over max_buffers. If we
252 * follow it strictly, the admin will get it wrong anyways. */
44ed167d
PR
253 rcu_read_lock();
254 nc = rcu_dereference(mdev->tconn->net_conf);
255 mxb = nc ? nc->max_buffers : 1000000;
256 rcu_read_unlock();
257
258 if (atomic_read(&mdev->pp_in_use) < mxb)
18c2d522 259 page = __drbd_alloc_pages(mdev, number);
b411b363 260
45bb912b 261 while (page == NULL) {
b411b363
PR
262 prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
263
264 drbd_kick_lo_and_reclaim_net(mdev);
265
44ed167d 266 if (atomic_read(&mdev->pp_in_use) < mxb) {
18c2d522 267 page = __drbd_alloc_pages(mdev, number);
b411b363
PR
268 if (page)
269 break;
270 }
271
272 if (!retry)
273 break;
274
275 if (signal_pending(current)) {
c37c8ecf 276 dev_warn(DEV, "drbd_alloc_pages interrupted!\n");
b411b363
PR
277 break;
278 }
279
280 schedule();
281 }
282 finish_wait(&drbd_pp_wait, &wait);
283
45bb912b
LE
284 if (page)
285 atomic_add(number, &mdev->pp_in_use);
b411b363
PR
286 return page;
287}
288
c37c8ecf 289/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
87eeee41 290 * Is also used from inside an other spin_lock_irq(&mdev->tconn->req_lock);
45bb912b
LE
291 * Either links the page chain back to the global pool,
292 * or returns all pages to the system. */
5cc287e0 293static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
b411b363 294{
435f0740 295 atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
b411b363 296 int i;
435f0740 297
a73ff323
LE
298 if (page == NULL)
299 return;
300
81a5d60e 301 if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
45bb912b
LE
302 i = page_chain_free(page);
303 else {
304 struct page *tmp;
305 tmp = page_chain_tail(page, &i);
306 spin_lock(&drbd_pp_lock);
307 page_chain_add(&drbd_pp_pool, page, tmp);
308 drbd_pp_vacant += i;
309 spin_unlock(&drbd_pp_lock);
b411b363 310 }
435f0740 311 i = atomic_sub_return(i, a);
45bb912b 312 if (i < 0)
435f0740
LE
313 dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
314 is_net ? "pp_in_use_by_net" : "pp_in_use", i);
b411b363
PR
315 wake_up(&drbd_pp_wait);
316}
317
318/*
319You need to hold the req_lock:
320 _drbd_wait_ee_list_empty()
321
322You must not have the req_lock:
3967deb1 323 drbd_free_peer_req()
0db55363 324 drbd_alloc_peer_req()
7721f567 325 drbd_free_peer_reqs()
b411b363 326 drbd_ee_fix_bhs()
a990be46 327 drbd_finish_peer_reqs()
b411b363
PR
328 drbd_clear_done_ee()
329 drbd_wait_ee_list_empty()
330*/
331
f6ffca9f 332struct drbd_peer_request *
0db55363
AG
333drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
334 unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
b411b363 335{
db830c46 336 struct drbd_peer_request *peer_req;
a73ff323 337 struct page *page = NULL;
45bb912b 338 unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
b411b363 339
0cf9d27e 340 if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
b411b363
PR
341 return NULL;
342
db830c46
AG
343 peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
344 if (!peer_req) {
b411b363 345 if (!(gfp_mask & __GFP_NOWARN))
0db55363 346 dev_err(DEV, "%s: allocation failed\n", __func__);
b411b363
PR
347 return NULL;
348 }
349
a73ff323 350 if (data_size) {
81a3537a 351 page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
a73ff323
LE
352 if (!page)
353 goto fail;
354 }
b411b363 355
db830c46
AG
356 drbd_clear_interval(&peer_req->i);
357 peer_req->i.size = data_size;
358 peer_req->i.sector = sector;
359 peer_req->i.local = false;
360 peer_req->i.waiting = false;
361
362 peer_req->epoch = NULL;
a21e9298 363 peer_req->w.mdev = mdev;
db830c46
AG
364 peer_req->pages = page;
365 atomic_set(&peer_req->pending_bios, 0);
366 peer_req->flags = 0;
9a8e7753
AG
367 /*
368 * The block_id is opaque to the receiver. It is not endianness
369 * converted, and sent back to the sender unchanged.
370 */
db830c46 371 peer_req->block_id = id;
b411b363 372
db830c46 373 return peer_req;
b411b363 374
45bb912b 375 fail:
db830c46 376 mempool_free(peer_req, drbd_ee_mempool);
b411b363
PR
377 return NULL;
378}
379
3967deb1 380void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
f6ffca9f 381 int is_net)
b411b363 382{
db830c46
AG
383 if (peer_req->flags & EE_HAS_DIGEST)
384 kfree(peer_req->digest);
5cc287e0 385 drbd_free_pages(mdev, peer_req->pages, is_net);
db830c46
AG
386 D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
387 D_ASSERT(drbd_interval_empty(&peer_req->i));
388 mempool_free(peer_req, drbd_ee_mempool);
b411b363
PR
389}
390
7721f567 391int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
b411b363
PR
392{
393 LIST_HEAD(work_list);
db830c46 394 struct drbd_peer_request *peer_req, *t;
b411b363 395 int count = 0;
435f0740 396 int is_net = list == &mdev->net_ee;
b411b363 397
87eeee41 398 spin_lock_irq(&mdev->tconn->req_lock);
b411b363 399 list_splice_init(list, &work_list);
87eeee41 400 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 401
db830c46 402 list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
3967deb1 403 __drbd_free_peer_req(mdev, peer_req, is_net);
b411b363
PR
404 count++;
405 }
406 return count;
407}
408
b411b363 409/*
a990be46 410 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
b411b363 411 */
a990be46 412static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
b411b363
PR
413{
414 LIST_HEAD(work_list);
415 LIST_HEAD(reclaimed);
db830c46 416 struct drbd_peer_request *peer_req, *t;
e2b3032b 417 int err = 0;
b411b363 418
87eeee41 419 spin_lock_irq(&mdev->tconn->req_lock);
a990be46 420 reclaim_finished_net_peer_reqs(mdev, &reclaimed);
b411b363 421 list_splice_init(&mdev->done_ee, &work_list);
87eeee41 422 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 423
db830c46 424 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
3967deb1 425 drbd_free_net_peer_req(mdev, peer_req);
b411b363
PR
426
427 /* possible callbacks here:
d4dabbe2 428 * e_end_block, and e_end_resync_block, e_send_superseded.
b411b363
PR
429 * all ignore the last argument.
430 */
db830c46 431 list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
e2b3032b
AG
432 int err2;
433
b411b363 434 /* list_del not necessary, next/prev members not touched */
e2b3032b
AG
435 err2 = peer_req->w.cb(&peer_req->w, !!err);
436 if (!err)
437 err = err2;
3967deb1 438 drbd_free_peer_req(mdev, peer_req);
b411b363
PR
439 }
440 wake_up(&mdev->ee_wait);
441
e2b3032b 442 return err;
b411b363
PR
443}
444
d4da1537
AG
445static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
446 struct list_head *head)
b411b363
PR
447{
448 DEFINE_WAIT(wait);
449
450 /* avoids spin_lock/unlock
451 * and calling prepare_to_wait in the fast path */
452 while (!list_empty(head)) {
453 prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
87eeee41 454 spin_unlock_irq(&mdev->tconn->req_lock);
7eaceacc 455 io_schedule();
b411b363 456 finish_wait(&mdev->ee_wait, &wait);
87eeee41 457 spin_lock_irq(&mdev->tconn->req_lock);
b411b363
PR
458 }
459}
460
d4da1537
AG
461static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
462 struct list_head *head)
b411b363 463{
87eeee41 464 spin_lock_irq(&mdev->tconn->req_lock);
b411b363 465 _drbd_wait_ee_list_empty(mdev, head);
87eeee41 466 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363
PR
467}
468
dbd9eea0 469static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
b411b363
PR
470{
471 mm_segment_t oldfs;
472 struct kvec iov = {
473 .iov_base = buf,
474 .iov_len = size,
475 };
476 struct msghdr msg = {
477 .msg_iovlen = 1,
478 .msg_iov = (struct iovec *)&iov,
479 .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
480 };
481 int rv;
482
483 oldfs = get_fs();
484 set_fs(KERNEL_DS);
485 rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
486 set_fs(oldfs);
487
488 return rv;
489}
490
de0ff338 491static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
b411b363 492{
b411b363
PR
493 int rv;
494
1393b59f 495 rv = drbd_recv_short(tconn->data.socket, buf, size, 0);
b411b363 496
dbd0820c
PR
497 if (rv < 0) {
498 if (rv == -ECONNRESET)
155522df 499 conn_info(tconn, "sock was reset by peer\n");
dbd0820c 500 else if (rv != -ERESTARTSYS)
155522df 501 conn_err(tconn, "sock_recvmsg returned %d\n", rv);
dbd0820c 502 } else if (rv == 0) {
b66623e3
PR
503 if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
504 long t;
505 rcu_read_lock();
506 t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
507 rcu_read_unlock();
508
509 t = wait_event_timeout(tconn->ping_wait, tconn->cstate < C_WF_REPORT_PARAMS, t);
510
599377ac
PR
511 if (t)
512 goto out;
513 }
b66623e3 514 conn_info(tconn, "sock was shut down by peer\n");
599377ac
PR
515 }
516
b411b363 517 if (rv != size)
bbeb641c 518 conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
b411b363 519
599377ac 520out:
b411b363
PR
521 return rv;
522}
523
c6967746
AG
524static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
525{
526 int err;
527
528 err = drbd_recv(tconn, buf, size);
529 if (err != size) {
530 if (err >= 0)
531 err = -EIO;
532 } else
533 err = 0;
534 return err;
535}
536
a5c31904
AG
537static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
538{
539 int err;
540
541 err = drbd_recv_all(tconn, buf, size);
542 if (err && !signal_pending(current))
543 conn_warn(tconn, "short read (expected size %d)\n", (int)size);
544 return err;
545}
546
5dbf1673
LE
547/* quoting tcp(7):
548 * On individual connections, the socket buffer size must be set prior to the
549 * listen(2) or connect(2) calls in order to have it take effect.
550 * This is our wrapper to do so.
551 */
552static void drbd_setbufsize(struct socket *sock, unsigned int snd,
553 unsigned int rcv)
554{
555 /* open coded SO_SNDBUF, SO_RCVBUF */
556 if (snd) {
557 sock->sk->sk_sndbuf = snd;
558 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
559 }
560 if (rcv) {
561 sock->sk->sk_rcvbuf = rcv;
562 sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
563 }
564}
565
eac3e990 566static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
b411b363
PR
567{
568 const char *what;
569 struct socket *sock;
570 struct sockaddr_in6 src_in6;
44ed167d
PR
571 struct sockaddr_in6 peer_in6;
572 struct net_conf *nc;
573 int err, peer_addr_len, my_addr_len;
69ef82de 574 int sndbuf_size, rcvbuf_size, connect_int;
b411b363
PR
575 int disconnect_on_error = 1;
576
44ed167d
PR
577 rcu_read_lock();
578 nc = rcu_dereference(tconn->net_conf);
579 if (!nc) {
580 rcu_read_unlock();
b411b363 581 return NULL;
44ed167d 582 }
44ed167d
PR
583 sndbuf_size = nc->sndbuf_size;
584 rcvbuf_size = nc->rcvbuf_size;
69ef82de 585 connect_int = nc->connect_int;
089c075d 586 rcu_read_unlock();
44ed167d 587
089c075d
AG
588 my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6));
589 memcpy(&src_in6, &tconn->my_addr, my_addr_len);
44ed167d 590
089c075d 591 if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6)
44ed167d
PR
592 src_in6.sin6_port = 0;
593 else
594 ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
595
089c075d
AG
596 peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6));
597 memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len);
b411b363
PR
598
599 what = "sock_create_kern";
44ed167d
PR
600 err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
601 SOCK_STREAM, IPPROTO_TCP, &sock);
b411b363
PR
602 if (err < 0) {
603 sock = NULL;
604 goto out;
605 }
606
607 sock->sk->sk_rcvtimeo =
69ef82de 608 sock->sk->sk_sndtimeo = connect_int * HZ;
44ed167d 609 drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);
b411b363
PR
610
611 /* explicitly bind to the configured IP as source IP
612 * for the outgoing connections.
613 * This is needed for multihomed hosts and to be
614 * able to use lo: interfaces for drbd.
615 * Make sure to use 0 as port number, so linux selects
616 * a free one dynamically.
617 */
b411b363 618 what = "bind before connect";
44ed167d 619 err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
b411b363
PR
620 if (err < 0)
621 goto out;
622
623 /* connect may fail, peer not yet available.
624 * stay C_WF_CONNECTION, don't go Disconnecting! */
625 disconnect_on_error = 0;
626 what = "connect";
44ed167d 627 err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);
b411b363
PR
628
629out:
630 if (err < 0) {
631 if (sock) {
632 sock_release(sock);
633 sock = NULL;
634 }
635 switch (-err) {
636 /* timeout, busy, signal pending */
637 case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
638 case EINTR: case ERESTARTSYS:
639 /* peer not (yet) available, network problem */
640 case ECONNREFUSED: case ENETUNREACH:
641 case EHOSTDOWN: case EHOSTUNREACH:
642 disconnect_on_error = 0;
643 break;
644 default:
eac3e990 645 conn_err(tconn, "%s failed, err = %d\n", what, err);
b411b363
PR
646 }
647 if (disconnect_on_error)
bbeb641c 648 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363 649 }
44ed167d 650
b411b363
PR
651 return sock;
652}
653
7a426fd8
PR
654struct accept_wait_data {
655 struct drbd_tconn *tconn;
656 struct socket *s_listen;
657 struct completion door_bell;
658 void (*original_sk_state_change)(struct sock *sk);
659
660};
661
715306f6 662static void drbd_incoming_connection(struct sock *sk)
7a426fd8
PR
663{
664 struct accept_wait_data *ad = sk->sk_user_data;
715306f6 665 void (*state_change)(struct sock *sk);
7a426fd8 666
715306f6
AG
667 state_change = ad->original_sk_state_change;
668 if (sk->sk_state == TCP_ESTABLISHED)
669 complete(&ad->door_bell);
670 state_change(sk);
7a426fd8
PR
671}
672
673static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_data *ad)
b411b363 674{
1f3e509b 675 int err, sndbuf_size, rcvbuf_size, my_addr_len;
44ed167d 676 struct sockaddr_in6 my_addr;
1f3e509b 677 struct socket *s_listen;
44ed167d 678 struct net_conf *nc;
b411b363
PR
679 const char *what;
680
44ed167d
PR
681 rcu_read_lock();
682 nc = rcu_dereference(tconn->net_conf);
683 if (!nc) {
684 rcu_read_unlock();
7a426fd8 685 return -EIO;
44ed167d 686 }
44ed167d
PR
687 sndbuf_size = nc->sndbuf_size;
688 rcvbuf_size = nc->rcvbuf_size;
44ed167d 689 rcu_read_unlock();
b411b363 690
089c075d
AG
691 my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6));
692 memcpy(&my_addr, &tconn->my_addr, my_addr_len);
b411b363
PR
693
694 what = "sock_create_kern";
44ed167d 695 err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
1f3e509b 696 SOCK_STREAM, IPPROTO_TCP, &s_listen);
b411b363
PR
697 if (err) {
698 s_listen = NULL;
699 goto out;
700 }
701
98683650 702 s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
44ed167d 703 drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);
b411b363
PR
704
705 what = "bind before listen";
44ed167d 706 err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
b411b363
PR
707 if (err < 0)
708 goto out;
709
7a426fd8
PR
710 ad->s_listen = s_listen;
711 write_lock_bh(&s_listen->sk->sk_callback_lock);
712 ad->original_sk_state_change = s_listen->sk->sk_state_change;
715306f6 713 s_listen->sk->sk_state_change = drbd_incoming_connection;
7a426fd8
PR
714 s_listen->sk->sk_user_data = ad;
715 write_unlock_bh(&s_listen->sk->sk_callback_lock);
b411b363 716
2820fd39
PR
717 what = "listen";
718 err = s_listen->ops->listen(s_listen, 5);
719 if (err < 0)
720 goto out;
721
7a426fd8 722 return 0;
b411b363
PR
723out:
724 if (s_listen)
725 sock_release(s_listen);
726 if (err < 0) {
727 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
1f3e509b
PR
728 conn_err(tconn, "%s failed, err = %d\n", what, err);
729 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363
PR
730 }
731 }
b411b363 732
7a426fd8 733 return -EIO;
b411b363
PR
734}
735
715306f6 736static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
b411b363 737{
715306f6
AG
738 write_lock_bh(&sk->sk_callback_lock);
739 sk->sk_state_change = ad->original_sk_state_change;
740 sk->sk_user_data = NULL;
741 write_unlock_bh(&sk->sk_callback_lock);
b411b363
PR
742}
743
7a426fd8 744static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct accept_wait_data *ad)
b411b363 745{
1f3e509b
PR
746 int timeo, connect_int, err = 0;
747 struct socket *s_estab = NULL;
1f3e509b
PR
748 struct net_conf *nc;
749
750 rcu_read_lock();
751 nc = rcu_dereference(tconn->net_conf);
752 if (!nc) {
753 rcu_read_unlock();
754 return NULL;
755 }
756 connect_int = nc->connect_int;
757 rcu_read_unlock();
758
759 timeo = connect_int * HZ;
760 timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
761
7a426fd8
PR
762 err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
763 if (err <= 0)
764 return NULL;
b411b363 765
7a426fd8 766 err = kernel_accept(ad->s_listen, &s_estab, 0);
b411b363
PR
767 if (err < 0) {
768 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
1f3e509b 769 conn_err(tconn, "accept failed, err = %d\n", err);
bbeb641c 770 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363
PR
771 }
772 }
b411b363 773
715306f6
AG
774 if (s_estab)
775 unregister_state_change(s_estab->sk, ad);
b411b363 776
b411b363
PR
777 return s_estab;
778}
b411b363 779
e658983a 780static int decode_header(struct drbd_tconn *, void *, struct packet_info *);
b411b363 781
9f5bdc33
AG
782static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
783 enum drbd_packet cmd)
784{
785 if (!conn_prepare_command(tconn, sock))
786 return -EIO;
e658983a 787 return conn_send_command(tconn, sock, cmd, 0, NULL, 0);
b411b363
PR
788}
789
9f5bdc33 790static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)
b411b363 791{
9f5bdc33
AG
792 unsigned int header_size = drbd_header_size(tconn);
793 struct packet_info pi;
794 int err;
b411b363 795
9f5bdc33
AG
796 err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
797 if (err != header_size) {
798 if (err >= 0)
799 err = -EIO;
800 return err;
801 }
802 err = decode_header(tconn, tconn->data.rbuf, &pi);
803 if (err)
804 return err;
805 return pi.cmd;
b411b363
PR
806}
807
808/**
809 * drbd_socket_okay() - Free the socket if its connection is not okay
b411b363
PR
810 * @sock: pointer to the pointer to the socket.
811 */
dbd9eea0 812static int drbd_socket_okay(struct socket **sock)
b411b363
PR
813{
814 int rr;
815 char tb[4];
816
817 if (!*sock)
81e84650 818 return false;
b411b363 819
dbd9eea0 820 rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
b411b363
PR
821
822 if (rr > 0 || rr == -EAGAIN) {
81e84650 823 return true;
b411b363
PR
824 } else {
825 sock_release(*sock);
826 *sock = NULL;
81e84650 827 return false;
b411b363
PR
828 }
829}
2325eb66
PR
830/* Gets called if a connection is established, or if a new minor gets created
831 in a connection */
c141ebda 832int drbd_connected(struct drbd_conf *mdev)
907599e0 833{
0829f5ed 834 int err;
907599e0
PR
835
836 atomic_set(&mdev->packet_seq, 0);
837 mdev->peer_seq = 0;
838
8410da8f
PR
839 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
840 &mdev->tconn->cstate_mutex :
841 &mdev->own_state_mutex;
842
0829f5ed
AG
843 err = drbd_send_sync_param(mdev);
844 if (!err)
845 err = drbd_send_sizes(mdev, 0, 0);
846 if (!err)
847 err = drbd_send_uuids(mdev);
848 if (!err)
43de7c85 849 err = drbd_send_current_state(mdev);
907599e0
PR
850 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
851 clear_bit(RESIZE_PENDING, &mdev->flags);
8b924f1d 852 mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
0829f5ed 853 return err;
907599e0 854}
b411b363
PR
855
856/*
857 * return values:
858 * 1 yes, we have a valid connection
859 * 0 oops, did not work out, please try again
860 * -1 peer talks different language,
861 * no point in trying again, please go standalone.
862 * -2 We do not have a network config...
863 */
81fa2e67 864static int conn_connect(struct drbd_tconn *tconn)
b411b363 865{
7da35862 866 struct drbd_socket sock, msock;
c141ebda 867 struct drbd_conf *mdev;
44ed167d 868 struct net_conf *nc;
92f14951 869 int vnr, timeout, h, ok;
08b165ba 870 bool discard_my_data;
197296ff 871 enum drbd_state_rv rv;
7a426fd8
PR
872 struct accept_wait_data ad = {
873 .tconn = tconn,
874 .door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
875 };
b411b363 876
b66623e3 877 clear_bit(DISCONNECT_SENT, &tconn->flags);
bbeb641c 878 if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
b411b363
PR
879 return -2;
880
7da35862
PR
881 mutex_init(&sock.mutex);
882 sock.sbuf = tconn->data.sbuf;
883 sock.rbuf = tconn->data.rbuf;
884 sock.socket = NULL;
885 mutex_init(&msock.mutex);
886 msock.sbuf = tconn->meta.sbuf;
887 msock.rbuf = tconn->meta.rbuf;
888 msock.socket = NULL;
889
0916e0e3
AG
890 /* Assume that the peer only understands protocol 80 until we know better. */
891 tconn->agreed_pro_version = 80;
b411b363 892
7a426fd8
PR
893 if (prepare_listen_socket(tconn, &ad))
894 return 0;
b411b363
PR
895
896 do {
2bf89621 897 struct socket *s;
b411b363 898
92f14951 899 s = drbd_try_connect(tconn);
b411b363 900 if (s) {
7da35862
PR
901 if (!sock.socket) {
902 sock.socket = s;
903 send_first_packet(tconn, &sock, P_INITIAL_DATA);
904 } else if (!msock.socket) {
427c0434 905 clear_bit(RESOLVE_CONFLICTS, &tconn->flags);
7da35862
PR
906 msock.socket = s;
907 send_first_packet(tconn, &msock, P_INITIAL_META);
b411b363 908 } else {
81fa2e67 909 conn_err(tconn, "Logic error in conn_connect()\n");
b411b363
PR
910 goto out_release_sockets;
911 }
912 }
913
7da35862
PR
914 if (sock.socket && msock.socket) {
915 rcu_read_lock();
916 nc = rcu_dereference(tconn->net_conf);
917 timeout = nc->ping_timeo * HZ / 10;
918 rcu_read_unlock();
919 schedule_timeout_interruptible(timeout);
920 ok = drbd_socket_okay(&sock.socket);
921 ok = drbd_socket_okay(&msock.socket) && ok;
b411b363
PR
922 if (ok)
923 break;
924 }
925
926retry:
7a426fd8 927 s = drbd_wait_for_connect(tconn, &ad);
b411b363 928 if (s) {
92f14951 929 int fp = receive_first_packet(tconn, s);
7da35862
PR
930 drbd_socket_okay(&sock.socket);
931 drbd_socket_okay(&msock.socket);
92f14951 932 switch (fp) {
e5d6f33a 933 case P_INITIAL_DATA:
7da35862 934 if (sock.socket) {
907599e0 935 conn_warn(tconn, "initial packet S crossed\n");
7da35862 936 sock_release(sock.socket);
80c6eed4
PR
937 sock.socket = s;
938 goto randomize;
b411b363 939 }
7da35862 940 sock.socket = s;
b411b363 941 break;
e5d6f33a 942 case P_INITIAL_META:
427c0434 943 set_bit(RESOLVE_CONFLICTS, &tconn->flags);
7da35862 944 if (msock.socket) {
907599e0 945 conn_warn(tconn, "initial packet M crossed\n");
7da35862 946 sock_release(msock.socket);
80c6eed4
PR
947 msock.socket = s;
948 goto randomize;
b411b363 949 }
7da35862 950 msock.socket = s;
b411b363
PR
951 break;
952 default:
907599e0 953 conn_warn(tconn, "Error receiving initial packet\n");
b411b363 954 sock_release(s);
80c6eed4 955randomize:
b411b363
PR
956 if (random32() & 1)
957 goto retry;
958 }
959 }
960
bbeb641c 961 if (tconn->cstate <= C_DISCONNECTING)
b411b363
PR
962 goto out_release_sockets;
963 if (signal_pending(current)) {
964 flush_signals(current);
965 smp_rmb();
907599e0 966 if (get_t_state(&tconn->receiver) == EXITING)
b411b363
PR
967 goto out_release_sockets;
968 }
969
b666dbf8
PR
970 ok = drbd_socket_okay(&sock.socket);
971 ok = drbd_socket_okay(&msock.socket) && ok;
972 } while (!ok);
b411b363 973
7a426fd8
PR
974 if (ad.s_listen)
975 sock_release(ad.s_listen);
b411b363 976
98683650
PR
977 sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
978 msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
b411b363 979
7da35862
PR
980 sock.socket->sk->sk_allocation = GFP_NOIO;
981 msock.socket->sk->sk_allocation = GFP_NOIO;
b411b363 982
7da35862
PR
983 sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
984 msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;
b411b363 985
b411b363 986 /* NOT YET ...
7da35862
PR
987 * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
988 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
6038178e 989 * first set it to the P_CONNECTION_FEATURES timeout,
b411b363 990 * which we set to 4x the configured ping_timeout. */
44ed167d
PR
991 rcu_read_lock();
992 nc = rcu_dereference(tconn->net_conf);
993
7da35862
PR
994 sock.socket->sk->sk_sndtimeo =
995 sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
b411b363 996
7da35862 997 msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
44ed167d 998 timeout = nc->timeout * HZ / 10;
08b165ba 999 discard_my_data = nc->discard_my_data;
44ed167d 1000 rcu_read_unlock();
b411b363 1001
7da35862 1002 msock.socket->sk->sk_sndtimeo = timeout;
b411b363
PR
1003
1004 /* we don't want delays.
25985edc 1005 * we use TCP_CORK where appropriate, though */
7da35862
PR
1006 drbd_tcp_nodelay(sock.socket);
1007 drbd_tcp_nodelay(msock.socket);
b411b363 1008
7da35862
PR
1009 tconn->data.socket = sock.socket;
1010 tconn->meta.socket = msock.socket;
907599e0 1011 tconn->last_received = jiffies;
b411b363 1012
6038178e 1013 h = drbd_do_features(tconn);
b411b363
PR
1014 if (h <= 0)
1015 return h;
1016
907599e0 1017 if (tconn->cram_hmac_tfm) {
b411b363 1018 /* drbd_request_state(mdev, NS(conn, WFAuth)); */
907599e0 1019 switch (drbd_do_auth(tconn)) {
b10d96cb 1020 case -1:
907599e0 1021 conn_err(tconn, "Authentication of peer failed\n");
b411b363 1022 return -1;
b10d96cb 1023 case 0:
907599e0 1024 conn_err(tconn, "Authentication of peer failed, trying again.\n");
b10d96cb 1025 return 0;
b411b363
PR
1026 }
1027 }
1028
7da35862
PR
1029 tconn->data.socket->sk->sk_sndtimeo = timeout;
1030 tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
b411b363 1031
387eb308 1032 if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
7e2455c1 1033 return -1;
b411b363 1034
a1096a6e
PR
1035 set_bit(STATE_SENT, &tconn->flags);
1036
c141ebda
PR
1037 rcu_read_lock();
1038 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1039 kref_get(&mdev->kref);
1040 rcu_read_unlock();
08b165ba
PR
1041
1042 if (discard_my_data)
1043 set_bit(DISCARD_MY_DATA, &mdev->flags);
1044 else
1045 clear_bit(DISCARD_MY_DATA, &mdev->flags);
1046
c141ebda
PR
1047 drbd_connected(mdev);
1048 kref_put(&mdev->kref, &drbd_minor_destroy);
1049 rcu_read_lock();
1050 }
1051 rcu_read_unlock();
1052
a1096a6e 1053 rv = conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
ed635cb0 1054 if (rv < SS_SUCCESS || tconn->cstate != C_WF_REPORT_PARAMS) {
a1096a6e 1055 clear_bit(STATE_SENT, &tconn->flags);
1e86ac48 1056 return 0;
a1096a6e 1057 }
1e86ac48 1058
823bd832 1059 drbd_thread_start(&tconn->asender);
b411b363 1060
08b165ba
PR
1061 mutex_lock(&tconn->conf_update);
1062 /* The discard_my_data flag is a single-shot modifier to the next
1063 * connection attempt, the handshake of which is now well underway.
1064 * No need for rcu style copying of the whole struct
1065 * just to clear a single value. */
1066 tconn->net_conf->discard_my_data = 0;
1067 mutex_unlock(&tconn->conf_update);
1068
d3fcb490 1069 return h;
b411b363
PR
1070
1071out_release_sockets:
7a426fd8
PR
1072 if (ad.s_listen)
1073 sock_release(ad.s_listen);
7da35862
PR
1074 if (sock.socket)
1075 sock_release(sock.socket);
1076 if (msock.socket)
1077 sock_release(msock.socket);
b411b363
PR
1078 return -1;
1079}
1080
e658983a 1081static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)
b411b363 1082{
e658983a
AG
1083 unsigned int header_size = drbd_header_size(tconn);
1084
0c8e36d9
AG
1085 if (header_size == sizeof(struct p_header100) &&
1086 *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
1087 struct p_header100 *h = header;
1088 if (h->pad != 0) {
1089 conn_err(tconn, "Header padding is not zero\n");
1090 return -EINVAL;
1091 }
1092 pi->vnr = be16_to_cpu(h->volume);
1093 pi->cmd = be16_to_cpu(h->command);
1094 pi->size = be32_to_cpu(h->length);
1095 } else if (header_size == sizeof(struct p_header95) &&
1096 *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
e658983a 1097 struct p_header95 *h = header;
e658983a 1098 pi->cmd = be16_to_cpu(h->command);
b55d84ba
AG
1099 pi->size = be32_to_cpu(h->length);
1100 pi->vnr = 0;
e658983a
AG
1101 } else if (header_size == sizeof(struct p_header80) &&
1102 *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
1103 struct p_header80 *h = header;
1104 pi->cmd = be16_to_cpu(h->command);
1105 pi->size = be16_to_cpu(h->length);
77351055 1106 pi->vnr = 0;
02918be2 1107 } else {
e658983a
AG
1108 conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
1109 be32_to_cpu(*(__be32 *)header),
1110 tconn->agreed_pro_version);
8172f3e9 1111 return -EINVAL;
b411b363 1112 }
e658983a 1113 pi->data = header + header_size;
8172f3e9 1114 return 0;
257d0af6 1115}
b411b363 1116
9ba7aa00 1117static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
257d0af6 1118{
e658983a 1119 void *buffer = tconn->data.rbuf;
69bc7bc3 1120 int err;
257d0af6 1121
e658983a 1122 err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));
a5c31904 1123 if (err)
69bc7bc3 1124 return err;
257d0af6 1125
e658983a 1126 err = decode_header(tconn, buffer, pi);
9ba7aa00 1127 tconn->last_received = jiffies;
b411b363 1128
69bc7bc3 1129 return err;
b411b363
PR
1130}
1131
4b0007c0 1132static void drbd_flush(struct drbd_tconn *tconn)
b411b363
PR
1133{
1134 int rv;
4b0007c0
PR
1135 struct drbd_conf *mdev;
1136 int vnr;
1137
1138 if (tconn->write_ordering >= WO_bdev_flush) {
615e087f 1139 rcu_read_lock();
4b0007c0 1140 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
615e087f
LE
1141 if (!get_ldev(mdev))
1142 continue;
1143 kref_get(&mdev->kref);
1144 rcu_read_unlock();
1145
1146 rv = blkdev_issue_flush(mdev->ldev->backing_bdev,
1147 GFP_NOIO, NULL);
1148 if (rv) {
1149 dev_info(DEV, "local disk flush failed with status %d\n", rv);
1150 /* would rather check on EOPNOTSUPP, but that is not reliable.
1151 * don't try again for ANY return value != 0
1152 * if (rv == -EOPNOTSUPP) */
1153 drbd_bump_write_ordering(tconn, WO_drain_io);
4b0007c0 1154 }
615e087f
LE
1155 put_ldev(mdev);
1156 kref_put(&mdev->kref, &drbd_minor_destroy);
b411b363 1157
615e087f
LE
1158 rcu_read_lock();
1159 if (rv)
1160 break;
b411b363 1161 }
615e087f 1162 rcu_read_unlock();
b411b363 1163 }
b411b363
PR
1164}
1165
1166/**
1167 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
1168 * @mdev: DRBD device.
1169 * @epoch: Epoch object.
1170 * @ev: Epoch event.
1171 */
1e9dd291 1172static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
b411b363
PR
1173 struct drbd_epoch *epoch,
1174 enum epoch_event ev)
1175{
2451fc3b 1176 int epoch_size;
b411b363 1177 struct drbd_epoch *next_epoch;
b411b363
PR
1178 enum finish_epoch rv = FE_STILL_LIVE;
1179
12038a3a 1180 spin_lock(&tconn->epoch_lock);
b411b363
PR
1181 do {
1182 next_epoch = NULL;
b411b363
PR
1183
1184 epoch_size = atomic_read(&epoch->epoch_size);
1185
1186 switch (ev & ~EV_CLEANUP) {
1187 case EV_PUT:
1188 atomic_dec(&epoch->active);
1189 break;
1190 case EV_GOT_BARRIER_NR:
1191 set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
b411b363
PR
1192 break;
1193 case EV_BECAME_LAST:
1194 /* nothing to do*/
1195 break;
1196 }
1197
b411b363
PR
1198 if (epoch_size != 0 &&
1199 atomic_read(&epoch->active) == 0 &&
80f9fd55 1200 (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
b411b363 1201 if (!(ev & EV_CLEANUP)) {
12038a3a 1202 spin_unlock(&tconn->epoch_lock);
9ed57dcb 1203 drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size);
12038a3a 1204 spin_lock(&tconn->epoch_lock);
b411b363 1205 }
9ed57dcb
LE
1206#if 0
1207 /* FIXME: dec unacked on connection, once we have
1208 * something to count pending connection packets in. */
80f9fd55 1209 if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
9ed57dcb
LE
1210 dec_unacked(epoch->tconn);
1211#endif
b411b363 1212
12038a3a 1213 if (tconn->current_epoch != epoch) {
b411b363
PR
1214 next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1215 list_del(&epoch->list);
1216 ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
12038a3a 1217 tconn->epochs--;
b411b363
PR
1218 kfree(epoch);
1219
1220 if (rv == FE_STILL_LIVE)
1221 rv = FE_DESTROYED;
1222 } else {
1223 epoch->flags = 0;
1224 atomic_set(&epoch->epoch_size, 0);
698f9315 1225 /* atomic_set(&epoch->active, 0); is already zero */
b411b363
PR
1226 if (rv == FE_STILL_LIVE)
1227 rv = FE_RECYCLED;
1228 }
1229 }
1230
1231 if (!next_epoch)
1232 break;
1233
1234 epoch = next_epoch;
1235 } while (1);
1236
12038a3a 1237 spin_unlock(&tconn->epoch_lock);
b411b363 1238
b411b363
PR
1239 return rv;
1240}
1241
1242/**
1243 * drbd_bump_write_ordering() - Fall back to an other write ordering method
4b0007c0 1244 * @tconn: DRBD connection.
b411b363
PR
1245 * @wo: Write ordering method to try.
1246 */
4b0007c0 1247void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo)
b411b363 1248{
daeda1cc 1249 struct disk_conf *dc;
4b0007c0 1250 struct drbd_conf *mdev;
b411b363 1251 enum write_ordering_e pwo;
4b0007c0 1252 int vnr;
b411b363
PR
1253 static char *write_ordering_str[] = {
1254 [WO_none] = "none",
1255 [WO_drain_io] = "drain",
1256 [WO_bdev_flush] = "flush",
b411b363
PR
1257 };
1258
4b0007c0 1259 pwo = tconn->write_ordering;
b411b363 1260 wo = min(pwo, wo);
daeda1cc 1261 rcu_read_lock();
4b0007c0 1262 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
27eb13e9 1263 if (!get_ldev_if_state(mdev, D_ATTACHING))
4b0007c0
PR
1264 continue;
1265 dc = rcu_dereference(mdev->ldev->disk_conf);
1266
1267 if (wo == WO_bdev_flush && !dc->disk_flushes)
1268 wo = WO_drain_io;
1269 if (wo == WO_drain_io && !dc->disk_drain)
1270 wo = WO_none;
1271 put_ldev(mdev);
1272 }
daeda1cc 1273 rcu_read_unlock();
4b0007c0
PR
1274 tconn->write_ordering = wo;
1275 if (pwo != tconn->write_ordering || wo == WO_bdev_flush)
1276 conn_info(tconn, "Method to ensure write ordering: %s\n", write_ordering_str[tconn->write_ordering]);
b411b363
PR
1277}
1278
45bb912b 1279/**
fbe29dec 1280 * drbd_submit_peer_request()
45bb912b 1281 * @mdev: DRBD device.
db830c46 1282 * @peer_req: peer request
45bb912b 1283 * @rw: flag field, see bio->bi_rw
10f6d992
LE
1284 *
1285 * May spread the pages to multiple bios,
1286 * depending on bio_add_page restrictions.
1287 *
1288 * Returns 0 if all bios have been submitted,
1289 * -ENOMEM if we could not allocate enough bios,
1290 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
1291 * single page to an empty bio (which should never happen and likely indicates
1292 * that the lower level IO stack is in some way broken). This has been observed
1293 * on certain Xen deployments.
45bb912b
LE
1294 */
1295/* TODO allocate from our own bio_set. */
fbe29dec
AG
1296int drbd_submit_peer_request(struct drbd_conf *mdev,
1297 struct drbd_peer_request *peer_req,
1298 const unsigned rw, const int fault_type)
45bb912b
LE
1299{
1300 struct bio *bios = NULL;
1301 struct bio *bio;
db830c46
AG
1302 struct page *page = peer_req->pages;
1303 sector_t sector = peer_req->i.sector;
1304 unsigned ds = peer_req->i.size;
45bb912b
LE
1305 unsigned n_bios = 0;
1306 unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
10f6d992 1307 int err = -ENOMEM;
45bb912b
LE
1308
1309 /* In most cases, we will only need one bio. But in case the lower
1310 * level restrictions happen to be different at this offset on this
1311 * side than those of the sending peer, we may need to submit the
9476f39d
LE
1312 * request in more than one bio.
1313 *
1314 * Plain bio_alloc is good enough here, this is no DRBD internally
1315 * generated bio, but a bio allocated on behalf of the peer.
1316 */
45bb912b
LE
1317next_bio:
1318 bio = bio_alloc(GFP_NOIO, nr_pages);
1319 if (!bio) {
1320 dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
1321 goto fail;
1322 }
db830c46 1323 /* > peer_req->i.sector, unless this is the first bio */
45bb912b
LE
1324 bio->bi_sector = sector;
1325 bio->bi_bdev = mdev->ldev->backing_bdev;
45bb912b 1326 bio->bi_rw = rw;
db830c46 1327 bio->bi_private = peer_req;
fcefa62e 1328 bio->bi_end_io = drbd_peer_request_endio;
45bb912b
LE
1329
1330 bio->bi_next = bios;
1331 bios = bio;
1332 ++n_bios;
1333
1334 page_chain_for_each(page) {
1335 unsigned len = min_t(unsigned, ds, PAGE_SIZE);
1336 if (!bio_add_page(bio, page, len, 0)) {
10f6d992
LE
1337 /* A single page must always be possible!
1338 * But in case it fails anyways,
1339 * we deal with it, and complain (below). */
1340 if (bio->bi_vcnt == 0) {
1341 dev_err(DEV,
1342 "bio_add_page failed for len=%u, "
1343 "bi_vcnt=0 (bi_sector=%llu)\n",
1344 len, (unsigned long long)bio->bi_sector);
1345 err = -ENOSPC;
1346 goto fail;
1347 }
45bb912b
LE
1348 goto next_bio;
1349 }
1350 ds -= len;
1351 sector += len >> 9;
1352 --nr_pages;
1353 }
1354 D_ASSERT(page == NULL);
1355 D_ASSERT(ds == 0);
1356
db830c46 1357 atomic_set(&peer_req->pending_bios, n_bios);
45bb912b
LE
1358 do {
1359 bio = bios;
1360 bios = bios->bi_next;
1361 bio->bi_next = NULL;
1362
45bb912b 1363 drbd_generic_make_request(mdev, fault_type, bio);
45bb912b 1364 } while (bios);
45bb912b
LE
1365 return 0;
1366
1367fail:
1368 while (bios) {
1369 bio = bios;
1370 bios = bios->bi_next;
1371 bio_put(bio);
1372 }
10f6d992 1373 return err;
45bb912b
LE
1374}
1375
53840641 1376static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
db830c46 1377 struct drbd_peer_request *peer_req)
53840641 1378{
db830c46 1379 struct drbd_interval *i = &peer_req->i;
53840641
AG
1380
1381 drbd_remove_interval(&mdev->write_requests, i);
1382 drbd_clear_interval(i);
1383
6c852bec 1384 /* Wake up any processes waiting for this peer request to complete. */
53840641
AG
1385 if (i->waiting)
1386 wake_up(&mdev->misc_wait);
1387}
1388
77fede51
PR
1389void conn_wait_active_ee_empty(struct drbd_tconn *tconn)
1390{
1391 struct drbd_conf *mdev;
1392 int vnr;
1393
1394 rcu_read_lock();
1395 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1396 kref_get(&mdev->kref);
1397 rcu_read_unlock();
1398 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1399 kref_put(&mdev->kref, &drbd_minor_destroy);
1400 rcu_read_lock();
1401 }
1402 rcu_read_unlock();
1403}
1404
4a76b161 1405static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 1406{
2451fc3b 1407 int rv;
e658983a 1408 struct p_barrier *p = pi->data;
b411b363
PR
1409 struct drbd_epoch *epoch;
1410
9ed57dcb
LE
1411 /* FIXME these are unacked on connection,
1412 * not a specific (peer)device.
1413 */
12038a3a 1414 tconn->current_epoch->barrier_nr = p->barrier;
9ed57dcb 1415 tconn->current_epoch->tconn = tconn;
1e9dd291 1416 rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR);
b411b363
PR
1417
1418 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1419 * the activity log, which means it would not be resynced in case the
1420 * R_PRIMARY crashes now.
1421 * Therefore we must send the barrier_ack after the barrier request was
1422 * completed. */
4b0007c0 1423 switch (tconn->write_ordering) {
b411b363
PR
1424 case WO_none:
1425 if (rv == FE_RECYCLED)
82bc0194 1426 return 0;
2451fc3b
PR
1427
1428 /* receiver context, in the writeout path of the other node.
1429 * avoid potential distributed deadlock */
1430 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1431 if (epoch)
1432 break;
1433 else
9ed57dcb 1434 conn_warn(tconn, "Allocation of an epoch failed, slowing down\n");
2451fc3b 1435 /* Fall through */
b411b363
PR
1436
1437 case WO_bdev_flush:
1438 case WO_drain_io:
77fede51 1439 conn_wait_active_ee_empty(tconn);
4b0007c0 1440 drbd_flush(tconn);
2451fc3b 1441
12038a3a 1442 if (atomic_read(&tconn->current_epoch->epoch_size)) {
2451fc3b
PR
1443 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1444 if (epoch)
1445 break;
b411b363
PR
1446 }
1447
82bc0194 1448 return 0;
2451fc3b 1449 default:
9ed57dcb 1450 conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);
82bc0194 1451 return -EIO;
b411b363
PR
1452 }
1453
1454 epoch->flags = 0;
1455 atomic_set(&epoch->epoch_size, 0);
1456 atomic_set(&epoch->active, 0);
1457
12038a3a
PR
1458 spin_lock(&tconn->epoch_lock);
1459 if (atomic_read(&tconn->current_epoch->epoch_size)) {
1460 list_add(&epoch->list, &tconn->current_epoch->list);
1461 tconn->current_epoch = epoch;
1462 tconn->epochs++;
b411b363
PR
1463 } else {
1464 /* The current_epoch got recycled while we allocated this one... */
1465 kfree(epoch);
1466 }
12038a3a 1467 spin_unlock(&tconn->epoch_lock);
b411b363 1468
82bc0194 1469 return 0;
b411b363
PR
1470}
1471
1472/* used from receive_RSDataReply (recv_resync_read)
1473 * and from receive_Data */
f6ffca9f
AG
1474static struct drbd_peer_request *
1475read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
1476 int data_size) __must_hold(local)
b411b363 1477{
6666032a 1478 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
db830c46 1479 struct drbd_peer_request *peer_req;
b411b363 1480 struct page *page;
a5c31904 1481 int dgs, ds, err;
a0638456
PR
1482 void *dig_in = mdev->tconn->int_dig_in;
1483 void *dig_vv = mdev->tconn->int_dig_vv;
6b4388ac 1484 unsigned long *data;
b411b363 1485
88104ca4
AG
1486 dgs = 0;
1487 if (mdev->tconn->peer_integrity_tfm) {
1488 dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
9f5bdc33
AG
1489 /*
1490 * FIXME: Receive the incoming digest into the receive buffer
1491 * here, together with its struct p_data?
1492 */
a5c31904
AG
1493 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1494 if (err)
b411b363 1495 return NULL;
88104ca4 1496 data_size -= dgs;
b411b363
PR
1497 }
1498
841ce241
AG
1499 if (!expect(IS_ALIGNED(data_size, 512)))
1500 return NULL;
1501 if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
1502 return NULL;
b411b363 1503
6666032a
LE
1504 /* even though we trust out peer,
1505 * we sometimes have to double check. */
1506 if (sector + (data_size>>9) > capacity) {
fdda6544
LE
1507 dev_err(DEV, "request from peer beyond end of local disk: "
1508 "capacity: %llus < sector: %llus + size: %u\n",
6666032a
LE
1509 (unsigned long long)capacity,
1510 (unsigned long long)sector, data_size);
1511 return NULL;
1512 }
1513
b411b363
PR
1514 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1515 * "criss-cross" setup, that might cause write-out on some other DRBD,
1516 * which in turn might block on the other node at this very place. */
0db55363 1517 peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);
db830c46 1518 if (!peer_req)
b411b363 1519 return NULL;
45bb912b 1520
a73ff323 1521 if (!data_size)
81a3537a 1522 return peer_req;
a73ff323 1523
b411b363 1524 ds = data_size;
db830c46 1525 page = peer_req->pages;
45bb912b
LE
1526 page_chain_for_each(page) {
1527 unsigned len = min_t(int, ds, PAGE_SIZE);
6b4388ac 1528 data = kmap(page);
a5c31904 1529 err = drbd_recv_all_warn(mdev->tconn, data, len);
0cf9d27e 1530 if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
6b4388ac
PR
1531 dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1532 data[0] = data[0] ^ (unsigned long)-1;
1533 }
b411b363 1534 kunmap(page);
a5c31904 1535 if (err) {
3967deb1 1536 drbd_free_peer_req(mdev, peer_req);
b411b363
PR
1537 return NULL;
1538 }
a5c31904 1539 ds -= len;
b411b363
PR
1540 }
1541
1542 if (dgs) {
5b614abe 1543 drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
b411b363 1544 if (memcmp(dig_in, dig_vv, dgs)) {
470be44a
LE
1545 dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
1546 (unsigned long long)sector, data_size);
3967deb1 1547 drbd_free_peer_req(mdev, peer_req);
b411b363
PR
1548 return NULL;
1549 }
1550 }
1551 mdev->recv_cnt += data_size>>9;
db830c46 1552 return peer_req;
b411b363
PR
1553}
1554
1555/* drbd_drain_block() just takes a data block
1556 * out of the socket input buffer, and discards it.
1557 */
1558static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1559{
1560 struct page *page;
a5c31904 1561 int err = 0;
b411b363
PR
1562 void *data;
1563
c3470cde 1564 if (!data_size)
fc5be839 1565 return 0;
c3470cde 1566
c37c8ecf 1567 page = drbd_alloc_pages(mdev, 1, 1);
b411b363
PR
1568
1569 data = kmap(page);
1570 while (data_size) {
fc5be839
AG
1571 unsigned int len = min_t(int, data_size, PAGE_SIZE);
1572
a5c31904
AG
1573 err = drbd_recv_all_warn(mdev->tconn, data, len);
1574 if (err)
b411b363 1575 break;
a5c31904 1576 data_size -= len;
b411b363
PR
1577 }
1578 kunmap(page);
5cc287e0 1579 drbd_free_pages(mdev, page, 0);
fc5be839 1580 return err;
b411b363
PR
1581}
1582
1583static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1584 sector_t sector, int data_size)
1585{
1586 struct bio_vec *bvec;
1587 struct bio *bio;
a5c31904 1588 int dgs, err, i, expect;
a0638456
PR
1589 void *dig_in = mdev->tconn->int_dig_in;
1590 void *dig_vv = mdev->tconn->int_dig_vv;
b411b363 1591
88104ca4
AG
1592 dgs = 0;
1593 if (mdev->tconn->peer_integrity_tfm) {
1594 dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
a5c31904
AG
1595 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1596 if (err)
1597 return err;
88104ca4 1598 data_size -= dgs;
b411b363
PR
1599 }
1600
b411b363
PR
1601 /* optimistically update recv_cnt. if receiving fails below,
1602 * we disconnect anyways, and counters will be reset. */
1603 mdev->recv_cnt += data_size>>9;
1604
1605 bio = req->master_bio;
1606 D_ASSERT(sector == bio->bi_sector);
1607
1608 bio_for_each_segment(bvec, bio, i) {
a5c31904 1609 void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
b411b363 1610 expect = min_t(int, data_size, bvec->bv_len);
a5c31904 1611 err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
b411b363 1612 kunmap(bvec->bv_page);
a5c31904
AG
1613 if (err)
1614 return err;
1615 data_size -= expect;
b411b363
PR
1616 }
1617
1618 if (dgs) {
5b614abe 1619 drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
b411b363
PR
1620 if (memcmp(dig_in, dig_vv, dgs)) {
1621 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
28284cef 1622 return -EINVAL;
b411b363
PR
1623 }
1624 }
1625
1626 D_ASSERT(data_size == 0);
28284cef 1627 return 0;
b411b363
PR
1628}
1629
a990be46
AG
1630/*
1631 * e_end_resync_block() is called in asender context via
1632 * drbd_finish_peer_reqs().
1633 */
99920dc5 1634static int e_end_resync_block(struct drbd_work *w, int unused)
b411b363 1635{
8050e6d0
AG
1636 struct drbd_peer_request *peer_req =
1637 container_of(w, struct drbd_peer_request, w);
00d56944 1638 struct drbd_conf *mdev = w->mdev;
db830c46 1639 sector_t sector = peer_req->i.sector;
99920dc5 1640 int err;
b411b363 1641
db830c46 1642 D_ASSERT(drbd_interval_empty(&peer_req->i));
b411b363 1643
db830c46
AG
1644 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1645 drbd_set_in_sync(mdev, sector, peer_req->i.size);
99920dc5 1646 err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
b411b363
PR
1647 } else {
1648 /* Record failure to sync */
db830c46 1649 drbd_rs_failed_io(mdev, sector, peer_req->i.size);
b411b363 1650
99920dc5 1651 err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
b411b363
PR
1652 }
1653 dec_unacked(mdev);
1654
99920dc5 1655 return err;
b411b363
PR
1656}
1657
1658static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1659{
db830c46 1660 struct drbd_peer_request *peer_req;
b411b363 1661
db830c46
AG
1662 peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
1663 if (!peer_req)
45bb912b 1664 goto fail;
b411b363
PR
1665
1666 dec_rs_pending(mdev);
1667
b411b363
PR
1668 inc_unacked(mdev);
1669 /* corresponding dec_unacked() in e_end_resync_block()
1670 * respective _drbd_clear_done_ee */
1671
db830c46 1672 peer_req->w.cb = e_end_resync_block;
45bb912b 1673
87eeee41 1674 spin_lock_irq(&mdev->tconn->req_lock);
db830c46 1675 list_add(&peer_req->w.list, &mdev->sync_ee);
87eeee41 1676 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 1677
0f0601f4 1678 atomic_add(data_size >> 9, &mdev->rs_sect_ev);
fbe29dec 1679 if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
e1c1b0fc 1680 return 0;
b411b363 1681
10f6d992
LE
1682 /* don't care for the reason here */
1683 dev_err(DEV, "submit failed, triggering re-connect\n");
87eeee41 1684 spin_lock_irq(&mdev->tconn->req_lock);
db830c46 1685 list_del(&peer_req->w.list);
87eeee41 1686 spin_unlock_irq(&mdev->tconn->req_lock);
22cc37a9 1687
3967deb1 1688 drbd_free_peer_req(mdev, peer_req);
45bb912b
LE
1689fail:
1690 put_ldev(mdev);
e1c1b0fc 1691 return -EIO;
b411b363
PR
1692}
1693
668eebc6 1694static struct drbd_request *
bc9c5c41
AG
1695find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
1696 sector_t sector, bool missing_ok, const char *func)
51624585 1697{
51624585
AG
1698 struct drbd_request *req;
1699
bc9c5c41
AG
1700 /* Request object according to our peer */
1701 req = (struct drbd_request *)(unsigned long)id;
5e472264 1702 if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
668eebc6 1703 return req;
c3afd8f5 1704 if (!missing_ok) {
5af172ed 1705 dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
c3afd8f5
AG
1706 (unsigned long)id, (unsigned long long)sector);
1707 }
51624585 1708 return NULL;
b411b363
PR
1709}
1710
4a76b161 1711static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 1712{
4a76b161 1713 struct drbd_conf *mdev;
b411b363
PR
1714 struct drbd_request *req;
1715 sector_t sector;
82bc0194 1716 int err;
e658983a 1717 struct p_data *p = pi->data;
4a76b161
AG
1718
1719 mdev = vnr_to_mdev(tconn, pi->vnr);
1720 if (!mdev)
1721 return -EIO;
b411b363
PR
1722
1723 sector = be64_to_cpu(p->sector);
1724
87eeee41 1725 spin_lock_irq(&mdev->tconn->req_lock);
bc9c5c41 1726 req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
87eeee41 1727 spin_unlock_irq(&mdev->tconn->req_lock);
c3afd8f5 1728 if (unlikely(!req))
82bc0194 1729 return -EIO;
b411b363 1730
24c4830c 1731 /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
b411b363
PR
1732 * special casing it there for the various failure cases.
1733 * still no race with drbd_fail_pending_reads */
e2857216 1734 err = recv_dless_read(mdev, req, sector, pi->size);
82bc0194 1735 if (!err)
8554df1c 1736 req_mod(req, DATA_RECEIVED);
b411b363
PR
1737 /* else: nothing. handled from drbd_disconnect...
1738 * I don't think we may complete this just yet
1739 * in case we are "on-disconnect: freeze" */
1740
82bc0194 1741 return err;
b411b363
PR
1742}
1743
4a76b161 1744static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 1745{
4a76b161 1746 struct drbd_conf *mdev;
b411b363 1747 sector_t sector;
82bc0194 1748 int err;
e658983a 1749 struct p_data *p = pi->data;
4a76b161
AG
1750
1751 mdev = vnr_to_mdev(tconn, pi->vnr);
1752 if (!mdev)
1753 return -EIO;
b411b363
PR
1754
1755 sector = be64_to_cpu(p->sector);
1756 D_ASSERT(p->block_id == ID_SYNCER);
1757
1758 if (get_ldev(mdev)) {
1759 /* data is submitted to disk within recv_resync_read.
1760 * corresponding put_ldev done below on error,
fcefa62e 1761 * or in drbd_peer_request_endio. */
e2857216 1762 err = recv_resync_read(mdev, sector, pi->size);
b411b363
PR
1763 } else {
1764 if (__ratelimit(&drbd_ratelimit_state))
1765 dev_err(DEV, "Can not write resync data to local disk.\n");
1766
e2857216 1767 err = drbd_drain_block(mdev, pi->size);
b411b363 1768
e2857216 1769 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
b411b363
PR
1770 }
1771
e2857216 1772 atomic_add(pi->size >> 9, &mdev->rs_sect_in);
778f271d 1773
82bc0194 1774 return err;
b411b363
PR
1775}
1776
7be8da07
AG
1777static void restart_conflicting_writes(struct drbd_conf *mdev,
1778 sector_t sector, int size)
b411b363 1779{
7be8da07
AG
1780 struct drbd_interval *i;
1781 struct drbd_request *req;
1782
1783 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
1784 if (!i->local)
1785 continue;
1786 req = container_of(i, struct drbd_request, i);
1787 if (req->rq_state & RQ_LOCAL_PENDING ||
1788 !(req->rq_state & RQ_POSTPONED))
1789 continue;
2312f0b3
LE
1790 /* as it is RQ_POSTPONED, this will cause it to
1791 * be queued on the retry workqueue. */
d4dabbe2 1792 __req_mod(req, CONFLICT_RESOLVED, NULL);
7be8da07
AG
1793 }
1794}
b411b363 1795
a990be46
AG
1796/*
1797 * e_end_block() is called in asender context via drbd_finish_peer_reqs().
b411b363 1798 */
99920dc5 1799static int e_end_block(struct drbd_work *w, int cancel)
b411b363 1800{
8050e6d0
AG
1801 struct drbd_peer_request *peer_req =
1802 container_of(w, struct drbd_peer_request, w);
00d56944 1803 struct drbd_conf *mdev = w->mdev;
db830c46 1804 sector_t sector = peer_req->i.sector;
99920dc5 1805 int err = 0, pcmd;
b411b363 1806
303d1448 1807 if (peer_req->flags & EE_SEND_WRITE_ACK) {
db830c46 1808 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
b411b363
PR
1809 pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1810 mdev->state.conn <= C_PAUSED_SYNC_T &&
db830c46 1811 peer_req->flags & EE_MAY_SET_IN_SYNC) ?
b411b363 1812 P_RS_WRITE_ACK : P_WRITE_ACK;
99920dc5 1813 err = drbd_send_ack(mdev, pcmd, peer_req);
b411b363 1814 if (pcmd == P_RS_WRITE_ACK)
db830c46 1815 drbd_set_in_sync(mdev, sector, peer_req->i.size);
b411b363 1816 } else {
99920dc5 1817 err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
b411b363
PR
1818 /* we expect it to be marked out of sync anyways...
1819 * maybe assert this? */
1820 }
1821 dec_unacked(mdev);
1822 }
1823 /* we delete from the conflict detection hash _after_ we sent out the
1824 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
302bdeae 1825 if (peer_req->flags & EE_IN_INTERVAL_TREE) {
87eeee41 1826 spin_lock_irq(&mdev->tconn->req_lock);
db830c46
AG
1827 D_ASSERT(!drbd_interval_empty(&peer_req->i));
1828 drbd_remove_epoch_entry_interval(mdev, peer_req);
7be8da07
AG
1829 if (peer_req->flags & EE_RESTART_REQUESTS)
1830 restart_conflicting_writes(mdev, sector, peer_req->i.size);
87eeee41 1831 spin_unlock_irq(&mdev->tconn->req_lock);
bb3bfe96 1832 } else
db830c46 1833 D_ASSERT(drbd_interval_empty(&peer_req->i));
b411b363 1834
1e9dd291 1835 drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
b411b363 1836
99920dc5 1837 return err;
b411b363
PR
1838}
1839
7be8da07 1840static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
b411b363 1841{
7be8da07 1842 struct drbd_conf *mdev = w->mdev;
8050e6d0
AG
1843 struct drbd_peer_request *peer_req =
1844 container_of(w, struct drbd_peer_request, w);
99920dc5 1845 int err;
b411b363 1846
99920dc5 1847 err = drbd_send_ack(mdev, ack, peer_req);
b411b363
PR
1848 dec_unacked(mdev);
1849
99920dc5 1850 return err;
b411b363
PR
1851}
1852
d4dabbe2 1853static int e_send_superseded(struct drbd_work *w, int unused)
7be8da07 1854{
d4dabbe2 1855 return e_send_ack(w, P_SUPERSEDED);
7be8da07
AG
1856}
1857
99920dc5 1858static int e_send_retry_write(struct drbd_work *w, int unused)
7be8da07
AG
1859{
1860 struct drbd_tconn *tconn = w->mdev->tconn;
1861
1862 return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
d4dabbe2 1863 P_RETRY_WRITE : P_SUPERSEDED);
7be8da07 1864}
b411b363 1865
3e394da1
AG
1866static bool seq_greater(u32 a, u32 b)
1867{
1868 /*
1869 * We assume 32-bit wrap-around here.
1870 * For 24-bit wrap-around, we would have to shift:
1871 * a <<= 8; b <<= 8;
1872 */
1873 return (s32)a - (s32)b > 0;
1874}
b411b363 1875
3e394da1
AG
1876static u32 seq_max(u32 a, u32 b)
1877{
1878 return seq_greater(a, b) ? a : b;
b411b363
PR
1879}
1880
7be8da07 1881static bool need_peer_seq(struct drbd_conf *mdev)
b411b363 1882{
7be8da07 1883 struct drbd_tconn *tconn = mdev->tconn;
302bdeae 1884 int tp;
b411b363 1885
7be8da07
AG
1886 /*
1887 * We only need to keep track of the last packet_seq number of our peer
427c0434 1888 * if we are in dual-primary mode and we have the resolve-conflicts flag set; see
7be8da07
AG
1889 * handle_write_conflicts().
1890 */
b411b363 1891
302bdeae
PR
1892 rcu_read_lock();
1893 tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
1894 rcu_read_unlock();
b411b363 1895
427c0434 1896 return tp && test_bit(RESOLVE_CONFLICTS, &tconn->flags);
7be8da07 1897}
b411b363 1898
43ae077d 1899static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
3e394da1 1900{
3c13b680 1901 unsigned int newest_peer_seq;
3e394da1 1902
7be8da07
AG
1903 if (need_peer_seq(mdev)) {
1904 spin_lock(&mdev->peer_seq_lock);
3c13b680
LE
1905 newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
1906 mdev->peer_seq = newest_peer_seq;
7be8da07 1907 spin_unlock(&mdev->peer_seq_lock);
3c13b680
LE
1908 /* wake up only if we actually changed mdev->peer_seq */
1909 if (peer_seq == newest_peer_seq)
7be8da07
AG
1910 wake_up(&mdev->seq_wait);
1911 }
b411b363
PR
1912}
1913
d93f6302 1914static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
b6a370ba 1915{
d93f6302
LE
1916 return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
1917}
b6a370ba 1918
d93f6302 1919/* maybe change sync_ee into interval trees as well? */
3ea35df8 1920static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
d93f6302
LE
1921{
1922 struct drbd_peer_request *rs_req;
b6a370ba
PR
1923 bool rv = 0;
1924
d93f6302
LE
1925 spin_lock_irq(&mdev->tconn->req_lock);
1926 list_for_each_entry(rs_req, &mdev->sync_ee, w.list) {
1927 if (overlaps(peer_req->i.sector, peer_req->i.size,
1928 rs_req->i.sector, rs_req->i.size)) {
b6a370ba
PR
1929 rv = 1;
1930 break;
1931 }
1932 }
d93f6302 1933 spin_unlock_irq(&mdev->tconn->req_lock);
b6a370ba
PR
1934
1935 return rv;
1936}
1937
b411b363
PR
1938/* Called from receive_Data.
1939 * Synchronize packets on sock with packets on msock.
1940 *
1941 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1942 * packet traveling on msock, they are still processed in the order they have
1943 * been sent.
1944 *
1945 * Note: we don't care for Ack packets overtaking P_DATA packets.
1946 *
1947 * In case packet_seq is larger than mdev->peer_seq number, there are
1948 * outstanding packets on the msock. We wait for them to arrive.
1949 * In case we are the logically next packet, we update mdev->peer_seq
1950 * ourselves. Correctly handles 32bit wrap around.
1951 *
1952 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1953 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1954 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1955 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
1956 *
1957 * returns 0 if we may process the packet,
1958 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
7be8da07 1959static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
b411b363
PR
1960{
1961 DEFINE_WAIT(wait);
b411b363 1962 long timeout;
7be8da07
AG
1963 int ret;
1964
1965 if (!need_peer_seq(mdev))
1966 return 0;
1967
b411b363
PR
1968 spin_lock(&mdev->peer_seq_lock);
1969 for (;;) {
7be8da07
AG
1970 if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
1971 mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
1972 ret = 0;
b411b363 1973 break;
7be8da07 1974 }
b411b363
PR
1975 if (signal_pending(current)) {
1976 ret = -ERESTARTSYS;
1977 break;
1978 }
7be8da07 1979 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
b411b363 1980 spin_unlock(&mdev->peer_seq_lock);
44ed167d
PR
1981 rcu_read_lock();
1982 timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10;
1983 rcu_read_unlock();
71b1c1eb 1984 timeout = schedule_timeout(timeout);
b411b363 1985 spin_lock(&mdev->peer_seq_lock);
7be8da07 1986 if (!timeout) {
b411b363 1987 ret = -ETIMEDOUT;
71b1c1eb 1988 dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
b411b363
PR
1989 break;
1990 }
1991 }
b411b363 1992 spin_unlock(&mdev->peer_seq_lock);
7be8da07 1993 finish_wait(&mdev->seq_wait, &wait);
b411b363
PR
1994 return ret;
1995}
1996
688593c5
LE
1997/* see also bio_flags_to_wire()
1998 * DRBD_REQ_*, because we need to semantically map the flags to data packet
1999 * flags and back. We may replicate to other kernel versions. */
2000static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
76d2e7ec 2001{
688593c5
LE
2002 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
2003 (dpf & DP_FUA ? REQ_FUA : 0) |
2004 (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
2005 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
76d2e7ec
PR
2006}
2007
7be8da07
AG
2008static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
2009 unsigned int size)
2010{
2011 struct drbd_interval *i;
2012
2013 repeat:
2014 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
2015 struct drbd_request *req;
2016 struct bio_and_error m;
2017
2018 if (!i->local)
2019 continue;
2020 req = container_of(i, struct drbd_request, i);
2021 if (!(req->rq_state & RQ_POSTPONED))
2022 continue;
2023 req->rq_state &= ~RQ_POSTPONED;
2024 __req_mod(req, NEG_ACKED, &m);
2025 spin_unlock_irq(&mdev->tconn->req_lock);
2026 if (m.bio)
2027 complete_master_bio(mdev, &m);
2028 spin_lock_irq(&mdev->tconn->req_lock);
2029 goto repeat;
2030 }
2031}
2032
2033static int handle_write_conflicts(struct drbd_conf *mdev,
2034 struct drbd_peer_request *peer_req)
2035{
2036 struct drbd_tconn *tconn = mdev->tconn;
427c0434 2037 bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &tconn->flags);
7be8da07
AG
2038 sector_t sector = peer_req->i.sector;
2039 const unsigned int size = peer_req->i.size;
2040 struct drbd_interval *i;
2041 bool equal;
2042 int err;
2043
2044 /*
2045 * Inserting the peer request into the write_requests tree will prevent
2046 * new conflicting local requests from being added.
2047 */
2048 drbd_insert_interval(&mdev->write_requests, &peer_req->i);
2049
2050 repeat:
2051 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
2052 if (i == &peer_req->i)
2053 continue;
2054
2055 if (!i->local) {
2056 /*
2057 * Our peer has sent a conflicting remote request; this
2058 * should not happen in a two-node setup. Wait for the
2059 * earlier peer request to complete.
2060 */
2061 err = drbd_wait_misc(mdev, i);
2062 if (err)
2063 goto out;
2064 goto repeat;
2065 }
2066
2067 equal = i->sector == sector && i->size == size;
2068 if (resolve_conflicts) {
2069 /*
2070 * If the peer request is fully contained within the
d4dabbe2
LE
2071 * overlapping request, it can be considered overwritten
2072 * and thus superseded; otherwise, it will be retried
2073 * once all overlapping requests have completed.
7be8da07 2074 */
d4dabbe2 2075 bool superseded = i->sector <= sector && i->sector +
7be8da07
AG
2076 (i->size >> 9) >= sector + (size >> 9);
2077
2078 if (!equal)
2079 dev_alert(DEV, "Concurrent writes detected: "
2080 "local=%llus +%u, remote=%llus +%u, "
2081 "assuming %s came first\n",
2082 (unsigned long long)i->sector, i->size,
2083 (unsigned long long)sector, size,
d4dabbe2 2084 superseded ? "local" : "remote");
7be8da07
AG
2085
2086 inc_unacked(mdev);
d4dabbe2 2087 peer_req->w.cb = superseded ? e_send_superseded :
7be8da07
AG
2088 e_send_retry_write;
2089 list_add_tail(&peer_req->w.list, &mdev->done_ee);
2090 wake_asender(mdev->tconn);
2091
2092 err = -ENOENT;
2093 goto out;
2094 } else {
2095 struct drbd_request *req =
2096 container_of(i, struct drbd_request, i);
2097
2098 if (!equal)
2099 dev_alert(DEV, "Concurrent writes detected: "
2100 "local=%llus +%u, remote=%llus +%u\n",
2101 (unsigned long long)i->sector, i->size,
2102 (unsigned long long)sector, size);
2103
2104 if (req->rq_state & RQ_LOCAL_PENDING ||
2105 !(req->rq_state & RQ_POSTPONED)) {
2106 /*
2107 * Wait for the node with the discard flag to
d4dabbe2
LE
2108 * decide if this request has been superseded
2109 * or needs to be retried.
2110 * Requests that have been superseded will
7be8da07
AG
2111 * disappear from the write_requests tree.
2112 *
2113 * In addition, wait for the conflicting
2114 * request to finish locally before submitting
2115 * the conflicting peer request.
2116 */
2117 err = drbd_wait_misc(mdev, &req->i);
2118 if (err) {
2119 _conn_request_state(mdev->tconn,
2120 NS(conn, C_TIMEOUT),
2121 CS_HARD);
2122 fail_postponed_requests(mdev, sector, size);
2123 goto out;
2124 }
2125 goto repeat;
2126 }
2127 /*
2128 * Remember to restart the conflicting requests after
2129 * the new peer request has completed.
2130 */
2131 peer_req->flags |= EE_RESTART_REQUESTS;
2132 }
2133 }
2134 err = 0;
2135
2136 out:
2137 if (err)
2138 drbd_remove_epoch_entry_interval(mdev, peer_req);
2139 return err;
2140}
2141
b411b363 2142/* mirrored write */
4a76b161 2143static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 2144{
4a76b161 2145 struct drbd_conf *mdev;
b411b363 2146 sector_t sector;
db830c46 2147 struct drbd_peer_request *peer_req;
e658983a 2148 struct p_data *p = pi->data;
7be8da07 2149 u32 peer_seq = be32_to_cpu(p->seq_num);
b411b363
PR
2150 int rw = WRITE;
2151 u32 dp_flags;
302bdeae 2152 int err, tp;
b411b363 2153
4a76b161
AG
2154 mdev = vnr_to_mdev(tconn, pi->vnr);
2155 if (!mdev)
2156 return -EIO;
b411b363 2157
7be8da07 2158 if (!get_ldev(mdev)) {
82bc0194
AG
2159 int err2;
2160
7be8da07 2161 err = wait_for_and_update_peer_seq(mdev, peer_seq);
e2857216 2162 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
12038a3a 2163 atomic_inc(&tconn->current_epoch->epoch_size);
e2857216 2164 err2 = drbd_drain_block(mdev, pi->size);
82bc0194
AG
2165 if (!err)
2166 err = err2;
2167 return err;
b411b363
PR
2168 }
2169
fcefa62e
AG
2170 /*
2171 * Corresponding put_ldev done either below (on various errors), or in
2172 * drbd_peer_request_endio, if we successfully submit the data at the
2173 * end of this function.
2174 */
b411b363
PR
2175
2176 sector = be64_to_cpu(p->sector);
e2857216 2177 peer_req = read_in_block(mdev, p->block_id, sector, pi->size);
db830c46 2178 if (!peer_req) {
b411b363 2179 put_ldev(mdev);
82bc0194 2180 return -EIO;
b411b363
PR
2181 }
2182
db830c46 2183 peer_req->w.cb = e_end_block;
b411b363 2184
688593c5
LE
2185 dp_flags = be32_to_cpu(p->dp_flags);
2186 rw |= wire_flags_to_bio(mdev, dp_flags);
81a3537a
LE
2187 if (peer_req->pages == NULL) {
2188 D_ASSERT(peer_req->i.size == 0);
a73ff323
LE
2189 D_ASSERT(dp_flags & DP_FLUSH);
2190 }
688593c5
LE
2191
2192 if (dp_flags & DP_MAY_SET_IN_SYNC)
db830c46 2193 peer_req->flags |= EE_MAY_SET_IN_SYNC;
688593c5 2194
12038a3a
PR
2195 spin_lock(&tconn->epoch_lock);
2196 peer_req->epoch = tconn->current_epoch;
db830c46
AG
2197 atomic_inc(&peer_req->epoch->epoch_size);
2198 atomic_inc(&peer_req->epoch->active);
12038a3a 2199 spin_unlock(&tconn->epoch_lock);
b411b363 2200
302bdeae
PR
2201 rcu_read_lock();
2202 tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
2203 rcu_read_unlock();
2204 if (tp) {
2205 peer_req->flags |= EE_IN_INTERVAL_TREE;
7be8da07
AG
2206 err = wait_for_and_update_peer_seq(mdev, peer_seq);
2207 if (err)
b411b363 2208 goto out_interrupted;
87eeee41 2209 spin_lock_irq(&mdev->tconn->req_lock);
7be8da07
AG
2210 err = handle_write_conflicts(mdev, peer_req);
2211 if (err) {
2212 spin_unlock_irq(&mdev->tconn->req_lock);
2213 if (err == -ENOENT) {
b411b363 2214 put_ldev(mdev);
82bc0194 2215 return 0;
b411b363 2216 }
7be8da07 2217 goto out_interrupted;
b411b363 2218 }
7be8da07
AG
2219 } else
2220 spin_lock_irq(&mdev->tconn->req_lock);
db830c46 2221 list_add(&peer_req->w.list, &mdev->active_ee);
87eeee41 2222 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 2223
d93f6302 2224 if (mdev->state.conn == C_SYNC_TARGET)
3ea35df8 2225 wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, peer_req));
b411b363 2226
303d1448 2227 if (mdev->tconn->agreed_pro_version < 100) {
44ed167d
PR
2228 rcu_read_lock();
2229 switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
303d1448
PR
2230 case DRBD_PROT_C:
2231 dp_flags |= DP_SEND_WRITE_ACK;
2232 break;
2233 case DRBD_PROT_B:
2234 dp_flags |= DP_SEND_RECEIVE_ACK;
2235 break;
b411b363 2236 }
44ed167d 2237 rcu_read_unlock();
b411b363
PR
2238 }
2239
303d1448
PR
2240 if (dp_flags & DP_SEND_WRITE_ACK) {
2241 peer_req->flags |= EE_SEND_WRITE_ACK;
b411b363
PR
2242 inc_unacked(mdev);
2243 /* corresponding dec_unacked() in e_end_block()
2244 * respective _drbd_clear_done_ee */
303d1448
PR
2245 }
2246
2247 if (dp_flags & DP_SEND_RECEIVE_ACK) {
b411b363
PR
2248 /* I really don't like it that the receiver thread
2249 * sends on the msock, but anyways */
db830c46 2250 drbd_send_ack(mdev, P_RECV_ACK, peer_req);
b411b363
PR
2251 }
2252
6719fb03 2253 if (mdev->state.pdsk < D_INCONSISTENT) {
b411b363 2254 /* In case we have the only disk of the cluster, */
db830c46
AG
2255 drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
2256 peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
2257 peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
181286ad 2258 drbd_al_begin_io(mdev, &peer_req->i);
b411b363
PR
2259 }
2260
82bc0194
AG
2261 err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
2262 if (!err)
2263 return 0;
b411b363 2264
10f6d992
LE
2265 /* don't care for the reason here */
2266 dev_err(DEV, "submit failed, triggering re-connect\n");
87eeee41 2267 spin_lock_irq(&mdev->tconn->req_lock);
db830c46
AG
2268 list_del(&peer_req->w.list);
2269 drbd_remove_epoch_entry_interval(mdev, peer_req);
87eeee41 2270 spin_unlock_irq(&mdev->tconn->req_lock);
db830c46 2271 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
181286ad 2272 drbd_al_complete_io(mdev, &peer_req->i);
22cc37a9 2273
b411b363 2274out_interrupted:
1e9dd291 2275 drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP);
b411b363 2276 put_ldev(mdev);
3967deb1 2277 drbd_free_peer_req(mdev, peer_req);
82bc0194 2278 return err;
b411b363
PR
2279}
2280
0f0601f4
LE
2281/* We may throttle resync, if the lower device seems to be busy,
2282 * and current sync rate is above c_min_rate.
2283 *
2284 * To decide whether or not the lower device is busy, we use a scheme similar
2285 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
2286 * (more than 64 sectors) of activity we cannot account for with our own resync
2287 * activity, it obviously is "busy".
2288 *
2289 * The current sync rate used here uses only the most recent two step marks,
2290 * to have a short time average so we can react faster.
2291 */
e3555d85 2292int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
0f0601f4
LE
2293{
2294 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
2295 unsigned long db, dt, dbdt;
e3555d85 2296 struct lc_element *tmp;
0f0601f4
LE
2297 int curr_events;
2298 int throttle = 0;
daeda1cc
PR
2299 unsigned int c_min_rate;
2300
2301 rcu_read_lock();
2302 c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;
2303 rcu_read_unlock();
0f0601f4
LE
2304
2305 /* feature disabled? */
daeda1cc 2306 if (c_min_rate == 0)
0f0601f4
LE
2307 return 0;
2308
e3555d85
PR
2309 spin_lock_irq(&mdev->al_lock);
2310 tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
2311 if (tmp) {
2312 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
2313 if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
2314 spin_unlock_irq(&mdev->al_lock);
2315 return 0;
2316 }
2317 /* Do not slow down if app IO is already waiting for this extent */
2318 }
2319 spin_unlock_irq(&mdev->al_lock);
2320
0f0601f4
LE
2321 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
2322 (int)part_stat_read(&disk->part0, sectors[1]) -
2323 atomic_read(&mdev->rs_sect_ev);
e3555d85 2324
0f0601f4
LE
2325 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
2326 unsigned long rs_left;
2327 int i;
2328
2329 mdev->rs_last_events = curr_events;
2330
2331 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2332 * approx. */
2649f080
LE
2333 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2334
2335 if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
2336 rs_left = mdev->ov_left;
2337 else
2338 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
0f0601f4
LE
2339
2340 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
2341 if (!dt)
2342 dt++;
2343 db = mdev->rs_mark_left[i] - rs_left;
2344 dbdt = Bit2KB(db/dt);
2345
daeda1cc 2346 if (dbdt > c_min_rate)
0f0601f4
LE
2347 throttle = 1;
2348 }
2349 return throttle;
2350}
2351
2352
4a76b161 2353static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 2354{
4a76b161 2355 struct drbd_conf *mdev;
b411b363 2356 sector_t sector;
4a76b161 2357 sector_t capacity;
db830c46 2358 struct drbd_peer_request *peer_req;
b411b363 2359 struct digest_info *di = NULL;
b18b37be 2360 int size, verb;
b411b363 2361 unsigned int fault_type;
e658983a 2362 struct p_block_req *p = pi->data;
4a76b161
AG
2363
2364 mdev = vnr_to_mdev(tconn, pi->vnr);
2365 if (!mdev)
2366 return -EIO;
2367 capacity = drbd_get_capacity(mdev->this_bdev);
b411b363
PR
2368
2369 sector = be64_to_cpu(p->sector);
2370 size = be32_to_cpu(p->blksize);
2371
c670a398 2372 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
b411b363
PR
2373 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2374 (unsigned long long)sector, size);
82bc0194 2375 return -EINVAL;
b411b363
PR
2376 }
2377 if (sector + (size>>9) > capacity) {
2378 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2379 (unsigned long long)sector, size);
82bc0194 2380 return -EINVAL;
b411b363
PR
2381 }
2382
2383 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
b18b37be 2384 verb = 1;
e2857216 2385 switch (pi->cmd) {
b18b37be
PR
2386 case P_DATA_REQUEST:
2387 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
2388 break;
2389 case P_RS_DATA_REQUEST:
2390 case P_CSUM_RS_REQUEST:
2391 case P_OV_REQUEST:
2392 drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
2393 break;
2394 case P_OV_REPLY:
2395 verb = 0;
2396 dec_rs_pending(mdev);
2397 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
2398 break;
2399 default:
49ba9b1b 2400 BUG();
b18b37be
PR
2401 }
2402 if (verb && __ratelimit(&drbd_ratelimit_state))
b411b363
PR
2403 dev_err(DEV, "Can not satisfy peer's read request, "
2404 "no local data.\n");
b18b37be 2405
a821cc4a 2406 /* drain possibly payload */
e2857216 2407 return drbd_drain_block(mdev, pi->size);
b411b363
PR
2408 }
2409
2410 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2411 * "criss-cross" setup, that might cause write-out on some other DRBD,
2412 * which in turn might block on the other node at this very place. */
0db55363 2413 peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);
db830c46 2414 if (!peer_req) {
b411b363 2415 put_ldev(mdev);
82bc0194 2416 return -ENOMEM;
b411b363
PR
2417 }
2418
e2857216 2419 switch (pi->cmd) {
b411b363 2420 case P_DATA_REQUEST:
db830c46 2421 peer_req->w.cb = w_e_end_data_req;
b411b363 2422 fault_type = DRBD_FAULT_DT_RD;
80a40e43
LE
2423 /* application IO, don't drbd_rs_begin_io */
2424 goto submit;
2425
b411b363 2426 case P_RS_DATA_REQUEST:
db830c46 2427 peer_req->w.cb = w_e_end_rsdata_req;
b411b363 2428 fault_type = DRBD_FAULT_RS_RD;
5f9915bb
LE
2429 /* used in the sector offset progress display */
2430 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
b411b363
PR
2431 break;
2432
2433 case P_OV_REPLY:
2434 case P_CSUM_RS_REQUEST:
2435 fault_type = DRBD_FAULT_RS_RD;
e2857216 2436 di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
b411b363
PR
2437 if (!di)
2438 goto out_free_e;
2439
e2857216 2440 di->digest_size = pi->size;
b411b363
PR
2441 di->digest = (((char *)di)+sizeof(struct digest_info));
2442
db830c46
AG
2443 peer_req->digest = di;
2444 peer_req->flags |= EE_HAS_DIGEST;
c36c3ced 2445
e2857216 2446 if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
b411b363
PR
2447 goto out_free_e;
2448
e2857216 2449 if (pi->cmd == P_CSUM_RS_REQUEST) {
31890f4a 2450 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
db830c46 2451 peer_req->w.cb = w_e_end_csum_rs_req;
5f9915bb
LE
2452 /* used in the sector offset progress display */
2453 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
e2857216 2454 } else if (pi->cmd == P_OV_REPLY) {
2649f080
LE
2455 /* track progress, we may need to throttle */
2456 atomic_add(size >> 9, &mdev->rs_sect_in);
db830c46 2457 peer_req->w.cb = w_e_end_ov_reply;
b411b363 2458 dec_rs_pending(mdev);
0f0601f4
LE
2459 /* drbd_rs_begin_io done when we sent this request,
2460 * but accounting still needs to be done. */
2461 goto submit_for_resync;
b411b363
PR
2462 }
2463 break;
2464
2465 case P_OV_REQUEST:
b411b363 2466 if (mdev->ov_start_sector == ~(sector_t)0 &&
31890f4a 2467 mdev->tconn->agreed_pro_version >= 90) {
de228bba
LE
2468 unsigned long now = jiffies;
2469 int i;
b411b363
PR
2470 mdev->ov_start_sector = sector;
2471 mdev->ov_position = sector;
30b743a2
LE
2472 mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2473 mdev->rs_total = mdev->ov_left;
de228bba
LE
2474 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2475 mdev->rs_mark_left[i] = mdev->ov_left;
2476 mdev->rs_mark_time[i] = now;
2477 }
b411b363
PR
2478 dev_info(DEV, "Online Verify start sector: %llu\n",
2479 (unsigned long long)sector);
2480 }
db830c46 2481 peer_req->w.cb = w_e_end_ov_req;
b411b363 2482 fault_type = DRBD_FAULT_RS_RD;
b411b363
PR
2483 break;
2484
b411b363 2485 default:
49ba9b1b 2486 BUG();
b411b363
PR
2487 }
2488
0f0601f4
LE
2489 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2490 * wrt the receiver, but it is not as straightforward as it may seem.
2491 * Various places in the resync start and stop logic assume resync
2492 * requests are processed in order, requeuing this on the worker thread
2493 * introduces a bunch of new code for synchronization between threads.
2494 *
2495 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2496 * "forever", throttling after drbd_rs_begin_io will lock that extent
2497 * for application writes for the same time. For now, just throttle
2498 * here, where the rest of the code expects the receiver to sleep for
2499 * a while, anyways.
2500 */
2501
2502 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2503 * this defers syncer requests for some time, before letting at least
2504 * on request through. The resync controller on the receiving side
2505 * will adapt to the incoming rate accordingly.
2506 *
2507 * We cannot throttle here if remote is Primary/SyncTarget:
2508 * we would also throttle its application reads.
2509 * In that case, throttling is done on the SyncTarget only.
2510 */
e3555d85
PR
2511 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2512 schedule_timeout_uninterruptible(HZ/10);
2513 if (drbd_rs_begin_io(mdev, sector))
80a40e43 2514 goto out_free_e;
b411b363 2515
0f0601f4
LE
2516submit_for_resync:
2517 atomic_add(size >> 9, &mdev->rs_sect_ev);
2518
80a40e43 2519submit:
b411b363 2520 inc_unacked(mdev);
87eeee41 2521 spin_lock_irq(&mdev->tconn->req_lock);
db830c46 2522 list_add_tail(&peer_req->w.list, &mdev->read_ee);
87eeee41 2523 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 2524
fbe29dec 2525 if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
82bc0194 2526 return 0;
b411b363 2527
10f6d992
LE
2528 /* don't care for the reason here */
2529 dev_err(DEV, "submit failed, triggering re-connect\n");
87eeee41 2530 spin_lock_irq(&mdev->tconn->req_lock);
db830c46 2531 list_del(&peer_req->w.list);
87eeee41 2532 spin_unlock_irq(&mdev->tconn->req_lock);
22cc37a9
LE
2533 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2534
b411b363 2535out_free_e:
b411b363 2536 put_ldev(mdev);
3967deb1 2537 drbd_free_peer_req(mdev, peer_req);
82bc0194 2538 return -EIO;
b411b363
PR
2539}
2540
2541static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2542{
2543 int self, peer, rv = -100;
2544 unsigned long ch_self, ch_peer;
44ed167d 2545 enum drbd_after_sb_p after_sb_0p;
b411b363
PR
2546
2547 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2548 peer = mdev->p_uuid[UI_BITMAP] & 1;
2549
2550 ch_peer = mdev->p_uuid[UI_SIZE];
2551 ch_self = mdev->comm_bm_set;
2552
44ed167d
PR
2553 rcu_read_lock();
2554 after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p;
2555 rcu_read_unlock();
2556 switch (after_sb_0p) {
b411b363
PR
2557 case ASB_CONSENSUS:
2558 case ASB_DISCARD_SECONDARY:
2559 case ASB_CALL_HELPER:
44ed167d 2560 case ASB_VIOLENTLY:
b411b363
PR
2561 dev_err(DEV, "Configuration error.\n");
2562 break;
2563 case ASB_DISCONNECT:
2564 break;
2565 case ASB_DISCARD_YOUNGER_PRI:
2566 if (self == 0 && peer == 1) {
2567 rv = -1;
2568 break;
2569 }
2570 if (self == 1 && peer == 0) {
2571 rv = 1;
2572 break;
2573 }
2574 /* Else fall through to one of the other strategies... */
2575 case ASB_DISCARD_OLDER_PRI:
2576 if (self == 0 && peer == 1) {
2577 rv = 1;
2578 break;
2579 }
2580 if (self == 1 && peer == 0) {
2581 rv = -1;
2582 break;
2583 }
2584 /* Else fall through to one of the other strategies... */
ad19bf6e 2585 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
b411b363
PR
2586 "Using discard-least-changes instead\n");
2587 case ASB_DISCARD_ZERO_CHG:
2588 if (ch_peer == 0 && ch_self == 0) {
427c0434 2589 rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
b411b363
PR
2590 ? -1 : 1;
2591 break;
2592 } else {
2593 if (ch_peer == 0) { rv = 1; break; }
2594 if (ch_self == 0) { rv = -1; break; }
2595 }
44ed167d 2596 if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
b411b363
PR
2597 break;
2598 case ASB_DISCARD_LEAST_CHG:
2599 if (ch_self < ch_peer)
2600 rv = -1;
2601 else if (ch_self > ch_peer)
2602 rv = 1;
2603 else /* ( ch_self == ch_peer ) */
2604 /* Well, then use something else. */
427c0434 2605 rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
b411b363
PR
2606 ? -1 : 1;
2607 break;
2608 case ASB_DISCARD_LOCAL:
2609 rv = -1;
2610 break;
2611 case ASB_DISCARD_REMOTE:
2612 rv = 1;
2613 }
2614
2615 return rv;
2616}
2617
2618static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2619{
6184ea21 2620 int hg, rv = -100;
44ed167d 2621 enum drbd_after_sb_p after_sb_1p;
b411b363 2622
44ed167d
PR
2623 rcu_read_lock();
2624 after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p;
2625 rcu_read_unlock();
2626 switch (after_sb_1p) {
b411b363
PR
2627 case ASB_DISCARD_YOUNGER_PRI:
2628 case ASB_DISCARD_OLDER_PRI:
2629 case ASB_DISCARD_LEAST_CHG:
2630 case ASB_DISCARD_LOCAL:
2631 case ASB_DISCARD_REMOTE:
44ed167d 2632 case ASB_DISCARD_ZERO_CHG:
b411b363
PR
2633 dev_err(DEV, "Configuration error.\n");
2634 break;
2635 case ASB_DISCONNECT:
2636 break;
2637 case ASB_CONSENSUS:
2638 hg = drbd_asb_recover_0p(mdev);
2639 if (hg == -1 && mdev->state.role == R_SECONDARY)
2640 rv = hg;
2641 if (hg == 1 && mdev->state.role == R_PRIMARY)
2642 rv = hg;
2643 break;
2644 case ASB_VIOLENTLY:
2645 rv = drbd_asb_recover_0p(mdev);
2646 break;
2647 case ASB_DISCARD_SECONDARY:
2648 return mdev->state.role == R_PRIMARY ? 1 : -1;
2649 case ASB_CALL_HELPER:
2650 hg = drbd_asb_recover_0p(mdev);
2651 if (hg == -1 && mdev->state.role == R_PRIMARY) {
bb437946
AG
2652 enum drbd_state_rv rv2;
2653
2654 drbd_set_role(mdev, R_SECONDARY, 0);
b411b363
PR
2655 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2656 * we might be here in C_WF_REPORT_PARAMS which is transient.
2657 * we do not need to wait for the after state change work either. */
bb437946
AG
2658 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2659 if (rv2 != SS_SUCCESS) {
b411b363
PR
2660 drbd_khelper(mdev, "pri-lost-after-sb");
2661 } else {
2662 dev_warn(DEV, "Successfully gave up primary role.\n");
2663 rv = hg;
2664 }
2665 } else
2666 rv = hg;
2667 }
2668
2669 return rv;
2670}
2671
2672static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2673{
6184ea21 2674 int hg, rv = -100;
44ed167d 2675 enum drbd_after_sb_p after_sb_2p;
b411b363 2676
44ed167d
PR
2677 rcu_read_lock();
2678 after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p;
2679 rcu_read_unlock();
2680 switch (after_sb_2p) {
b411b363
PR
2681 case ASB_DISCARD_YOUNGER_PRI:
2682 case ASB_DISCARD_OLDER_PRI:
2683 case ASB_DISCARD_LEAST_CHG:
2684 case ASB_DISCARD_LOCAL:
2685 case ASB_DISCARD_REMOTE:
2686 case ASB_CONSENSUS:
2687 case ASB_DISCARD_SECONDARY:
44ed167d 2688 case ASB_DISCARD_ZERO_CHG:
b411b363
PR
2689 dev_err(DEV, "Configuration error.\n");
2690 break;
2691 case ASB_VIOLENTLY:
2692 rv = drbd_asb_recover_0p(mdev);
2693 break;
2694 case ASB_DISCONNECT:
2695 break;
2696 case ASB_CALL_HELPER:
2697 hg = drbd_asb_recover_0p(mdev);
2698 if (hg == -1) {
bb437946
AG
2699 enum drbd_state_rv rv2;
2700
b411b363
PR
2701 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2702 * we might be here in C_WF_REPORT_PARAMS which is transient.
2703 * we do not need to wait for the after state change work either. */
bb437946
AG
2704 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2705 if (rv2 != SS_SUCCESS) {
b411b363
PR
2706 drbd_khelper(mdev, "pri-lost-after-sb");
2707 } else {
2708 dev_warn(DEV, "Successfully gave up primary role.\n");
2709 rv = hg;
2710 }
2711 } else
2712 rv = hg;
2713 }
2714
2715 return rv;
2716}
2717
2718static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2719 u64 bits, u64 flags)
2720{
2721 if (!uuid) {
2722 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2723 return;
2724 }
2725 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2726 text,
2727 (unsigned long long)uuid[UI_CURRENT],
2728 (unsigned long long)uuid[UI_BITMAP],
2729 (unsigned long long)uuid[UI_HISTORY_START],
2730 (unsigned long long)uuid[UI_HISTORY_END],
2731 (unsigned long long)bits,
2732 (unsigned long long)flags);
2733}
2734
2735/*
2736 100 after split brain try auto recover
2737 2 C_SYNC_SOURCE set BitMap
2738 1 C_SYNC_SOURCE use BitMap
2739 0 no Sync
2740 -1 C_SYNC_TARGET use BitMap
2741 -2 C_SYNC_TARGET set BitMap
2742 -100 after split brain, disconnect
2743-1000 unrelated data
4a23f264
PR
2744-1091 requires proto 91
2745-1096 requires proto 96
b411b363
PR
2746 */
2747static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2748{
2749 u64 self, peer;
2750 int i, j;
2751
2752 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2753 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2754
2755 *rule_nr = 10;
2756 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2757 return 0;
2758
2759 *rule_nr = 20;
2760 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2761 peer != UUID_JUST_CREATED)
2762 return -2;
2763
2764 *rule_nr = 30;
2765 if (self != UUID_JUST_CREATED &&
2766 (peer == UUID_JUST_CREATED || peer == (u64)0))
2767 return 2;
2768
2769 if (self == peer) {
2770 int rct, dc; /* roles at crash time */
2771
2772 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2773
31890f4a 2774 if (mdev->tconn->agreed_pro_version < 91)
4a23f264 2775 return -1091;
b411b363
PR
2776
2777 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2778 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2779 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
9f2247bb
PR
2780 drbd_uuid_move_history(mdev);
2781 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
2782 mdev->ldev->md.uuid[UI_BITMAP] = 0;
b411b363
PR
2783
2784 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2785 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2786 *rule_nr = 34;
2787 } else {
2788 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2789 *rule_nr = 36;
2790 }
2791
2792 return 1;
2793 }
2794
2795 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2796
31890f4a 2797 if (mdev->tconn->agreed_pro_version < 91)
4a23f264 2798 return -1091;
b411b363
PR
2799
2800 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2801 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2802 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2803
2804 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2805 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2806 mdev->p_uuid[UI_BITMAP] = 0UL;
2807
2808 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2809 *rule_nr = 35;
2810 } else {
2811 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2812 *rule_nr = 37;
2813 }
2814
2815 return -1;
2816 }
2817
2818 /* Common power [off|failure] */
2819 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2820 (mdev->p_uuid[UI_FLAGS] & 2);
2821 /* lowest bit is set when we were primary,
2822 * next bit (weight 2) is set when peer was primary */
2823 *rule_nr = 40;
2824
2825 switch (rct) {
2826 case 0: /* !self_pri && !peer_pri */ return 0;
2827 case 1: /* self_pri && !peer_pri */ return 1;
2828 case 2: /* !self_pri && peer_pri */ return -1;
2829 case 3: /* self_pri && peer_pri */
427c0434 2830 dc = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
b411b363
PR
2831 return dc ? -1 : 1;
2832 }
2833 }
2834
2835 *rule_nr = 50;
2836 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2837 if (self == peer)
2838 return -1;
2839
2840 *rule_nr = 51;
2841 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2842 if (self == peer) {
31890f4a 2843 if (mdev->tconn->agreed_pro_version < 96 ?
4a23f264
PR
2844 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2845 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2846 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
b411b363
PR
2847 /* The last P_SYNC_UUID did not get though. Undo the last start of
2848 resync as sync source modifications of the peer's UUIDs. */
2849
31890f4a 2850 if (mdev->tconn->agreed_pro_version < 91)
4a23f264 2851 return -1091;
b411b363
PR
2852
2853 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2854 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
4a23f264 2855
92b4ca29 2856 dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
4a23f264
PR
2857 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2858
b411b363
PR
2859 return -1;
2860 }
2861 }
2862
2863 *rule_nr = 60;
2864 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2865 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2866 peer = mdev->p_uuid[i] & ~((u64)1);
2867 if (self == peer)
2868 return -2;
2869 }
2870
2871 *rule_nr = 70;
2872 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2873 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2874 if (self == peer)
2875 return 1;
2876
2877 *rule_nr = 71;
2878 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2879 if (self == peer) {
31890f4a 2880 if (mdev->tconn->agreed_pro_version < 96 ?
4a23f264
PR
2881 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2882 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2883 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
b411b363
PR
2884 /* The last P_SYNC_UUID did not get though. Undo the last start of
2885 resync as sync source modifications of our UUIDs. */
2886
31890f4a 2887 if (mdev->tconn->agreed_pro_version < 91)
4a23f264 2888 return -1091;
b411b363 2889
9f2247bb
PR
2890 __drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2891 __drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
b411b363 2892
4a23f264 2893 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
b411b363
PR
2894 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2895 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2896
2897 return 1;
2898 }
2899 }
2900
2901
2902 *rule_nr = 80;
d8c2a36b 2903 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
b411b363
PR
2904 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2905 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2906 if (self == peer)
2907 return 2;
2908 }
2909
2910 *rule_nr = 90;
2911 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2912 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2913 if (self == peer && self != ((u64)0))
2914 return 100;
2915
2916 *rule_nr = 100;
2917 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2918 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2919 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2920 peer = mdev->p_uuid[j] & ~((u64)1);
2921 if (self == peer)
2922 return -100;
2923 }
2924 }
2925
2926 return -1000;
2927}
2928
2929/* drbd_sync_handshake() returns the new conn state on success, or
2930 CONN_MASK (-1) on failure.
2931 */
2932static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2933 enum drbd_disk_state peer_disk) __must_hold(local)
2934{
b411b363
PR
2935 enum drbd_conns rv = C_MASK;
2936 enum drbd_disk_state mydisk;
44ed167d 2937 struct net_conf *nc;
6dff2902 2938 int hg, rule_nr, rr_conflict, tentative;
b411b363
PR
2939
2940 mydisk = mdev->state.disk;
2941 if (mydisk == D_NEGOTIATING)
2942 mydisk = mdev->new_state_tmp.disk;
2943
2944 dev_info(DEV, "drbd_sync_handshake:\n");
9f2247bb
PR
2945
2946 spin_lock_irq(&mdev->ldev->md.uuid_lock);
b411b363
PR
2947 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2948 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2949 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2950
2951 hg = drbd_uuid_compare(mdev, &rule_nr);
9f2247bb 2952 spin_unlock_irq(&mdev->ldev->md.uuid_lock);
b411b363
PR
2953
2954 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2955
2956 if (hg == -1000) {
2957 dev_alert(DEV, "Unrelated data, aborting!\n");
2958 return C_MASK;
2959 }
4a23f264
PR
2960 if (hg < -1000) {
2961 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
b411b363
PR
2962 return C_MASK;
2963 }
2964
2965 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2966 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2967 int f = (hg == -100) || abs(hg) == 2;
2968 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2969 if (f)
2970 hg = hg*2;
2971 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2972 hg > 0 ? "source" : "target");
2973 }
2974
3a11a487
AG
2975 if (abs(hg) == 100)
2976 drbd_khelper(mdev, "initial-split-brain");
2977
44ed167d
PR
2978 rcu_read_lock();
2979 nc = rcu_dereference(mdev->tconn->net_conf);
2980
2981 if (hg == 100 || (hg == -100 && nc->always_asbp)) {
b411b363
PR
2982 int pcount = (mdev->state.role == R_PRIMARY)
2983 + (peer_role == R_PRIMARY);
2984 int forced = (hg == -100);
2985
2986 switch (pcount) {
2987 case 0:
2988 hg = drbd_asb_recover_0p(mdev);
2989 break;
2990 case 1:
2991 hg = drbd_asb_recover_1p(mdev);
2992 break;
2993 case 2:
2994 hg = drbd_asb_recover_2p(mdev);
2995 break;
2996 }
2997 if (abs(hg) < 100) {
2998 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2999 "automatically solved. Sync from %s node\n",
3000 pcount, (hg < 0) ? "peer" : "this");
3001 if (forced) {
3002 dev_warn(DEV, "Doing a full sync, since"
3003 " UUIDs where ambiguous.\n");
3004 hg = hg*2;
3005 }
3006 }
3007 }
3008
3009 if (hg == -100) {
08b165ba 3010 if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1))
b411b363 3011 hg = -1;
08b165ba 3012 if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1))
b411b363
PR
3013 hg = 1;
3014
3015 if (abs(hg) < 100)
3016 dev_warn(DEV, "Split-Brain detected, manually solved. "
3017 "Sync from %s node\n",
3018 (hg < 0) ? "peer" : "this");
3019 }
44ed167d 3020 rr_conflict = nc->rr_conflict;
6dff2902 3021 tentative = nc->tentative;
44ed167d 3022 rcu_read_unlock();
b411b363
PR
3023
3024 if (hg == -100) {
580b9767
LE
3025 /* FIXME this log message is not correct if we end up here
3026 * after an attempted attach on a diskless node.
3027 * We just refuse to attach -- well, we drop the "connection"
3028 * to that disk, in a way... */
3a11a487 3029 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
b411b363
PR
3030 drbd_khelper(mdev, "split-brain");
3031 return C_MASK;
3032 }
3033
3034 if (hg > 0 && mydisk <= D_INCONSISTENT) {
3035 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
3036 return C_MASK;
3037 }
3038
3039 if (hg < 0 && /* by intention we do not use mydisk here. */
3040 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
44ed167d 3041 switch (rr_conflict) {
b411b363
PR
3042 case ASB_CALL_HELPER:
3043 drbd_khelper(mdev, "pri-lost");
3044 /* fall through */
3045 case ASB_DISCONNECT:
3046 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
3047 return C_MASK;
3048 case ASB_VIOLENTLY:
3049 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
3050 "assumption\n");
3051 }
3052 }
3053
6dff2902 3054 if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
cf14c2e9
PR
3055 if (hg == 0)
3056 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
3057 else
3058 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
3059 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
3060 abs(hg) >= 2 ? "full" : "bit-map based");
3061 return C_MASK;
3062 }
3063
b411b363
PR
3064 if (abs(hg) >= 2) {
3065 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
20ceb2b2
LE
3066 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
3067 BM_LOCKED_SET_ALLOWED))
b411b363
PR
3068 return C_MASK;
3069 }
3070
3071 if (hg > 0) { /* become sync source. */
3072 rv = C_WF_BITMAP_S;
3073 } else if (hg < 0) { /* become sync target */
3074 rv = C_WF_BITMAP_T;
3075 } else {
3076 rv = C_CONNECTED;
3077 if (drbd_bm_total_weight(mdev)) {
3078 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
3079 drbd_bm_total_weight(mdev));
3080 }
3081 }
3082
3083 return rv;
3084}
3085
f179d76d 3086static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
b411b363
PR
3087{
3088 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
f179d76d
PR
3089 if (peer == ASB_DISCARD_REMOTE)
3090 return ASB_DISCARD_LOCAL;
b411b363
PR
3091
3092 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
f179d76d
PR
3093 if (peer == ASB_DISCARD_LOCAL)
3094 return ASB_DISCARD_REMOTE;
b411b363
PR
3095
3096 /* everything else is valid if they are equal on both sides. */
f179d76d 3097 return peer;
b411b363
PR
3098}
3099
e2857216 3100static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3101{
e658983a 3102 struct p_protocol *p = pi->data;
036b17ea
PR
3103 enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
3104 int p_proto, p_discard_my_data, p_two_primaries, cf;
3105 struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
3106 char integrity_alg[SHARED_SECRET_MAX] = "";
accdbcc5 3107 struct crypto_hash *peer_integrity_tfm = NULL;
7aca6c75 3108 void *int_dig_in = NULL, *int_dig_vv = NULL;
b411b363 3109
b411b363
PR
3110 p_proto = be32_to_cpu(p->protocol);
3111 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
3112 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
3113 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
b411b363 3114 p_two_primaries = be32_to_cpu(p->two_primaries);
cf14c2e9 3115 cf = be32_to_cpu(p->conn_flags);
6139f60d 3116 p_discard_my_data = cf & CF_DISCARD_MY_DATA;
cf14c2e9 3117
86db0618
AG
3118 if (tconn->agreed_pro_version >= 87) {
3119 int err;
cf14c2e9 3120
88104ca4 3121 if (pi->size > sizeof(integrity_alg))
86db0618 3122 return -EIO;
88104ca4 3123 err = drbd_recv_all(tconn, integrity_alg, pi->size);
86db0618
AG
3124 if (err)
3125 return err;
036b17ea 3126 integrity_alg[SHARED_SECRET_MAX - 1] = 0;
b411b363
PR
3127 }
3128
7d4c782c 3129 if (pi->cmd != P_PROTOCOL_UPDATE) {
fbc12f45 3130 clear_bit(CONN_DRY_RUN, &tconn->flags);
b411b363 3131
fbc12f45
AG
3132 if (cf & CF_DRY_RUN)
3133 set_bit(CONN_DRY_RUN, &tconn->flags);
b411b363 3134
fbc12f45
AG
3135 rcu_read_lock();
3136 nc = rcu_dereference(tconn->net_conf);
b411b363 3137
fbc12f45 3138 if (p_proto != nc->wire_protocol) {
d505d9be 3139 conn_err(tconn, "incompatible %s settings\n", "protocol");
fbc12f45
AG
3140 goto disconnect_rcu_unlock;
3141 }
b411b363 3142
fbc12f45 3143 if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
d505d9be 3144 conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri");
fbc12f45
AG
3145 goto disconnect_rcu_unlock;
3146 }
b411b363 3147
fbc12f45 3148 if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
d505d9be 3149 conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri");
fbc12f45
AG
3150 goto disconnect_rcu_unlock;
3151 }
b411b363 3152
fbc12f45 3153 if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
d505d9be 3154 conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri");
fbc12f45
AG
3155 goto disconnect_rcu_unlock;
3156 }
b411b363 3157
fbc12f45 3158 if (p_discard_my_data && nc->discard_my_data) {
d505d9be 3159 conn_err(tconn, "incompatible %s settings\n", "discard-my-data");
fbc12f45
AG
3160 goto disconnect_rcu_unlock;
3161 }
b411b363 3162
fbc12f45 3163 if (p_two_primaries != nc->two_primaries) {
d505d9be 3164 conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries");
fbc12f45
AG
3165 goto disconnect_rcu_unlock;
3166 }
b411b363 3167
fbc12f45 3168 if (strcmp(integrity_alg, nc->integrity_alg)) {
d505d9be 3169 conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg");
fbc12f45
AG
3170 goto disconnect_rcu_unlock;
3171 }
b411b363 3172
fbc12f45 3173 rcu_read_unlock();
b411b363
PR
3174 }
3175
7d4c782c
AG
3176 if (integrity_alg[0]) {
3177 int hash_size;
3178
3179 /*
3180 * We can only change the peer data integrity algorithm
3181 * here. Changing our own data integrity algorithm
3182 * requires that we send a P_PROTOCOL_UPDATE packet at
3183 * the same time; otherwise, the peer has no way to
3184 * tell between which packets the algorithm should
3185 * change.
3186 */
b411b363 3187
7d4c782c
AG
3188 peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
3189 if (!peer_integrity_tfm) {
3190 conn_err(tconn, "peer data-integrity-alg %s not supported\n",
3191 integrity_alg);
3192 goto disconnect;
3193 }
b411b363 3194
7d4c782c
AG
3195 hash_size = crypto_hash_digestsize(peer_integrity_tfm);
3196 int_dig_in = kmalloc(hash_size, GFP_KERNEL);
3197 int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
3198 if (!(int_dig_in && int_dig_vv)) {
3199 conn_err(tconn, "Allocation of buffers for data integrity checking failed\n");
b411b363
PR
3200 goto disconnect;
3201 }
b411b363
PR
3202 }
3203
7d4c782c
AG
3204 new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
3205 if (!new_net_conf) {
3206 conn_err(tconn, "Allocation of new net_conf failed\n");
3207 goto disconnect;
3208 }
3209
3210 mutex_lock(&tconn->data.mutex);
3211 mutex_lock(&tconn->conf_update);
3212 old_net_conf = tconn->net_conf;
3213 *new_net_conf = *old_net_conf;
3214
3215 new_net_conf->wire_protocol = p_proto;
3216 new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
3217 new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
3218 new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
3219 new_net_conf->two_primaries = p_two_primaries;
3220
3221 rcu_assign_pointer(tconn->net_conf, new_net_conf);
3222 mutex_unlock(&tconn->conf_update);
3223 mutex_unlock(&tconn->data.mutex);
3224
3225 crypto_free_hash(tconn->peer_integrity_tfm);
3226 kfree(tconn->int_dig_in);
3227 kfree(tconn->int_dig_vv);
3228 tconn->peer_integrity_tfm = peer_integrity_tfm;
3229 tconn->int_dig_in = int_dig_in;
3230 tconn->int_dig_vv = int_dig_vv;
3231
3232 if (strcmp(old_net_conf->integrity_alg, integrity_alg))
3233 conn_info(tconn, "peer data-integrity-alg: %s\n",
3234 integrity_alg[0] ? integrity_alg : "(none)");
3235
3236 synchronize_rcu();
3237 kfree(old_net_conf);
82bc0194 3238 return 0;
b411b363 3239
44ed167d
PR
3240disconnect_rcu_unlock:
3241 rcu_read_unlock();
b411b363 3242disconnect:
b792c35c 3243 crypto_free_hash(peer_integrity_tfm);
036b17ea
PR
3244 kfree(int_dig_in);
3245 kfree(int_dig_vv);
7204624c 3246 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3247 return -EIO;
b411b363
PR
3248}
3249
3250/* helper function
3251 * input: alg name, feature name
3252 * return: NULL (alg name was "")
3253 * ERR_PTR(error) if something goes wrong
3254 * or the crypto hash ptr, if it worked out ok. */
3255struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
3256 const char *alg, const char *name)
3257{
3258 struct crypto_hash *tfm;
3259
3260 if (!alg[0])
3261 return NULL;
3262
3263 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
3264 if (IS_ERR(tfm)) {
3265 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
3266 alg, name, PTR_ERR(tfm));
3267 return tfm;
3268 }
b411b363
PR
3269 return tfm;
3270}
3271
4a76b161
AG
3272static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
3273{
3274 void *buffer = tconn->data.rbuf;
3275 int size = pi->size;
3276
3277 while (size) {
3278 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
3279 s = drbd_recv(tconn, buffer, s);
3280 if (s <= 0) {
3281 if (s < 0)
3282 return s;
3283 break;
3284 }
3285 size -= s;
3286 }
3287 if (size)
3288 return -EIO;
3289 return 0;
3290}
3291
3292/*
3293 * config_unknown_volume - device configuration command for unknown volume
3294 *
3295 * When a device is added to an existing connection, the node on which the
3296 * device is added first will send configuration commands to its peer but the
3297 * peer will not know about the device yet. It will warn and ignore these
3298 * commands. Once the device is added on the second node, the second node will
3299 * send the same device configuration commands, but in the other direction.
3300 *
3301 * (We can also end up here if drbd is misconfigured.)
3302 */
3303static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
3304{
2fcb8f30
AG
3305 conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n",
3306 cmdname(pi->cmd), pi->vnr);
4a76b161
AG
3307 return ignore_remaining_packet(tconn, pi);
3308}
3309
3310static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3311{
4a76b161 3312 struct drbd_conf *mdev;
e658983a 3313 struct p_rs_param_95 *p;
b411b363
PR
3314 unsigned int header_size, data_size, exp_max_sz;
3315 struct crypto_hash *verify_tfm = NULL;
3316 struct crypto_hash *csums_tfm = NULL;
2ec91e0e 3317 struct net_conf *old_net_conf, *new_net_conf = NULL;
813472ce 3318 struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
4a76b161 3319 const int apv = tconn->agreed_pro_version;
813472ce 3320 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
778f271d 3321 int fifo_size = 0;
82bc0194 3322 int err;
b411b363 3323
4a76b161
AG
3324 mdev = vnr_to_mdev(tconn, pi->vnr);
3325 if (!mdev)
3326 return config_unknown_volume(tconn, pi);
b411b363
PR
3327
3328 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
3329 : apv == 88 ? sizeof(struct p_rs_param)
3330 + SHARED_SECRET_MAX
8e26f9cc
PR
3331 : apv <= 94 ? sizeof(struct p_rs_param_89)
3332 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
b411b363 3333
e2857216 3334 if (pi->size > exp_max_sz) {
b411b363 3335 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
e2857216 3336 pi->size, exp_max_sz);
82bc0194 3337 return -EIO;
b411b363
PR
3338 }
3339
3340 if (apv <= 88) {
e658983a 3341 header_size = sizeof(struct p_rs_param);
e2857216 3342 data_size = pi->size - header_size;
8e26f9cc 3343 } else if (apv <= 94) {
e658983a 3344 header_size = sizeof(struct p_rs_param_89);
e2857216 3345 data_size = pi->size - header_size;
b411b363 3346 D_ASSERT(data_size == 0);
8e26f9cc 3347 } else {
e658983a 3348 header_size = sizeof(struct p_rs_param_95);
e2857216 3349 data_size = pi->size - header_size;
b411b363
PR
3350 D_ASSERT(data_size == 0);
3351 }
3352
3353 /* initialize verify_alg and csums_alg */
e658983a 3354 p = pi->data;
b411b363
PR
3355 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3356
e658983a 3357 err = drbd_recv_all(mdev->tconn, p, header_size);
82bc0194
AG
3358 if (err)
3359 return err;
b411b363 3360
daeda1cc
PR
3361 mutex_lock(&mdev->tconn->conf_update);
3362 old_net_conf = mdev->tconn->net_conf;
813472ce
PR
3363 if (get_ldev(mdev)) {
3364 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3365 if (!new_disk_conf) {
3366 put_ldev(mdev);
3367 mutex_unlock(&mdev->tconn->conf_update);
3368 dev_err(DEV, "Allocation of new disk_conf failed\n");
3369 return -ENOMEM;
3370 }
daeda1cc 3371
813472ce
PR
3372 old_disk_conf = mdev->ldev->disk_conf;
3373 *new_disk_conf = *old_disk_conf;
b411b363 3374
6394b935 3375 new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
813472ce 3376 }
b411b363
PR
3377
3378 if (apv >= 88) {
3379 if (apv == 88) {
5de73827
PR
3380 if (data_size > SHARED_SECRET_MAX || data_size == 0) {
3381 dev_err(DEV, "verify-alg of wrong size, "
3382 "peer wants %u, accepting only up to %u byte\n",
3383 data_size, SHARED_SECRET_MAX);
813472ce
PR
3384 err = -EIO;
3385 goto reconnect;
b411b363
PR
3386 }
3387
82bc0194 3388 err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
813472ce
PR
3389 if (err)
3390 goto reconnect;
b411b363
PR
3391 /* we expect NUL terminated string */
3392 /* but just in case someone tries to be evil */
3393 D_ASSERT(p->verify_alg[data_size-1] == 0);
3394 p->verify_alg[data_size-1] = 0;
3395
3396 } else /* apv >= 89 */ {
3397 /* we still expect NUL terminated strings */
3398 /* but just in case someone tries to be evil */
3399 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3400 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
3401 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3402 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3403 }
3404
2ec91e0e 3405 if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
b411b363
PR
3406 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3407 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2ec91e0e 3408 old_net_conf->verify_alg, p->verify_alg);
b411b363
PR
3409 goto disconnect;
3410 }
3411 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
3412 p->verify_alg, "verify-alg");
3413 if (IS_ERR(verify_tfm)) {
3414 verify_tfm = NULL;
3415 goto disconnect;
3416 }
3417 }
3418
2ec91e0e 3419 if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
b411b363
PR
3420 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3421 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2ec91e0e 3422 old_net_conf->csums_alg, p->csums_alg);
b411b363
PR
3423 goto disconnect;
3424 }
3425 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
3426 p->csums_alg, "csums-alg");
3427 if (IS_ERR(csums_tfm)) {
3428 csums_tfm = NULL;
3429 goto disconnect;
3430 }
3431 }
3432
813472ce 3433 if (apv > 94 && new_disk_conf) {
daeda1cc
PR
3434 new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3435 new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
3436 new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
3437 new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
778f271d 3438
daeda1cc 3439 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
9958c857 3440 if (fifo_size != mdev->rs_plan_s->size) {
813472ce
PR
3441 new_plan = fifo_alloc(fifo_size);
3442 if (!new_plan) {
778f271d 3443 dev_err(DEV, "kmalloc of fifo_buffer failed");
f399002e 3444 put_ldev(mdev);
778f271d
PR
3445 goto disconnect;
3446 }
3447 }
8e26f9cc 3448 }
b411b363 3449
91fd4dad 3450 if (verify_tfm || csums_tfm) {
2ec91e0e
PR
3451 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
3452 if (!new_net_conf) {
91fd4dad
PR
3453 dev_err(DEV, "Allocation of new net_conf failed\n");
3454 goto disconnect;
3455 }
3456
2ec91e0e 3457 *new_net_conf = *old_net_conf;
91fd4dad
PR
3458
3459 if (verify_tfm) {
2ec91e0e
PR
3460 strcpy(new_net_conf->verify_alg, p->verify_alg);
3461 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
91fd4dad
PR
3462 crypto_free_hash(mdev->tconn->verify_tfm);
3463 mdev->tconn->verify_tfm = verify_tfm;
3464 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
3465 }
3466 if (csums_tfm) {
2ec91e0e
PR
3467 strcpy(new_net_conf->csums_alg, p->csums_alg);
3468 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
91fd4dad
PR
3469 crypto_free_hash(mdev->tconn->csums_tfm);
3470 mdev->tconn->csums_tfm = csums_tfm;
3471 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
3472 }
2ec91e0e 3473 rcu_assign_pointer(tconn->net_conf, new_net_conf);
778f271d 3474 }
b411b363
PR
3475 }
3476
813472ce
PR
3477 if (new_disk_conf) {
3478 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3479 put_ldev(mdev);
3480 }
3481
3482 if (new_plan) {
3483 old_plan = mdev->rs_plan_s;
3484 rcu_assign_pointer(mdev->rs_plan_s, new_plan);
b411b363 3485 }
daeda1cc
PR
3486
3487 mutex_unlock(&mdev->tconn->conf_update);
3488 synchronize_rcu();
3489 if (new_net_conf)
3490 kfree(old_net_conf);
3491 kfree(old_disk_conf);
813472ce 3492 kfree(old_plan);
daeda1cc 3493
82bc0194 3494 return 0;
b411b363 3495
813472ce
PR
3496reconnect:
3497 if (new_disk_conf) {
3498 put_ldev(mdev);
3499 kfree(new_disk_conf);
3500 }
3501 mutex_unlock(&mdev->tconn->conf_update);
3502 return -EIO;
3503
b411b363 3504disconnect:
813472ce
PR
3505 kfree(new_plan);
3506 if (new_disk_conf) {
3507 put_ldev(mdev);
3508 kfree(new_disk_conf);
3509 }
a0095508 3510 mutex_unlock(&mdev->tconn->conf_update);
b411b363
PR
3511 /* just for completeness: actually not needed,
3512 * as this is not reached if csums_tfm was ok. */
3513 crypto_free_hash(csums_tfm);
3514 /* but free the verify_tfm again, if csums_tfm did not work out */
3515 crypto_free_hash(verify_tfm);
38fa9988 3516 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3517 return -EIO;
b411b363
PR
3518}
3519
b411b363
PR
3520/* warn if the arguments differ by more than 12.5% */
3521static void warn_if_differ_considerably(struct drbd_conf *mdev,
3522 const char *s, sector_t a, sector_t b)
3523{
3524 sector_t d;
3525 if (a == 0 || b == 0)
3526 return;
3527 d = (a > b) ? (a - b) : (b - a);
3528 if (d > (a>>3) || d > (b>>3))
3529 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3530 (unsigned long long)a, (unsigned long long)b);
3531}
3532
4a76b161 3533static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3534{
4a76b161 3535 struct drbd_conf *mdev;
e658983a 3536 struct p_sizes *p = pi->data;
b411b363 3537 enum determine_dev_size dd = unchanged;
b411b363
PR
3538 sector_t p_size, p_usize, my_usize;
3539 int ldsc = 0; /* local disk size changed */
e89b591c 3540 enum dds_flags ddsf;
b411b363 3541
4a76b161
AG
3542 mdev = vnr_to_mdev(tconn, pi->vnr);
3543 if (!mdev)
3544 return config_unknown_volume(tconn, pi);
3545
b411b363
PR
3546 p_size = be64_to_cpu(p->d_size);
3547 p_usize = be64_to_cpu(p->u_size);
3548
b411b363
PR
3549 /* just store the peer's disk size for now.
3550 * we still need to figure out whether we accept that. */
3551 mdev->p_size = p_size;
3552
b411b363 3553 if (get_ldev(mdev)) {
daeda1cc
PR
3554 rcu_read_lock();
3555 my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
3556 rcu_read_unlock();
3557
b411b363
PR
3558 warn_if_differ_considerably(mdev, "lower level device sizes",
3559 p_size, drbd_get_max_capacity(mdev->ldev));
3560 warn_if_differ_considerably(mdev, "user requested size",
daeda1cc 3561 p_usize, my_usize);
b411b363
PR
3562
3563 /* if this is the first connect, or an otherwise expected
3564 * param exchange, choose the minimum */
3565 if (mdev->state.conn == C_WF_REPORT_PARAMS)
daeda1cc 3566 p_usize = min_not_zero(my_usize, p_usize);
b411b363
PR
3567
3568 /* Never shrink a device with usable data during connect.
3569 But allow online shrinking if we are connected. */
ef5e44a6 3570 if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
daeda1cc
PR
3571 drbd_get_capacity(mdev->this_bdev) &&
3572 mdev->state.disk >= D_OUTDATED &&
3573 mdev->state.conn < C_CONNECTED) {
b411b363 3574 dev_err(DEV, "The peer's disk size is too small!\n");
38fa9988 3575 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363 3576 put_ldev(mdev);
82bc0194 3577 return -EIO;
b411b363 3578 }
daeda1cc
PR
3579
3580 if (my_usize != p_usize) {
3581 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
3582
3583 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3584 if (!new_disk_conf) {
3585 dev_err(DEV, "Allocation of new disk_conf failed\n");
3586 put_ldev(mdev);
3587 return -ENOMEM;
3588 }
3589
3590 mutex_lock(&mdev->tconn->conf_update);
3591 old_disk_conf = mdev->ldev->disk_conf;
3592 *new_disk_conf = *old_disk_conf;
3593 new_disk_conf->disk_size = p_usize;
3594
3595 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3596 mutex_unlock(&mdev->tconn->conf_update);
3597 synchronize_rcu();
3598 kfree(old_disk_conf);
3599
3600 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3601 (unsigned long)my_usize);
b411b363 3602 }
daeda1cc 3603
b411b363
PR
3604 put_ldev(mdev);
3605 }
b411b363 3606
e89b591c 3607 ddsf = be16_to_cpu(p->dds_flags);
b411b363 3608 if (get_ldev(mdev)) {
24c4830c 3609 dd = drbd_determine_dev_size(mdev, ddsf);
b411b363
PR
3610 put_ldev(mdev);
3611 if (dd == dev_size_error)
82bc0194 3612 return -EIO;
b411b363
PR
3613 drbd_md_sync(mdev);
3614 } else {
3615 /* I am diskless, need to accept the peer's size. */
3616 drbd_set_my_capacity(mdev, p_size);
3617 }
3618
99432fcc
PR
3619 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3620 drbd_reconsider_max_bio_size(mdev);
3621
b411b363
PR
3622 if (get_ldev(mdev)) {
3623 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3624 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3625 ldsc = 1;
3626 }
3627
b411b363
PR
3628 put_ldev(mdev);
3629 }
3630
3631 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3632 if (be64_to_cpu(p->c_size) !=
3633 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3634 /* we have different sizes, probably peer
3635 * needs to know my new size... */
e89b591c 3636 drbd_send_sizes(mdev, 0, ddsf);
b411b363
PR
3637 }
3638 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3639 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3640 if (mdev->state.pdsk >= D_INCONSISTENT &&
e89b591c
PR
3641 mdev->state.disk >= D_INCONSISTENT) {
3642 if (ddsf & DDSF_NO_RESYNC)
3643 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3644 else
3645 resync_after_online_grow(mdev);
3646 } else
b411b363
PR
3647 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3648 }
3649 }
3650
82bc0194 3651 return 0;
b411b363
PR
3652}
3653
4a76b161 3654static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3655{
4a76b161 3656 struct drbd_conf *mdev;
e658983a 3657 struct p_uuids *p = pi->data;
b411b363 3658 u64 *p_uuid;
62b0da3a 3659 int i, updated_uuids = 0;
b411b363 3660
4a76b161
AG
3661 mdev = vnr_to_mdev(tconn, pi->vnr);
3662 if (!mdev)
3663 return config_unknown_volume(tconn, pi);
3664
b411b363 3665 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
063eacf8
JW
3666 if (!p_uuid) {
3667 dev_err(DEV, "kmalloc of p_uuid failed\n");
3668 return false;
3669 }
b411b363
PR
3670
3671 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3672 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3673
3674 kfree(mdev->p_uuid);
3675 mdev->p_uuid = p_uuid;
3676
3677 if (mdev->state.conn < C_CONNECTED &&
3678 mdev->state.disk < D_INCONSISTENT &&
3679 mdev->state.role == R_PRIMARY &&
3680 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3681 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3682 (unsigned long long)mdev->ed_uuid);
38fa9988 3683 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3684 return -EIO;
b411b363
PR
3685 }
3686
3687 if (get_ldev(mdev)) {
3688 int skip_initial_sync =
3689 mdev->state.conn == C_CONNECTED &&
31890f4a 3690 mdev->tconn->agreed_pro_version >= 90 &&
b411b363
PR
3691 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3692 (p_uuid[UI_FLAGS] & 8);
3693 if (skip_initial_sync) {
3694 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3695 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
20ceb2b2
LE
3696 "clear_n_write from receive_uuids",
3697 BM_LOCKED_TEST_ALLOWED);
b411b363
PR
3698 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3699 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3700 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3701 CS_VERBOSE, NULL);
3702 drbd_md_sync(mdev);
62b0da3a 3703 updated_uuids = 1;
b411b363
PR
3704 }
3705 put_ldev(mdev);
18a50fa2
PR
3706 } else if (mdev->state.disk < D_INCONSISTENT &&
3707 mdev->state.role == R_PRIMARY) {
3708 /* I am a diskless primary, the peer just created a new current UUID
3709 for me. */
62b0da3a 3710 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
b411b363
PR
3711 }
3712
3713 /* Before we test for the disk state, we should wait until an eventually
3714 ongoing cluster wide state change is finished. That is important if
3715 we are primary and are detaching from our disk. We need to see the
3716 new disk state... */
8410da8f
PR
3717 mutex_lock(mdev->state_mutex);
3718 mutex_unlock(mdev->state_mutex);
b411b363 3719 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
62b0da3a
LE
3720 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3721
3722 if (updated_uuids)
3723 drbd_print_uuids(mdev, "receiver updated UUIDs to");
b411b363 3724
82bc0194 3725 return 0;
b411b363
PR
3726}
3727
3728/**
3729 * convert_state() - Converts the peer's view of the cluster state to our point of view
3730 * @ps: The state as seen by the peer.
3731 */
3732static union drbd_state convert_state(union drbd_state ps)
3733{
3734 union drbd_state ms;
3735
3736 static enum drbd_conns c_tab[] = {
369bea63 3737 [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
b411b363
PR
3738 [C_CONNECTED] = C_CONNECTED,
3739
3740 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3741 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3742 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3743 [C_VERIFY_S] = C_VERIFY_T,
3744 [C_MASK] = C_MASK,
3745 };
3746
3747 ms.i = ps.i;
3748
3749 ms.conn = c_tab[ps.conn];
3750 ms.peer = ps.role;
3751 ms.role = ps.peer;
3752 ms.pdsk = ps.disk;
3753 ms.disk = ps.pdsk;
3754 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3755
3756 return ms;
3757}
3758
4a76b161 3759static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3760{
4a76b161 3761 struct drbd_conf *mdev;
e658983a 3762 struct p_req_state *p = pi->data;
b411b363 3763 union drbd_state mask, val;
bf885f8a 3764 enum drbd_state_rv rv;
b411b363 3765
4a76b161
AG
3766 mdev = vnr_to_mdev(tconn, pi->vnr);
3767 if (!mdev)
3768 return -EIO;
3769
b411b363
PR
3770 mask.i = be32_to_cpu(p->mask);
3771 val.i = be32_to_cpu(p->val);
3772
427c0434 3773 if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) &&
8410da8f 3774 mutex_is_locked(mdev->state_mutex)) {
b411b363 3775 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
82bc0194 3776 return 0;
b411b363
PR
3777 }
3778
3779 mask = convert_state(mask);
3780 val = convert_state(val);
3781
dfafcc8a
PR
3782 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3783 drbd_send_sr_reply(mdev, rv);
b411b363 3784
b411b363
PR
3785 drbd_md_sync(mdev);
3786
82bc0194 3787 return 0;
b411b363
PR
3788}
3789
e2857216 3790static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3791{
e658983a 3792 struct p_req_state *p = pi->data;
b411b363 3793 union drbd_state mask, val;
bf885f8a 3794 enum drbd_state_rv rv;
b411b363 3795
b411b363
PR
3796 mask.i = be32_to_cpu(p->mask);
3797 val.i = be32_to_cpu(p->val);
3798
427c0434 3799 if (test_bit(RESOLVE_CONFLICTS, &tconn->flags) &&
dfafcc8a
PR
3800 mutex_is_locked(&tconn->cstate_mutex)) {
3801 conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
82bc0194 3802 return 0;
b411b363
PR
3803 }
3804
3805 mask = convert_state(mask);
3806 val = convert_state(val);
3807
778bcf2e 3808 rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
dfafcc8a 3809 conn_send_sr_reply(tconn, rv);
b411b363 3810
82bc0194 3811 return 0;
b411b363
PR
3812}
3813
4a76b161 3814static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3815{
4a76b161 3816 struct drbd_conf *mdev;
e658983a 3817 struct p_state *p = pi->data;
4ac4aada 3818 union drbd_state os, ns, peer_state;
b411b363 3819 enum drbd_disk_state real_peer_disk;
65d922c3 3820 enum chg_state_flags cs_flags;
b411b363
PR
3821 int rv;
3822
4a76b161
AG
3823 mdev = vnr_to_mdev(tconn, pi->vnr);
3824 if (!mdev)
3825 return config_unknown_volume(tconn, pi);
3826
b411b363
PR
3827 peer_state.i = be32_to_cpu(p->state);
3828
3829 real_peer_disk = peer_state.disk;
3830 if (peer_state.disk == D_NEGOTIATING) {
3831 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3832 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3833 }
3834
87eeee41 3835 spin_lock_irq(&mdev->tconn->req_lock);
b411b363 3836 retry:
78bae59b 3837 os = ns = drbd_read_state(mdev);
87eeee41 3838 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363 3839
545752d5
LE
3840 /* If some other part of the code (asender thread, timeout)
3841 * already decided to close the connection again,
3842 * we must not "re-establish" it here. */
3843 if (os.conn <= C_TEAR_DOWN)
58ffa580 3844 return -ECONNRESET;
545752d5 3845
40424e4a
LE
3846 /* If this is the "end of sync" confirmation, usually the peer disk
3847 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
3848 * set) resync started in PausedSyncT, or if the timing of pause-/
3849 * unpause-sync events has been "just right", the peer disk may
3850 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
3851 */
3852 if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
3853 real_peer_disk == D_UP_TO_DATE &&
e9ef7bb6
LE
3854 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3855 /* If we are (becoming) SyncSource, but peer is still in sync
3856 * preparation, ignore its uptodate-ness to avoid flapping, it
3857 * will change to inconsistent once the peer reaches active
3858 * syncing states.
3859 * It may have changed syncer-paused flags, however, so we
3860 * cannot ignore this completely. */
3861 if (peer_state.conn > C_CONNECTED &&
3862 peer_state.conn < C_SYNC_SOURCE)
3863 real_peer_disk = D_INCONSISTENT;
3864
3865 /* if peer_state changes to connected at the same time,
3866 * it explicitly notifies us that it finished resync.
3867 * Maybe we should finish it up, too? */
3868 else if (os.conn >= C_SYNC_SOURCE &&
3869 peer_state.conn == C_CONNECTED) {
3870 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3871 drbd_resync_finished(mdev);
82bc0194 3872 return 0;
e9ef7bb6
LE
3873 }
3874 }
3875
02b91b55
LE
3876 /* explicit verify finished notification, stop sector reached. */
3877 if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
3878 peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
58ffa580 3879 ov_out_of_sync_print(mdev);
02b91b55 3880 drbd_resync_finished(mdev);
58ffa580 3881 return 0;
02b91b55
LE
3882 }
3883
e9ef7bb6
LE
3884 /* peer says his disk is inconsistent, while we think it is uptodate,
3885 * and this happens while the peer still thinks we have a sync going on,
3886 * but we think we are already done with the sync.
3887 * We ignore this to avoid flapping pdsk.
3888 * This should not happen, if the peer is a recent version of drbd. */
3889 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3890 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3891 real_peer_disk = D_UP_TO_DATE;
3892
4ac4aada
LE
3893 if (ns.conn == C_WF_REPORT_PARAMS)
3894 ns.conn = C_CONNECTED;
b411b363 3895
67531718
PR
3896 if (peer_state.conn == C_AHEAD)
3897 ns.conn = C_BEHIND;
3898
b411b363
PR
3899 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3900 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3901 int cr; /* consider resync */
3902
3903 /* if we established a new connection */
4ac4aada 3904 cr = (os.conn < C_CONNECTED);
b411b363
PR
3905 /* if we had an established connection
3906 * and one of the nodes newly attaches a disk */
4ac4aada 3907 cr |= (os.conn == C_CONNECTED &&
b411b363 3908 (peer_state.disk == D_NEGOTIATING ||
4ac4aada 3909 os.disk == D_NEGOTIATING));
b411b363
PR
3910 /* if we have both been inconsistent, and the peer has been
3911 * forced to be UpToDate with --overwrite-data */
3912 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3913 /* if we had been plain connected, and the admin requested to
3914 * start a sync by "invalidate" or "invalidate-remote" */
4ac4aada 3915 cr |= (os.conn == C_CONNECTED &&
b411b363
PR
3916 (peer_state.conn >= C_STARTING_SYNC_S &&
3917 peer_state.conn <= C_WF_BITMAP_T));
3918
3919 if (cr)
4ac4aada 3920 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
b411b363
PR
3921
3922 put_ldev(mdev);
4ac4aada
LE
3923 if (ns.conn == C_MASK) {
3924 ns.conn = C_CONNECTED;
b411b363 3925 if (mdev->state.disk == D_NEGOTIATING) {
82f59cc6 3926 drbd_force_state(mdev, NS(disk, D_FAILED));
b411b363
PR
3927 } else if (peer_state.disk == D_NEGOTIATING) {
3928 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3929 peer_state.disk = D_DISKLESS;
580b9767 3930 real_peer_disk = D_DISKLESS;
b411b363 3931 } else {
8169e41b 3932 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
82bc0194 3933 return -EIO;
4ac4aada 3934 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
38fa9988 3935 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3936 return -EIO;
b411b363
PR
3937 }
3938 }
3939 }
3940
87eeee41 3941 spin_lock_irq(&mdev->tconn->req_lock);
78bae59b 3942 if (os.i != drbd_read_state(mdev).i)
b411b363
PR
3943 goto retry;
3944 clear_bit(CONSIDER_RESYNC, &mdev->flags);
b411b363
PR
3945 ns.peer = peer_state.role;
3946 ns.pdsk = real_peer_disk;
3947 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
4ac4aada 3948 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
b411b363 3949 ns.disk = mdev->new_state_tmp.disk;
4ac4aada 3950 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
2aebfabb 3951 if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
481c6f50 3952 test_bit(NEW_CUR_UUID, &mdev->flags)) {
8554df1c 3953 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
481c6f50 3954 for temporal network outages! */
87eeee41 3955 spin_unlock_irq(&mdev->tconn->req_lock);
481c6f50 3956 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
2f5cdd0b 3957 tl_clear(mdev->tconn);
481c6f50
PR
3958 drbd_uuid_new_current(mdev);
3959 clear_bit(NEW_CUR_UUID, &mdev->flags);
38fa9988 3960 conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
82bc0194 3961 return -EIO;
481c6f50 3962 }
65d922c3 3963 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
78bae59b 3964 ns = drbd_read_state(mdev);
87eeee41 3965 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363
PR
3966
3967 if (rv < SS_SUCCESS) {
38fa9988 3968 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
82bc0194 3969 return -EIO;
b411b363
PR
3970 }
3971
4ac4aada
LE
3972 if (os.conn > C_WF_REPORT_PARAMS) {
3973 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
b411b363
PR
3974 peer_state.disk != D_NEGOTIATING ) {
3975 /* we want resync, peer has not yet decided to sync... */
3976 /* Nowadays only used when forcing a node into primary role and
3977 setting its disk to UpToDate with that */
3978 drbd_send_uuids(mdev);
f479ea06 3979 drbd_send_current_state(mdev);
b411b363
PR
3980 }
3981 }
3982
08b165ba 3983 clear_bit(DISCARD_MY_DATA, &mdev->flags);
b411b363
PR
3984
3985 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3986
82bc0194 3987 return 0;
b411b363
PR
3988}
3989
4a76b161 3990static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 3991{
4a76b161 3992 struct drbd_conf *mdev;
e658983a 3993 struct p_rs_uuid *p = pi->data;
4a76b161
AG
3994
3995 mdev = vnr_to_mdev(tconn, pi->vnr);
3996 if (!mdev)
3997 return -EIO;
b411b363
PR
3998
3999 wait_event(mdev->misc_wait,
4000 mdev->state.conn == C_WF_SYNC_UUID ||
c4752ef1 4001 mdev->state.conn == C_BEHIND ||
b411b363
PR
4002 mdev->state.conn < C_CONNECTED ||
4003 mdev->state.disk < D_NEGOTIATING);
4004
4005 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
4006
b411b363
PR
4007 /* Here the _drbd_uuid_ functions are right, current should
4008 _not_ be rotated into the history */
4009 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
4010 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
4011 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
4012
62b0da3a 4013 drbd_print_uuids(mdev, "updated sync uuid");
b411b363
PR
4014 drbd_start_resync(mdev, C_SYNC_TARGET);
4015
4016 put_ldev(mdev);
4017 } else
4018 dev_err(DEV, "Ignoring SyncUUID packet!\n");
4019
82bc0194 4020 return 0;
b411b363
PR
4021}
4022
2c46407d
AG
4023/**
4024 * receive_bitmap_plain
4025 *
4026 * Return 0 when done, 1 when another iteration is needed, and a negative error
4027 * code upon failure.
4028 */
4029static int
50d0b1ad 4030receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
e658983a 4031 unsigned long *p, struct bm_xfer_ctx *c)
b411b363 4032{
50d0b1ad
AG
4033 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
4034 drbd_header_size(mdev->tconn);
e658983a 4035 unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
50d0b1ad 4036 c->bm_words - c->word_offset);
e658983a 4037 unsigned int want = num_words * sizeof(*p);
2c46407d 4038 int err;
b411b363 4039
50d0b1ad
AG
4040 if (want != size) {
4041 dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size);
2c46407d 4042 return -EIO;
b411b363
PR
4043 }
4044 if (want == 0)
2c46407d 4045 return 0;
e658983a 4046 err = drbd_recv_all(mdev->tconn, p, want);
82bc0194 4047 if (err)
2c46407d 4048 return err;
b411b363 4049
e658983a 4050 drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);
b411b363
PR
4051
4052 c->word_offset += num_words;
4053 c->bit_offset = c->word_offset * BITS_PER_LONG;
4054 if (c->bit_offset > c->bm_bits)
4055 c->bit_offset = c->bm_bits;
4056
2c46407d 4057 return 1;
b411b363
PR
4058}
4059
a02d1240
AG
4060static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
4061{
4062 return (enum drbd_bitmap_code)(p->encoding & 0x0f);
4063}
4064
4065static int dcbp_get_start(struct p_compressed_bm *p)
4066{
4067 return (p->encoding & 0x80) != 0;
4068}
4069
4070static int dcbp_get_pad_bits(struct p_compressed_bm *p)
4071{
4072 return (p->encoding >> 4) & 0x7;
4073}
4074
2c46407d
AG
4075/**
4076 * recv_bm_rle_bits
4077 *
4078 * Return 0 when done, 1 when another iteration is needed, and a negative error
4079 * code upon failure.
4080 */
4081static int
b411b363
PR
4082recv_bm_rle_bits(struct drbd_conf *mdev,
4083 struct p_compressed_bm *p,
c6d25cfe
PR
4084 struct bm_xfer_ctx *c,
4085 unsigned int len)
b411b363
PR
4086{
4087 struct bitstream bs;
4088 u64 look_ahead;
4089 u64 rl;
4090 u64 tmp;
4091 unsigned long s = c->bit_offset;
4092 unsigned long e;
a02d1240 4093 int toggle = dcbp_get_start(p);
b411b363
PR
4094 int have;
4095 int bits;
4096
a02d1240 4097 bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
b411b363
PR
4098
4099 bits = bitstream_get_bits(&bs, &look_ahead, 64);
4100 if (bits < 0)
2c46407d 4101 return -EIO;
b411b363
PR
4102
4103 for (have = bits; have > 0; s += rl, toggle = !toggle) {
4104 bits = vli_decode_bits(&rl, look_ahead);
4105 if (bits <= 0)
2c46407d 4106 return -EIO;
b411b363
PR
4107
4108 if (toggle) {
4109 e = s + rl -1;
4110 if (e >= c->bm_bits) {
4111 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
2c46407d 4112 return -EIO;
b411b363
PR
4113 }
4114 _drbd_bm_set_bits(mdev, s, e);
4115 }
4116
4117 if (have < bits) {
4118 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
4119 have, bits, look_ahead,
4120 (unsigned int)(bs.cur.b - p->code),
4121 (unsigned int)bs.buf_len);
2c46407d 4122 return -EIO;
b411b363
PR
4123 }
4124 look_ahead >>= bits;
4125 have -= bits;
4126
4127 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
4128 if (bits < 0)
2c46407d 4129 return -EIO;
b411b363
PR
4130 look_ahead |= tmp << have;
4131 have += bits;
4132 }
4133
4134 c->bit_offset = s;
4135 bm_xfer_ctx_bit_to_word_offset(c);
4136
2c46407d 4137 return (s != c->bm_bits);
b411b363
PR
4138}
4139
2c46407d
AG
4140/**
4141 * decode_bitmap_c
4142 *
4143 * Return 0 when done, 1 when another iteration is needed, and a negative error
4144 * code upon failure.
4145 */
4146static int
b411b363
PR
4147decode_bitmap_c(struct drbd_conf *mdev,
4148 struct p_compressed_bm *p,
c6d25cfe
PR
4149 struct bm_xfer_ctx *c,
4150 unsigned int len)
b411b363 4151{
a02d1240 4152 if (dcbp_get_code(p) == RLE_VLI_Bits)
e658983a 4153 return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));
b411b363
PR
4154
4155 /* other variants had been implemented for evaluation,
4156 * but have been dropped as this one turned out to be "best"
4157 * during all our tests. */
4158
4159 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
38fa9988 4160 conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
2c46407d 4161 return -EIO;
b411b363
PR
4162}
4163
4164void INFO_bm_xfer_stats(struct drbd_conf *mdev,
4165 const char *direction, struct bm_xfer_ctx *c)
4166{
4167 /* what would it take to transfer it "plaintext" */
50d0b1ad
AG
4168 unsigned int header_size = drbd_header_size(mdev->tconn);
4169 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
4170 unsigned int plain =
4171 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
4172 c->bm_words * sizeof(unsigned long);
4173 unsigned int total = c->bytes[0] + c->bytes[1];
4174 unsigned int r;
b411b363
PR
4175
4176 /* total can not be zero. but just in case: */
4177 if (total == 0)
4178 return;
4179
4180 /* don't report if not compressed */
4181 if (total >= plain)
4182 return;
4183
4184 /* total < plain. check for overflow, still */
4185 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
4186 : (1000 * total / plain);
4187
4188 if (r > 1000)
4189 r = 1000;
4190
4191 r = 1000 - r;
4192 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
4193 "total %u; compression: %u.%u%%\n",
4194 direction,
4195 c->bytes[1], c->packets[1],
4196 c->bytes[0], c->packets[0],
4197 total, r/10, r % 10);
4198}
4199
4200/* Since we are processing the bitfield from lower addresses to higher,
4201 it does not matter if the process it in 32 bit chunks or 64 bit
4202 chunks as long as it is little endian. (Understand it as byte stream,
4203 beginning with the lowest byte...) If we would use big endian
4204 we would need to process it from the highest address to the lowest,
4205 in order to be agnostic to the 32 vs 64 bits issue.
4206
4207 returns 0 on failure, 1 if we successfully received it. */
4a76b161 4208static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4209{
4a76b161 4210 struct drbd_conf *mdev;
b411b363 4211 struct bm_xfer_ctx c;
2c46407d 4212 int err;
4a76b161
AG
4213
4214 mdev = vnr_to_mdev(tconn, pi->vnr);
4215 if (!mdev)
4216 return -EIO;
b411b363 4217
20ceb2b2
LE
4218 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
4219 /* you are supposed to send additional out-of-sync information
4220 * if you actually set bits during this phase */
b411b363 4221
b411b363
PR
4222 c = (struct bm_xfer_ctx) {
4223 .bm_bits = drbd_bm_bits(mdev),
4224 .bm_words = drbd_bm_words(mdev),
4225 };
4226
2c46407d 4227 for(;;) {
e658983a
AG
4228 if (pi->cmd == P_BITMAP)
4229 err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
4230 else if (pi->cmd == P_COMPRESSED_BITMAP) {
b411b363
PR
4231 /* MAYBE: sanity check that we speak proto >= 90,
4232 * and the feature is enabled! */
e658983a 4233 struct p_compressed_bm *p = pi->data;
b411b363 4234
50d0b1ad 4235 if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
b411b363 4236 dev_err(DEV, "ReportCBitmap packet too large\n");
82bc0194 4237 err = -EIO;
b411b363
PR
4238 goto out;
4239 }
e658983a 4240 if (pi->size <= sizeof(*p)) {
e2857216 4241 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
82bc0194 4242 err = -EIO;
78fcbdae 4243 goto out;
b411b363 4244 }
e658983a
AG
4245 err = drbd_recv_all(mdev->tconn, p, pi->size);
4246 if (err)
4247 goto out;
e2857216 4248 err = decode_bitmap_c(mdev, p, &c, pi->size);
b411b363 4249 } else {
e2857216 4250 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
82bc0194 4251 err = -EIO;
b411b363
PR
4252 goto out;
4253 }
4254
e2857216 4255 c.packets[pi->cmd == P_BITMAP]++;
50d0b1ad 4256 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;
b411b363 4257
2c46407d
AG
4258 if (err <= 0) {
4259 if (err < 0)
4260 goto out;
b411b363 4261 break;
2c46407d 4262 }
e2857216 4263 err = drbd_recv_header(mdev->tconn, pi);
82bc0194 4264 if (err)
b411b363 4265 goto out;
2c46407d 4266 }
b411b363
PR
4267
4268 INFO_bm_xfer_stats(mdev, "receive", &c);
4269
4270 if (mdev->state.conn == C_WF_BITMAP_T) {
de1f8e4a
AG
4271 enum drbd_state_rv rv;
4272
82bc0194
AG
4273 err = drbd_send_bitmap(mdev);
4274 if (err)
b411b363
PR
4275 goto out;
4276 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
de1f8e4a
AG
4277 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
4278 D_ASSERT(rv == SS_SUCCESS);
b411b363
PR
4279 } else if (mdev->state.conn != C_WF_BITMAP_S) {
4280 /* admin may have requested C_DISCONNECTING,
4281 * other threads may have noticed network errors */
4282 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
4283 drbd_conn_str(mdev->state.conn));
4284 }
82bc0194 4285 err = 0;
b411b363 4286
b411b363 4287 out:
20ceb2b2 4288 drbd_bm_unlock(mdev);
82bc0194 4289 if (!err && mdev->state.conn == C_WF_BITMAP_S)
b411b363 4290 drbd_start_resync(mdev, C_SYNC_SOURCE);
82bc0194 4291 return err;
b411b363
PR
4292}
4293
4a76b161 4294static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4295{
4a76b161 4296 conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
e2857216 4297 pi->cmd, pi->size);
b411b363 4298
4a76b161 4299 return ignore_remaining_packet(tconn, pi);
b411b363
PR
4300}
4301
4a76b161 4302static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
0ced55a3 4303{
e7f52dfb
LE
4304 /* Make sure we've acked all the TCP data associated
4305 * with the data requests being unplugged */
4a76b161 4306 drbd_tcp_quickack(tconn->data.socket);
0ced55a3 4307
82bc0194 4308 return 0;
0ced55a3
PR
4309}
4310
4a76b161 4311static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
73a01a18 4312{
4a76b161 4313 struct drbd_conf *mdev;
e658983a 4314 struct p_block_desc *p = pi->data;
4a76b161
AG
4315
4316 mdev = vnr_to_mdev(tconn, pi->vnr);
4317 if (!mdev)
4318 return -EIO;
73a01a18 4319
f735e363
LE
4320 switch (mdev->state.conn) {
4321 case C_WF_SYNC_UUID:
4322 case C_WF_BITMAP_T:
4323 case C_BEHIND:
4324 break;
4325 default:
4326 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
4327 drbd_conn_str(mdev->state.conn));
4328 }
4329
73a01a18
PR
4330 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
4331
82bc0194 4332 return 0;
73a01a18
PR
4333}
4334
02918be2
PR
4335struct data_cmd {
4336 int expect_payload;
4337 size_t pkt_size;
4a76b161 4338 int (*fn)(struct drbd_tconn *, struct packet_info *);
02918be2
PR
4339};
4340
4341static struct data_cmd drbd_cmd_handler[] = {
4342 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
4343 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
4344 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
4345 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
e658983a
AG
4346 [P_BITMAP] = { 1, 0, receive_bitmap } ,
4347 [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
4348 [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote },
02918be2
PR
4349 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4350 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
e658983a
AG
4351 [P_SYNC_PARAM] = { 1, 0, receive_SyncParam },
4352 [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam },
02918be2
PR
4353 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
4354 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
4355 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
4356 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
4357 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
4358 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
4359 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4360 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4361 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4362 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
73a01a18 4363 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
4a76b161 4364 [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
036b17ea 4365 [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
b411b363
PR
4366};
4367
eefc2f7d 4368static void drbdd(struct drbd_tconn *tconn)
b411b363 4369{
77351055 4370 struct packet_info pi;
02918be2 4371 size_t shs; /* sub header size */
82bc0194 4372 int err;
b411b363 4373
eefc2f7d 4374 while (get_t_state(&tconn->receiver) == RUNNING) {
deebe195 4375 struct data_cmd *cmd;
b411b363 4376
eefc2f7d 4377 drbd_thread_current_set_cpu(&tconn->receiver);
69bc7bc3 4378 if (drbd_recv_header(tconn, &pi))
02918be2 4379 goto err_out;
b411b363 4380
deebe195 4381 cmd = &drbd_cmd_handler[pi.cmd];
4a76b161 4382 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
2fcb8f30
AG
4383 conn_err(tconn, "Unexpected data packet %s (0x%04x)",
4384 cmdname(pi.cmd), pi.cmd);
02918be2 4385 goto err_out;
0b33a916 4386 }
b411b363 4387
e658983a
AG
4388 shs = cmd->pkt_size;
4389 if (pi.size > shs && !cmd->expect_payload) {
2fcb8f30
AG
4390 conn_err(tconn, "No payload expected %s l:%d\n",
4391 cmdname(pi.cmd), pi.size);
02918be2 4392 goto err_out;
b411b363 4393 }
b411b363 4394
c13f7e1a 4395 if (shs) {
e658983a 4396 err = drbd_recv_all_warn(tconn, pi.data, shs);
a5c31904 4397 if (err)
c13f7e1a 4398 goto err_out;
e2857216 4399 pi.size -= shs;
c13f7e1a
LE
4400 }
4401
4a76b161
AG
4402 err = cmd->fn(tconn, &pi);
4403 if (err) {
9f5bdc33
AG
4404 conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
4405 cmdname(pi.cmd), err, pi.size);
02918be2 4406 goto err_out;
b411b363
PR
4407 }
4408 }
82bc0194 4409 return;
b411b363 4410
82bc0194
AG
4411 err_out:
4412 conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
b411b363
PR
4413}
4414
0e29d163 4415void conn_flush_workqueue(struct drbd_tconn *tconn)
b411b363
PR
4416{
4417 struct drbd_wq_barrier barr;
4418
4419 barr.w.cb = w_prev_work_done;
0e29d163 4420 barr.w.tconn = tconn;
b411b363 4421 init_completion(&barr.done);
d5b27b01 4422 drbd_queue_work(&tconn->sender_work, &barr.w);
b411b363
PR
4423 wait_for_completion(&barr.done);
4424}
4425
81fa2e67 4426static void conn_disconnect(struct drbd_tconn *tconn)
b411b363 4427{
c141ebda 4428 struct drbd_conf *mdev;
bbeb641c 4429 enum drbd_conns oc;
376694a0 4430 int vnr;
b411b363 4431
bbeb641c 4432 if (tconn->cstate == C_STANDALONE)
b411b363 4433 return;
b411b363 4434
545752d5
LE
4435 /* We are about to start the cleanup after connection loss.
4436 * Make sure drbd_make_request knows about that.
4437 * Usually we should be in some network failure state already,
4438 * but just in case we are not, we fix it up here.
4439 */
b8853dbd 4440 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
545752d5 4441
b411b363 4442 /* asender does not clean up anything. it must not interfere, either */
360cc740
PR
4443 drbd_thread_stop(&tconn->asender);
4444 drbd_free_sock(tconn);
4445
c141ebda
PR
4446 rcu_read_lock();
4447 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
4448 kref_get(&mdev->kref);
4449 rcu_read_unlock();
4450 drbd_disconnected(mdev);
4451 kref_put(&mdev->kref, &drbd_minor_destroy);
4452 rcu_read_lock();
4453 }
4454 rcu_read_unlock();
4455
12038a3a
PR
4456 if (!list_empty(&tconn->current_epoch->list))
4457 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
4458 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
4459 atomic_set(&tconn->current_epoch->epoch_size, 0);
b6dd1a89 4460 tconn->send.seen_any_write_yet = false;
12038a3a 4461
360cc740
PR
4462 conn_info(tconn, "Connection closed\n");
4463
cb703454
PR
4464 if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
4465 conn_try_outdate_peer_async(tconn);
4466
360cc740 4467 spin_lock_irq(&tconn->req_lock);
bbeb641c
PR
4468 oc = tconn->cstate;
4469 if (oc >= C_UNCONNECTED)
376694a0 4470 _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
bbeb641c 4471
360cc740
PR
4472 spin_unlock_irq(&tconn->req_lock);
4473
f3dfa40a 4474 if (oc == C_DISCONNECTING)
d9cc6e23 4475 conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
360cc740
PR
4476}
4477
c141ebda 4478static int drbd_disconnected(struct drbd_conf *mdev)
360cc740 4479{
360cc740 4480 unsigned int i;
b411b363 4481
85719573 4482 /* wait for current activity to cease. */
87eeee41 4483 spin_lock_irq(&mdev->tconn->req_lock);
b411b363
PR
4484 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
4485 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
4486 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
87eeee41 4487 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363
PR
4488
4489 /* We do not have data structures that would allow us to
4490 * get the rs_pending_cnt down to 0 again.
4491 * * On C_SYNC_TARGET we do not have any data structures describing
4492 * the pending RSDataRequest's we have sent.
4493 * * On C_SYNC_SOURCE there is no data structure that tracks
4494 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
4495 * And no, it is not the sum of the reference counts in the
4496 * resync_LRU. The resync_LRU tracks the whole operation including
4497 * the disk-IO, while the rs_pending_cnt only tracks the blocks
4498 * on the fly. */
4499 drbd_rs_cancel_all(mdev);
4500 mdev->rs_total = 0;
4501 mdev->rs_failed = 0;
4502 atomic_set(&mdev->rs_pending_cnt, 0);
4503 wake_up(&mdev->misc_wait);
4504
b411b363 4505 del_timer_sync(&mdev->resync_timer);
b411b363
PR
4506 resync_timer_fn((unsigned long)mdev);
4507
b411b363
PR
4508 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
4509 * w_make_resync_request etc. which may still be on the worker queue
4510 * to be "canceled" */
4511 drbd_flush_workqueue(mdev);
4512
a990be46 4513 drbd_finish_peer_reqs(mdev);
b411b363 4514
d10b4ea3
PR
4515 /* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
4516 might have issued a work again. The one before drbd_finish_peer_reqs() is
4517 necessary to reclain net_ee in drbd_finish_peer_reqs(). */
4518 drbd_flush_workqueue(mdev);
4519
08332d73
LE
4520 /* need to do it again, drbd_finish_peer_reqs() may have populated it
4521 * again via drbd_try_clear_on_disk_bm(). */
4522 drbd_rs_cancel_all(mdev);
b411b363
PR
4523
4524 kfree(mdev->p_uuid);
4525 mdev->p_uuid = NULL;
4526
2aebfabb 4527 if (!drbd_suspended(mdev))
2f5cdd0b 4528 tl_clear(mdev->tconn);
b411b363
PR
4529
4530 drbd_md_sync(mdev);
4531
20ceb2b2
LE
4532 /* serialize with bitmap writeout triggered by the state change,
4533 * if any. */
4534 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
4535
b411b363
PR
4536 /* tcp_close and release of sendpage pages can be deferred. I don't
4537 * want to use SO_LINGER, because apparently it can be deferred for
4538 * more than 20 seconds (longest time I checked).
4539 *
4540 * Actually we don't care for exactly when the network stack does its
4541 * put_page(), but release our reference on these pages right here.
4542 */
7721f567 4543 i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
b411b363
PR
4544 if (i)
4545 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
435f0740
LE
4546 i = atomic_read(&mdev->pp_in_use_by_net);
4547 if (i)
4548 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
b411b363
PR
4549 i = atomic_read(&mdev->pp_in_use);
4550 if (i)
45bb912b 4551 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
b411b363
PR
4552
4553 D_ASSERT(list_empty(&mdev->read_ee));
4554 D_ASSERT(list_empty(&mdev->active_ee));
4555 D_ASSERT(list_empty(&mdev->sync_ee));
4556 D_ASSERT(list_empty(&mdev->done_ee));
4557
360cc740 4558 return 0;
b411b363
PR
4559}
4560
4561/*
4562 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
4563 * we can agree on is stored in agreed_pro_version.
4564 *
4565 * feature flags and the reserved array should be enough room for future
4566 * enhancements of the handshake protocol, and possible plugins...
4567 *
4568 * for now, they are expected to be zero, but ignored.
4569 */
6038178e 4570static int drbd_send_features(struct drbd_tconn *tconn)
b411b363 4571{
9f5bdc33
AG
4572 struct drbd_socket *sock;
4573 struct p_connection_features *p;
b411b363 4574
9f5bdc33
AG
4575 sock = &tconn->data;
4576 p = conn_prepare_command(tconn, sock);
4577 if (!p)
e8d17b01 4578 return -EIO;
b411b363
PR
4579 memset(p, 0, sizeof(*p));
4580 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
4581 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
9f5bdc33 4582 return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
b411b363
PR
4583}
4584
4585/*
4586 * return values:
4587 * 1 yes, we have a valid connection
4588 * 0 oops, did not work out, please try again
4589 * -1 peer talks different language,
4590 * no point in trying again, please go standalone.
4591 */
6038178e 4592static int drbd_do_features(struct drbd_tconn *tconn)
b411b363 4593{
65d11ed6 4594 /* ASSERT current == tconn->receiver ... */
e658983a
AG
4595 struct p_connection_features *p;
4596 const int expect = sizeof(struct p_connection_features);
77351055 4597 struct packet_info pi;
a5c31904 4598 int err;
b411b363 4599
6038178e 4600 err = drbd_send_features(tconn);
e8d17b01 4601 if (err)
b411b363
PR
4602 return 0;
4603
69bc7bc3
AG
4604 err = drbd_recv_header(tconn, &pi);
4605 if (err)
b411b363
PR
4606 return 0;
4607
6038178e
AG
4608 if (pi.cmd != P_CONNECTION_FEATURES) {
4609 conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
2fcb8f30 4610 cmdname(pi.cmd), pi.cmd);
b411b363
PR
4611 return -1;
4612 }
4613
77351055 4614 if (pi.size != expect) {
6038178e 4615 conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
77351055 4616 expect, pi.size);
b411b363
PR
4617 return -1;
4618 }
4619
e658983a
AG
4620 p = pi.data;
4621 err = drbd_recv_all_warn(tconn, p, expect);
a5c31904 4622 if (err)
b411b363 4623 return 0;
b411b363 4624
b411b363
PR
4625 p->protocol_min = be32_to_cpu(p->protocol_min);
4626 p->protocol_max = be32_to_cpu(p->protocol_max);
4627 if (p->protocol_max == 0)
4628 p->protocol_max = p->protocol_min;
4629
4630 if (PRO_VERSION_MAX < p->protocol_min ||
4631 PRO_VERSION_MIN > p->protocol_max)
4632 goto incompat;
4633
65d11ed6 4634 tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
b411b363 4635
65d11ed6
PR
4636 conn_info(tconn, "Handshake successful: "
4637 "Agreed network protocol version %d\n", tconn->agreed_pro_version);
b411b363
PR
4638
4639 return 1;
4640
4641 incompat:
65d11ed6 4642 conn_err(tconn, "incompatible DRBD dialects: "
b411b363
PR
4643 "I support %d-%d, peer supports %d-%d\n",
4644 PRO_VERSION_MIN, PRO_VERSION_MAX,
4645 p->protocol_min, p->protocol_max);
4646 return -1;
4647}
4648
4649#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
13e6037d 4650static int drbd_do_auth(struct drbd_tconn *tconn)
b411b363
PR
4651{
4652 dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
4653 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
b10d96cb 4654 return -1;
b411b363
PR
4655}
4656#else
4657#define CHALLENGE_LEN 64
b10d96cb
JT
4658
4659/* Return value:
4660 1 - auth succeeded,
4661 0 - failed, try again (network error),
4662 -1 - auth failed, don't try again.
4663*/
4664
13e6037d 4665static int drbd_do_auth(struct drbd_tconn *tconn)
b411b363 4666{
9f5bdc33 4667 struct drbd_socket *sock;
b411b363
PR
4668 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4669 struct scatterlist sg;
4670 char *response = NULL;
4671 char *right_response = NULL;
4672 char *peers_ch = NULL;
44ed167d
PR
4673 unsigned int key_len;
4674 char secret[SHARED_SECRET_MAX]; /* 64 byte */
b411b363
PR
4675 unsigned int resp_size;
4676 struct hash_desc desc;
77351055 4677 struct packet_info pi;
44ed167d 4678 struct net_conf *nc;
69bc7bc3 4679 int err, rv;
b411b363 4680
9f5bdc33 4681 /* FIXME: Put the challenge/response into the preallocated socket buffer. */
b411b363 4682
44ed167d
PR
4683 rcu_read_lock();
4684 nc = rcu_dereference(tconn->net_conf);
4685 key_len = strlen(nc->shared_secret);
4686 memcpy(secret, nc->shared_secret, key_len);
4687 rcu_read_unlock();
4688
13e6037d 4689 desc.tfm = tconn->cram_hmac_tfm;
b411b363
PR
4690 desc.flags = 0;
4691
44ed167d 4692 rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len);
b411b363 4693 if (rv) {
13e6037d 4694 conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
b10d96cb 4695 rv = -1;
b411b363
PR
4696 goto fail;
4697 }
4698
4699 get_random_bytes(my_challenge, CHALLENGE_LEN);
4700
9f5bdc33
AG
4701 sock = &tconn->data;
4702 if (!conn_prepare_command(tconn, sock)) {
4703 rv = 0;
4704 goto fail;
4705 }
e658983a 4706 rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
9f5bdc33 4707 my_challenge, CHALLENGE_LEN);
b411b363
PR
4708 if (!rv)
4709 goto fail;
4710
69bc7bc3
AG
4711 err = drbd_recv_header(tconn, &pi);
4712 if (err) {
4713 rv = 0;
b411b363 4714 goto fail;
69bc7bc3 4715 }
b411b363 4716
77351055 4717 if (pi.cmd != P_AUTH_CHALLENGE) {
13e6037d 4718 conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
2fcb8f30 4719 cmdname(pi.cmd), pi.cmd);
b411b363
PR
4720 rv = 0;
4721 goto fail;
4722 }
4723
77351055 4724 if (pi.size > CHALLENGE_LEN * 2) {
13e6037d 4725 conn_err(tconn, "expected AuthChallenge payload too big.\n");
b10d96cb 4726 rv = -1;
b411b363
PR
4727 goto fail;
4728 }
4729
77351055 4730 peers_ch = kmalloc(pi.size, GFP_NOIO);
b411b363 4731 if (peers_ch == NULL) {
13e6037d 4732 conn_err(tconn, "kmalloc of peers_ch failed\n");
b10d96cb 4733 rv = -1;
b411b363
PR
4734 goto fail;
4735 }
4736
a5c31904
AG
4737 err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
4738 if (err) {
b411b363
PR
4739 rv = 0;
4740 goto fail;
4741 }
4742
13e6037d 4743 resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
b411b363
PR
4744 response = kmalloc(resp_size, GFP_NOIO);
4745 if (response == NULL) {
13e6037d 4746 conn_err(tconn, "kmalloc of response failed\n");
b10d96cb 4747 rv = -1;
b411b363
PR
4748 goto fail;
4749 }
4750
4751 sg_init_table(&sg, 1);
77351055 4752 sg_set_buf(&sg, peers_ch, pi.size);
b411b363
PR
4753
4754 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4755 if (rv) {
13e6037d 4756 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
b10d96cb 4757 rv = -1;
b411b363
PR
4758 goto fail;
4759 }
4760
9f5bdc33
AG
4761 if (!conn_prepare_command(tconn, sock)) {
4762 rv = 0;
b411b363 4763 goto fail;
9f5bdc33 4764 }
e658983a 4765 rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
9f5bdc33 4766 response, resp_size);
b411b363
PR
4767 if (!rv)
4768 goto fail;
4769
69bc7bc3
AG
4770 err = drbd_recv_header(tconn, &pi);
4771 if (err) {
b411b363
PR
4772 rv = 0;
4773 goto fail;
4774 }
4775
77351055 4776 if (pi.cmd != P_AUTH_RESPONSE) {
13e6037d 4777 conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
2fcb8f30 4778 cmdname(pi.cmd), pi.cmd);
b411b363
PR
4779 rv = 0;
4780 goto fail;
4781 }
4782
77351055 4783 if (pi.size != resp_size) {
13e6037d 4784 conn_err(tconn, "expected AuthResponse payload of wrong size\n");
b411b363
PR
4785 rv = 0;
4786 goto fail;
4787 }
b411b363 4788
a5c31904
AG
4789 err = drbd_recv_all_warn(tconn, response , resp_size);
4790 if (err) {
b411b363
PR
4791 rv = 0;
4792 goto fail;
4793 }
4794
4795 right_response = kmalloc(resp_size, GFP_NOIO);
2d1ee87d 4796 if (right_response == NULL) {
13e6037d 4797 conn_err(tconn, "kmalloc of right_response failed\n");
b10d96cb 4798 rv = -1;
b411b363
PR
4799 goto fail;
4800 }
4801
4802 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4803
4804 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4805 if (rv) {
13e6037d 4806 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
b10d96cb 4807 rv = -1;
b411b363
PR
4808 goto fail;
4809 }
4810
4811 rv = !memcmp(response, right_response, resp_size);
4812
4813 if (rv)
44ed167d
PR
4814 conn_info(tconn, "Peer authenticated using %d bytes HMAC\n",
4815 resp_size);
b10d96cb
JT
4816 else
4817 rv = -1;
b411b363
PR
4818
4819 fail:
4820 kfree(peers_ch);
4821 kfree(response);
4822 kfree(right_response);
4823
4824 return rv;
4825}
4826#endif
4827
4828int drbdd_init(struct drbd_thread *thi)
4829{
392c8801 4830 struct drbd_tconn *tconn = thi->tconn;
b411b363
PR
4831 int h;
4832
4d641dd7 4833 conn_info(tconn, "receiver (re)started\n");
b411b363
PR
4834
4835 do {
81fa2e67 4836 h = conn_connect(tconn);
b411b363 4837 if (h == 0) {
81fa2e67 4838 conn_disconnect(tconn);
20ee6390 4839 schedule_timeout_interruptible(HZ);
b411b363
PR
4840 }
4841 if (h == -1) {
4d641dd7 4842 conn_warn(tconn, "Discarding network configuration.\n");
bbeb641c 4843 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363
PR
4844 }
4845 } while (h == 0);
4846
91fd4dad
PR
4847 if (h > 0)
4848 drbdd(tconn);
b411b363 4849
81fa2e67 4850 conn_disconnect(tconn);
b411b363 4851
4d641dd7 4852 conn_info(tconn, "receiver terminated\n");
b411b363
PR
4853 return 0;
4854}
4855
4856/* ********* acknowledge sender ******** */
4857
e05e1e59 4858static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4859{
e658983a 4860 struct p_req_state_reply *p = pi->data;
e4f78ede
PR
4861 int retcode = be32_to_cpu(p->retcode);
4862
4863 if (retcode >= SS_SUCCESS) {
4864 set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
4865 } else {
4866 set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
4867 conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
4868 drbd_set_st_err_str(retcode), retcode);
4869 }
4870 wake_up(&tconn->ping_wait);
4871
2735a594 4872 return 0;
e4f78ede 4873}
b411b363 4874
1952e916 4875static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4876{
1952e916 4877 struct drbd_conf *mdev;
e658983a 4878 struct p_req_state_reply *p = pi->data;
b411b363
PR
4879 int retcode = be32_to_cpu(p->retcode);
4880
1952e916
AG
4881 mdev = vnr_to_mdev(tconn, pi->vnr);
4882 if (!mdev)
2735a594 4883 return -EIO;
1952e916 4884
4d0fc3fd
PR
4885 if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) {
4886 D_ASSERT(tconn->agreed_pro_version < 100);
4887 return got_conn_RqSReply(tconn, pi);
4888 }
4889
b411b363 4890 if (retcode >= SS_SUCCESS) {
e4f78ede 4891 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
b411b363 4892 } else {
e4f78ede 4893 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
b411b363 4894 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
e4f78ede 4895 drbd_set_st_err_str(retcode), retcode);
b411b363
PR
4896 }
4897 wake_up(&mdev->state_wait);
4898
2735a594 4899 return 0;
b411b363
PR
4900}
4901
e05e1e59 4902static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4903{
2735a594 4904 return drbd_send_ping_ack(tconn);
b411b363
PR
4905
4906}
4907
e05e1e59 4908static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363
PR
4909{
4910 /* restore idle timeout */
2a67d8b9
PR
4911 tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
4912 if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
4913 wake_up(&tconn->ping_wait);
b411b363 4914
2735a594 4915 return 0;
b411b363
PR
4916}
4917
1952e916 4918static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4919{
1952e916 4920 struct drbd_conf *mdev;
e658983a 4921 struct p_block_ack *p = pi->data;
b411b363
PR
4922 sector_t sector = be64_to_cpu(p->sector);
4923 int blksize = be32_to_cpu(p->blksize);
4924
1952e916
AG
4925 mdev = vnr_to_mdev(tconn, pi->vnr);
4926 if (!mdev)
2735a594 4927 return -EIO;
1952e916 4928
31890f4a 4929 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
b411b363
PR
4930
4931 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4932
1d53f09e
LE
4933 if (get_ldev(mdev)) {
4934 drbd_rs_complete_io(mdev, sector);
4935 drbd_set_in_sync(mdev, sector, blksize);
4936 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4937 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4938 put_ldev(mdev);
4939 }
b411b363 4940 dec_rs_pending(mdev);
778f271d 4941 atomic_add(blksize >> 9, &mdev->rs_sect_in);
b411b363 4942
2735a594 4943 return 0;
b411b363
PR
4944}
4945
bc9c5c41
AG
4946static int
4947validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
4948 struct rb_root *root, const char *func,
4949 enum drbd_req_event what, bool missing_ok)
b411b363
PR
4950{
4951 struct drbd_request *req;
4952 struct bio_and_error m;
4953
87eeee41 4954 spin_lock_irq(&mdev->tconn->req_lock);
bc9c5c41 4955 req = find_request(mdev, root, id, sector, missing_ok, func);
b411b363 4956 if (unlikely(!req)) {
87eeee41 4957 spin_unlock_irq(&mdev->tconn->req_lock);
85997675 4958 return -EIO;
b411b363
PR
4959 }
4960 __req_mod(req, what, &m);
87eeee41 4961 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363
PR
4962
4963 if (m.bio)
4964 complete_master_bio(mdev, &m);
85997675 4965 return 0;
b411b363
PR
4966}
4967
1952e916 4968static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 4969{
1952e916 4970 struct drbd_conf *mdev;
e658983a 4971 struct p_block_ack *p = pi->data;
b411b363
PR
4972 sector_t sector = be64_to_cpu(p->sector);
4973 int blksize = be32_to_cpu(p->blksize);
4974 enum drbd_req_event what;
4975
1952e916
AG
4976 mdev = vnr_to_mdev(tconn, pi->vnr);
4977 if (!mdev)
2735a594 4978 return -EIO;
1952e916 4979
b411b363
PR
4980 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4981
579b57ed 4982 if (p->block_id == ID_SYNCER) {
b411b363
PR
4983 drbd_set_in_sync(mdev, sector, blksize);
4984 dec_rs_pending(mdev);
2735a594 4985 return 0;
b411b363 4986 }
e05e1e59 4987 switch (pi->cmd) {
b411b363 4988 case P_RS_WRITE_ACK:
8554df1c 4989 what = WRITE_ACKED_BY_PEER_AND_SIS;
b411b363
PR
4990 break;
4991 case P_WRITE_ACK:
8554df1c 4992 what = WRITE_ACKED_BY_PEER;
b411b363
PR
4993 break;
4994 case P_RECV_ACK:
8554df1c 4995 what = RECV_ACKED_BY_PEER;
b411b363 4996 break;
d4dabbe2
LE
4997 case P_SUPERSEDED:
4998 what = CONFLICT_RESOLVED;
b411b363 4999 break;
7be8da07 5000 case P_RETRY_WRITE:
7be8da07 5001 what = POSTPONE_WRITE;
b411b363
PR
5002 break;
5003 default:
2735a594 5004 BUG();
b411b363
PR
5005 }
5006
5007 return validate_req_change_req_state(mdev, p->block_id, sector,
2735a594
AG
5008 &mdev->write_requests, __func__,
5009 what, false);
b411b363
PR
5010}
5011
1952e916 5012static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 5013{
1952e916 5014 struct drbd_conf *mdev;
e658983a 5015 struct p_block_ack *p = pi->data;
b411b363 5016 sector_t sector = be64_to_cpu(p->sector);
2deb8336 5017 int size = be32_to_cpu(p->blksize);
85997675 5018 int err;
b411b363 5019
1952e916
AG
5020 mdev = vnr_to_mdev(tconn, pi->vnr);
5021 if (!mdev)
2735a594 5022 return -EIO;
b411b363
PR
5023
5024 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5025
579b57ed 5026 if (p->block_id == ID_SYNCER) {
b411b363
PR
5027 dec_rs_pending(mdev);
5028 drbd_rs_failed_io(mdev, sector, size);
2735a594 5029 return 0;
b411b363 5030 }
2deb8336 5031
85997675
AG
5032 err = validate_req_change_req_state(mdev, p->block_id, sector,
5033 &mdev->write_requests, __func__,
303d1448 5034 NEG_ACKED, true);
85997675 5035 if (err) {
c3afd8f5
AG
5036 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
5037 The master bio might already be completed, therefore the
5038 request is no longer in the collision hash. */
5039 /* In Protocol B we might already have got a P_RECV_ACK
5040 but then get a P_NEG_ACK afterwards. */
c3afd8f5 5041 drbd_set_out_of_sync(mdev, sector, size);
2deb8336 5042 }
2735a594 5043 return 0;
b411b363
PR
5044}
5045
1952e916 5046static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 5047{
1952e916 5048 struct drbd_conf *mdev;
e658983a 5049 struct p_block_ack *p = pi->data;
b411b363
PR
5050 sector_t sector = be64_to_cpu(p->sector);
5051
1952e916
AG
5052 mdev = vnr_to_mdev(tconn, pi->vnr);
5053 if (!mdev)
2735a594 5054 return -EIO;
1952e916 5055
b411b363 5056 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
7be8da07 5057
380207d0 5058 dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n",
b411b363
PR
5059 (unsigned long long)sector, be32_to_cpu(p->blksize));
5060
5061 return validate_req_change_req_state(mdev, p->block_id, sector,
2735a594
AG
5062 &mdev->read_requests, __func__,
5063 NEG_ACKED, false);
b411b363
PR
5064}
5065
1952e916 5066static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 5067{
1952e916 5068 struct drbd_conf *mdev;
b411b363
PR
5069 sector_t sector;
5070 int size;
e658983a 5071 struct p_block_ack *p = pi->data;
1952e916
AG
5072
5073 mdev = vnr_to_mdev(tconn, pi->vnr);
5074 if (!mdev)
2735a594 5075 return -EIO;
b411b363
PR
5076
5077 sector = be64_to_cpu(p->sector);
5078 size = be32_to_cpu(p->blksize);
b411b363
PR
5079
5080 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5081
5082 dec_rs_pending(mdev);
5083
5084 if (get_ldev_if_state(mdev, D_FAILED)) {
5085 drbd_rs_complete_io(mdev, sector);
e05e1e59 5086 switch (pi->cmd) {
d612d309
PR
5087 case P_NEG_RS_DREPLY:
5088 drbd_rs_failed_io(mdev, sector, size);
5089 case P_RS_CANCEL:
5090 break;
5091 default:
2735a594 5092 BUG();
d612d309 5093 }
b411b363
PR
5094 put_ldev(mdev);
5095 }
5096
2735a594 5097 return 0;
b411b363
PR
5098}
5099
1952e916 5100static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 5101{
e658983a 5102 struct p_barrier_ack *p = pi->data;
9ed57dcb
LE
5103 struct drbd_conf *mdev;
5104 int vnr;
1952e916 5105
9ed57dcb 5106 tl_release(tconn, p->barrier, be32_to_cpu(p->set_size));
b411b363 5107
9ed57dcb
LE
5108 rcu_read_lock();
5109 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
5110 if (mdev->state.conn == C_AHEAD &&
5111 atomic_read(&mdev->ap_in_flight) == 0 &&
5112 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
5113 mdev->start_resync_timer.expires = jiffies + HZ;
5114 add_timer(&mdev->start_resync_timer);
5115 }
c4752ef1 5116 }
9ed57dcb 5117 rcu_read_unlock();
c4752ef1 5118
2735a594 5119 return 0;
b411b363
PR
5120}
5121
1952e916 5122static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
b411b363 5123{
1952e916 5124 struct drbd_conf *mdev;
e658983a 5125 struct p_block_ack *p = pi->data;
b411b363
PR
5126 struct drbd_work *w;
5127 sector_t sector;
5128 int size;
5129
1952e916
AG
5130 mdev = vnr_to_mdev(tconn, pi->vnr);
5131 if (!mdev)
2735a594 5132 return -EIO;
1952e916 5133
b411b363
PR
5134 sector = be64_to_cpu(p->sector);
5135 size = be32_to_cpu(p->blksize);
5136
5137 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5138
5139 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
8f7bed77 5140 drbd_ov_out_of_sync_found(mdev, sector, size);
b411b363 5141 else
8f7bed77 5142 ov_out_of_sync_print(mdev);
b411b363 5143
1d53f09e 5144 if (!get_ldev(mdev))
2735a594 5145 return 0;
1d53f09e 5146
b411b363
PR
5147 drbd_rs_complete_io(mdev, sector);
5148 dec_rs_pending(mdev);
5149
ea5442af
LE
5150 --mdev->ov_left;
5151
5152 /* let's advance progress step marks only for every other megabyte */
5153 if ((mdev->ov_left & 0x200) == 0x200)
5154 drbd_advance_rs_marks(mdev, mdev->ov_left);
5155
5156 if (mdev->ov_left == 0) {
b411b363
PR
5157 w = kmalloc(sizeof(*w), GFP_NOIO);
5158 if (w) {
5159 w->cb = w_ov_finished;
a21e9298 5160 w->mdev = mdev;
d5b27b01 5161 drbd_queue_work(&mdev->tconn->sender_work, w);
b411b363
PR
5162 } else {
5163 dev_err(DEV, "kmalloc(w) failed.");
8f7bed77 5164 ov_out_of_sync_print(mdev);
b411b363
PR
5165 drbd_resync_finished(mdev);
5166 }
5167 }
1d53f09e 5168 put_ldev(mdev);
2735a594 5169 return 0;
b411b363
PR
5170}
5171
1952e916 5172static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
0ced55a3 5173{
2735a594 5174 return 0;
b411b363
PR
5175}
5176
a990be46 5177static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
0ced55a3 5178{
082a3439 5179 struct drbd_conf *mdev;
c141ebda 5180 int vnr, not_empty = 0;
32862ec7
PR
5181
5182 do {
5183 clear_bit(SIGNAL_ASENDER, &tconn->flags);
5184 flush_signals(current);
c141ebda
PR
5185
5186 rcu_read_lock();
5187 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
5188 kref_get(&mdev->kref);
5189 rcu_read_unlock();
d3fcb490 5190 if (drbd_finish_peer_reqs(mdev)) {
c141ebda
PR
5191 kref_put(&mdev->kref, &drbd_minor_destroy);
5192 return 1;
d3fcb490 5193 }
c141ebda
PR
5194 kref_put(&mdev->kref, &drbd_minor_destroy);
5195 rcu_read_lock();
082a3439 5196 }
32862ec7 5197 set_bit(SIGNAL_ASENDER, &tconn->flags);
082a3439
PR
5198
5199 spin_lock_irq(&tconn->req_lock);
c141ebda 5200 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
082a3439
PR
5201 not_empty = !list_empty(&mdev->done_ee);
5202 if (not_empty)
5203 break;
5204 }
5205 spin_unlock_irq(&tconn->req_lock);
c141ebda 5206 rcu_read_unlock();
32862ec7
PR
5207 } while (not_empty);
5208
5209 return 0;
0ced55a3
PR
5210}
5211
b411b363
PR
5212struct asender_cmd {
5213 size_t pkt_size;
1952e916 5214 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
b411b363
PR
5215};
5216
7201b972 5217static struct asender_cmd asender_tbl[] = {
e658983a
AG
5218 [P_PING] = { 0, got_Ping },
5219 [P_PING_ACK] = { 0, got_PingAck },
b411b363
PR
5220 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5221 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5222 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
d4dabbe2 5223 [P_SUPERSEDED] = { sizeof(struct p_block_ack), got_BlockAck },
b411b363
PR
5224 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
5225 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
1952e916 5226 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply },
b411b363
PR
5227 [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
5228 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
5229 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
5230 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
02918be2 5231 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
1952e916
AG
5232 [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply },
5233 [P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), got_conn_RqSReply },
5234 [P_RETRY_WRITE] = { sizeof(struct p_block_ack), got_BlockAck },
7201b972 5235};
b411b363
PR
5236
5237int drbd_asender(struct drbd_thread *thi)
5238{
392c8801 5239 struct drbd_tconn *tconn = thi->tconn;
b411b363 5240 struct asender_cmd *cmd = NULL;
77351055 5241 struct packet_info pi;
257d0af6 5242 int rv;
e658983a 5243 void *buf = tconn->meta.rbuf;
b411b363 5244 int received = 0;
52b061a4
AG
5245 unsigned int header_size = drbd_header_size(tconn);
5246 int expect = header_size;
44ed167d
PR
5247 bool ping_timeout_active = false;
5248 struct net_conf *nc;
bb77d34e 5249 int ping_timeo, tcp_cork, ping_int;
b411b363
PR
5250
5251 current->policy = SCHED_RR; /* Make this a realtime task! */
5252 current->rt_priority = 2; /* more important than all other tasks */
5253
e77a0a5c 5254 while (get_t_state(thi) == RUNNING) {
80822284 5255 drbd_thread_current_set_cpu(thi);
b411b363 5256
44ed167d
PR
5257 rcu_read_lock();
5258 nc = rcu_dereference(tconn->net_conf);
5259 ping_timeo = nc->ping_timeo;
bb77d34e 5260 tcp_cork = nc->tcp_cork;
44ed167d
PR
5261 ping_int = nc->ping_int;
5262 rcu_read_unlock();
5263
32862ec7 5264 if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
a17647aa 5265 if (drbd_send_ping(tconn)) {
32862ec7 5266 conn_err(tconn, "drbd_send_ping has failed\n");
b411b363 5267 goto reconnect;
841ce241 5268 }
44ed167d
PR
5269 tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
5270 ping_timeout_active = true;
b411b363
PR
5271 }
5272
32862ec7
PR
5273 /* TODO: conditionally cork; it may hurt latency if we cork without
5274 much to send */
bb77d34e 5275 if (tcp_cork)
32862ec7 5276 drbd_tcp_cork(tconn->meta.socket);
a990be46
AG
5277 if (tconn_finish_peer_reqs(tconn)) {
5278 conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
32862ec7 5279 goto reconnect;
b411b363
PR
5280 }
5281 /* but unconditionally uncork unless disabled */
bb77d34e 5282 if (tcp_cork)
32862ec7 5283 drbd_tcp_uncork(tconn->meta.socket);
b411b363
PR
5284
5285 /* short circuit, recv_msg would return EINTR anyways. */
5286 if (signal_pending(current))
5287 continue;
5288
32862ec7
PR
5289 rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
5290 clear_bit(SIGNAL_ASENDER, &tconn->flags);
b411b363
PR
5291
5292 flush_signals(current);
5293
5294 /* Note:
5295 * -EINTR (on meta) we got a signal
5296 * -EAGAIN (on meta) rcvtimeo expired
5297 * -ECONNRESET other side closed the connection
5298 * -ERESTARTSYS (on data) we got a signal
5299 * rv < 0 other than above: unexpected error!
5300 * rv == expected: full header or command
5301 * rv < expected: "woken" by signal during receive
5302 * rv == 0 : "connection shut down by peer"
5303 */
5304 if (likely(rv > 0)) {
5305 received += rv;
5306 buf += rv;
5307 } else if (rv == 0) {
b66623e3
PR
5308 if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
5309 long t;
5310 rcu_read_lock();
5311 t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
5312 rcu_read_unlock();
5313
5314 t = wait_event_timeout(tconn->ping_wait,
5315 tconn->cstate < C_WF_REPORT_PARAMS,
5316 t);
599377ac
PR
5317 if (t)
5318 break;
5319 }
32862ec7 5320 conn_err(tconn, "meta connection shut down by peer.\n");
b411b363
PR
5321 goto reconnect;
5322 } else if (rv == -EAGAIN) {
cb6518cb
LE
5323 /* If the data socket received something meanwhile,
5324 * that is good enough: peer is still alive. */
32862ec7
PR
5325 if (time_after(tconn->last_received,
5326 jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
cb6518cb 5327 continue;
f36af18c 5328 if (ping_timeout_active) {
32862ec7 5329 conn_err(tconn, "PingAck did not arrive in time.\n");
b411b363
PR
5330 goto reconnect;
5331 }
32862ec7 5332 set_bit(SEND_PING, &tconn->flags);
b411b363
PR
5333 continue;
5334 } else if (rv == -EINTR) {
5335 continue;
5336 } else {
32862ec7 5337 conn_err(tconn, "sock_recvmsg returned %d\n", rv);
b411b363
PR
5338 goto reconnect;
5339 }
5340
5341 if (received == expect && cmd == NULL) {
e658983a 5342 if (decode_header(tconn, tconn->meta.rbuf, &pi))
b411b363 5343 goto reconnect;
7201b972 5344 cmd = &asender_tbl[pi.cmd];
1952e916 5345 if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
2fcb8f30
AG
5346 conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n",
5347 cmdname(pi.cmd), pi.cmd);
b411b363
PR
5348 goto disconnect;
5349 }
e658983a 5350 expect = header_size + cmd->pkt_size;
52b061a4 5351 if (pi.size != expect - header_size) {
32862ec7 5352 conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
77351055 5353 pi.cmd, pi.size);
b411b363 5354 goto reconnect;
257d0af6 5355 }
b411b363
PR
5356 }
5357 if (received == expect) {
2735a594 5358 bool err;
a4fbda8e 5359
2735a594
AG
5360 err = cmd->fn(tconn, &pi);
5361 if (err) {
1952e916 5362 conn_err(tconn, "%pf failed\n", cmd->fn);
b411b363 5363 goto reconnect;
1952e916 5364 }
b411b363 5365
a4fbda8e 5366 tconn->last_received = jiffies;
f36af18c 5367
44ed167d
PR
5368 if (cmd == &asender_tbl[P_PING_ACK]) {
5369 /* restore idle timeout */
5370 tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
5371 ping_timeout_active = false;
5372 }
f36af18c 5373
e658983a 5374 buf = tconn->meta.rbuf;
b411b363 5375 received = 0;
52b061a4 5376 expect = header_size;
b411b363
PR
5377 cmd = NULL;
5378 }
5379 }
5380
5381 if (0) {
5382reconnect:
bbeb641c 5383 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
19fffd7b 5384 conn_md_sync(tconn);
b411b363
PR
5385 }
5386 if (0) {
5387disconnect:
bbeb641c 5388 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
b411b363 5389 }
32862ec7 5390 clear_bit(SIGNAL_ASENDER, &tconn->flags);
b411b363 5391
32862ec7 5392 conn_info(tconn, "asender terminated\n");
b411b363
PR
5393
5394 return 0;
5395}