/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>

#include <asm/uaccess.h>
#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);

#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */
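
/*
 * For reference, the chain accessors this code relies on, as presumably
 * defined in drbd_int.h; a sketch for orientation, not authoritative:
 */
#if 0
#define page_chain_next(page) \
	((struct page *)page_private(page))
#define page_chain_for_each(page) \
	for (; page; page = page_chain_next(page))
#define page_chain_for_each_safe(page, n) \
	for (; page && ({ n = page_chain_next(page); 1; }); page = n)
#endif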

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first not finished we can
	   stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}
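
/*
 * Typical pairing, as used by drbd_alloc_ee()/drbd_free_some_ee() below;
 * an illustrative sketch only, not a verbatim call site:
 */
#if 0
	struct page *page = drbd_pp_alloc(mdev, nr_pages, /* retry = */ true);
	if (page) {
		/* ... attach the chain to an epoch entry, submit I/O ... */
		drbd_pp_free(mdev, page, /* is_net = */ 0);
	}
#endif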

/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_process_done_ee()
 drbd_wait_ee_list_empty()
*/

struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				     u64 id,
				     sector_t sector,
				     unsigned int data_size,
				     gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_epoch_entry *e;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;

	if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!e) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	INIT_HLIST_NODE(&e->colision);
	e->epoch = NULL;
	e->mdev = mdev;
	e->pages = page;
	atomic_set(&e->pending_bios, 0);
	e->size = data_size;
	e->flags = 0;
	e->sector = sector;
	e->block_id = id;

	return e;

 fail:
	mempool_free(e, drbd_ee_mempool);
	return NULL;
}

void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
{
	if (e->flags & EE_HAS_DIGEST)
		kfree(e->digest);
	drbd_pp_free(mdev, e->pages, is_net);
	D_ASSERT(atomic_read(&e->pending_bios) == 0);
	D_ASSERT(hlist_unhashed(&e->colision));
	mempool_free(e, drbd_ee_mempool);
}

int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, e, is_net);
		count++;
	}
	return count;
}

/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);
	}
	wake_up(&mdev->ee_wait);

	return ok;
}
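
/*
 * All callbacks invoked above share the drbd_work callback shape; a
 * skeleton of such a callback, for orientation (see e_end_block() below
 * for a real one):
 */
#if 0
static int my_done_ee_cb(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	/* send the corresponding ACK for e, or clean up when cancel is set */
	return 1; /* non-zero means "ok" */
}
#endif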

void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
	}
}

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
}

/* see also kernel_accept; which is only present since 2.6.18.
 * also we want to log which part of it failed, exactly */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops = sock->ops;

out:
	return err;
}

static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
		    void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on sock) we got a signal
		 */
		if (rv < 0) {
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	}

	set_fs(oldfs);

	if (rv != size)
		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

	return rv;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
		unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}
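
/*
 * "Open coded" means we poke sk_sndbuf/sk_rcvbuf and the lock bits
 * directly instead of going through the setsockopt path (which, note,
 * doubles the requested value for bookkeeping overhead). The userspace
 * analogue would be roughly:
 */
#if 0
	/* fd, snd, rcv as above; illustrative only */
	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &snd, sizeof(snd));
	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, sizeof(rcv));
#endif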

static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->net_conf->my_addr,
	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->net_conf->peer_addr,
				 mdev->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			dev_err(DEV, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	put_net_conf(mdev);
	return sock;
}

static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = mdev->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) mdev->net_conf->my_addr,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			dev_err(DEV, "%s failed, err = %d\n", what, err);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	}
	put_net_conf(mdev);

	return s_estab;
}

static int drbd_send_fp(struct drbd_conf *mdev,
	struct socket *sock, enum drbd_packets cmd)
{
	struct p_header80 *h = &mdev->data.sbuf.header.h80;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
	struct p_header80 *h = &mdev->data.rbuf.header.h80;
	int rr;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
		return be16_to_cpu(h->command);

	return 0xffff;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return 0;

	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return 1;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return 0;
	}
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	D_ASSERT(!mdev->data.socket);

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);

	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(mdev);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(mdev);
		if (s) {
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					dev_warn(DEV, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					dev_warn(DEV, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				break;
			default:
				dev_warn(DEV, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&mdev->receiver) == Exiting)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);
	if (h <= 0)
		return h;

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
		case -1:
			dev_err(DEV, "Authentication of peer failed\n");
			return -1;
		case 0:
			dev_err(DEV, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
		return 0;

	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);

	drbd_thread_start(&mdev->asender);

	if (mdev->agreed_pro_version < 95 && get_ldev(mdev)) {
		drbd_setup_queue_param(mdev, DRBD_MAX_SIZE_H80_PACKET);
		put_ldev(mdev);
	}

	if (!drbd_send_protocol(mdev))
		return -1;
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);

	return 1;

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}
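
/*
 * The two-socket establishment above, roughly (both nodes run the same
 * code; "S" tags the data socket, "M" the meta socket):
 *
 *	node A				node B
 *	drbd_try_connect()  ----->	drbd_wait_for_connect()
 *	send P_HAND_SHAKE_S
 *	drbd_wait_for_connect() <----	drbd_try_connect()
 *					send P_HAND_SHAKE_M
 *
 * If two initial packets of the same kind cross, the older socket of
 * that kind is dropped ("initial packet crossed"), and a crossed M
 * packet additionally sets DISCARD_CONCURRENT, which later decides who
 * yields on concurrent writes.
 */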

static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
{
	union p_header *h = &mdev->data.rbuf.header;
	int r;

	r = drbd_recv(mdev, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
		return 0;
	}

	if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
		*cmd = be16_to_cpu(h->h80.command);
		*packet_size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
		*cmd = be16_to_cpu(h->h95.command);
		*packet_size = be32_to_cpu(h->h95.length);
	} else {
		dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));
		return 0;
	}
	mdev->last_received = jiffies;

	return 1;
}
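
/*
 * For reference, the two on-wire header layouts being distinguished
 * above, sketched from their presumed definitions in drbd_int.h
 * (illustrative, not authoritative):
 */
#if 0
struct p_header80 {		/* protocols up to and including 94 */
	u32 magic;		/* BE_DRBD_MAGIC */
	u16 command;
	u16 length;		/* bytes of payload after this header */
} __packed;

struct p_header95 {		/* protocol 95+, allows larger payloads */
	u16 magic;		/* BE_DRBD_MAGIC_BIG */
	u16 command;
	u32 length;
} __packed;
#endif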

static void drbd_flush(struct drbd_conf *mdev)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do */
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
				wake_up(&mdev->ee_wait);
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	return rv;
}

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}
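
/*
 * The method names above map onto a strictly ordered enum; sketched
 * from its presumed definition in drbd_int.h (illustrative):
 */
#if 0
enum write_ordering_e {
	WO_none,	/* no ordering guarantees between epochs */
	WO_drain_io,	/* drain all in-flight requests between epochs */
	WO_bdev_flush,	/* additionally flush the backing device */
};
#endif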

/**
 * drbd_submit_ee()
 * @mdev:	DRBD device.
 * @e:		epoch entry
 * @rw:		flag field, see bio->bi_rw
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = e->pages;
	sector_t sector = e->sector;
	unsigned ds = e->size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
	int err = -ENOMEM;

	/* In most cases, we will only need one bio. But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio. */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > e->sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = e;
	bio->bi_end_io = drbd_endio_sec;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* a single page must always be possible! */
			BUG_ON(bio->bi_vcnt == 0);
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&e->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);
	} while (bios);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return err;
}
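
/*
 * Example of the split (numbers purely illustrative): a 16 KiB epoch
 * entry is a chain of 4 pages; if the local queue limits allow only
 * 2 vecs per bio, bio_add_page() refuses the 3rd page, and we loop back
 * to allocate a second bio starting at the already-advanced sector, so
 * the entry goes down as two 8 KiB bios instead of one 16 KiB bio.
 */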

static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	int rv;
	struct p_barrier *p = &mdev->data.rbuf.barrier;
	struct drbd_epoch *epoch;

	inc_unacked(mdev);

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
	case WO_none:
		if (rv == FE_RECYCLED)
			return 1;

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		if (epoch)
			break;
		else
			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
			/* Fall through */

	case WO_bdev_flush:
	case WO_drain_io:
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		drbd_flush(mdev);

		if (atomic_read(&mdev->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			if (epoch)
				break;
		}

		epoch = mdev->current_epoch;
		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);

		D_ASSERT(atomic_read(&epoch->active) == 0);
		D_ASSERT(epoch->flags == 0);

		return 1;
	default:
		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
		return 0;
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		mdev->epochs++;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
	spin_unlock(&mdev->epoch_lock);

	return 1;
}

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
{
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct page *page;
	int dgs, ds, rr;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;
	unsigned long *data;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
			     rr, dgs);
			return NULL;
		}
	}

	data_size -= dgs;

	ERR_IF(data_size & 0x1ff) return NULL;
	ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);
		return NULL;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
	if (!e)
		return NULL;

	ds = data_size;
	page = e->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		data = kmap(page);
		rr = drbd_recv(mdev, data, len);
		if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
		}
		kunmap(page);
		if (rr != len) {
			drbd_free_ee(mdev, e);
			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
				rr, len);
			return NULL;
		}
		ds -= rr;
	}

	if (dgs) {
		drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED.\n");
			drbd_bcast_ee(mdev, "digest failed",
					dgs, dig_in, dig_vv, e);
			drbd_free_ee(mdev, e);
			return NULL;
		}
	}
	mdev->recv_cnt += data_size>>9;
	return e;
}

/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
	struct page *page;
	int rr, rv = 1;
	void *data;

	if (!data_size)
		return 1;

	page = drbd_pp_alloc(mdev, 1, 1);

	data = kmap(page);
	while (data_size) {
		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
		if (rr != min_t(int, data_size, PAGE_SIZE)) {
			rv = 0;
			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
				 rr, min_t(int, data_size, PAGE_SIZE));
			break;
		}
		data_size -= rr;
	}
	kunmap(page);
	drbd_pp_free(mdev, page, 0);
	return rv;
}

static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
{
	struct bio_vec *bvec;
	struct bio *bio;
	int dgs, rr, i, expect;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
			     rr, dgs);
			return 0;
		}
	}

	data_size -= dgs;

	/* optimistically update recv_cnt. if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		expect = min_t(int, data_size, bvec->bv_len);
		rr = drbd_recv(mdev,
			     kmap(bvec->bv_page)+bvec->bv_offset,
			     expect);
		kunmap(bvec->bv_page);
		if (rr != expect) {
			dev_warn(DEV, "short read receiving data reply: "
			     "read %d expected %d\n",
			     rr, expect);
			return 0;
		}
		data_size -= rr;
	}

	if (dgs) {
		drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
			return 0;
		}
	}

	D_ASSERT(data_size == 0);
	return 1;
}

/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok;

	D_ASSERT(hlist_unhashed(&e->colision));

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, e->size);
		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, e->size);

		ok = drbd_send_ack(mdev, P_NEG_ACK, e);
	}
	dec_unacked(mdev);

	return ok;
}

static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
	struct drbd_epoch_entry *e;

	e = read_in_block(mdev, ID_SYNCER, sector, data_size);
	if (!e)
		goto fail;

	dec_rs_pending(mdev);

	inc_unacked(mdev);
	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */

	e->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
		return 1;

	/* drbd_submit_ee currently fails for one reason only:
	 * not being able to allocate enough bios.
	 * Is dropping the connection going to help? */
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);

	drbd_free_ee(mdev, e);
fail:
	put_ldev(mdev);
	return 0;
}

static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct drbd_request *req;
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->req_lock);
	req = _ar_id_to_req(mdev, p->block_id, sector);
	spin_unlock_irq(&mdev->req_lock);
	if (unlikely(!req)) {
		dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
		return 0;
	}

	/* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	ok = recv_dless_read(mdev, req, sector, data_size);

	if (ok)
		req_mod(req, data_received);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

	return ok;
}

static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_endio_write_sec. */
		ok = recv_resync_read(mdev, sector, data_size);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		ok = drbd_drain_block(mdev, data_size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
	}

	atomic_add(data_size >> 9, &mdev->rs_sect_in);

	return ok;
}

/* e_end_block() is called via drbd_process_done_ee().
 * this means this function only runs in the asender thread
 */
static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok = 1, pcmd;

	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				e->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			ok &= drbd_send_ack(mdev, pcmd, e);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, e->size);
		} else {
			ok = drbd_send_ack(mdev, P_NEG_ACK, e);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */
		}
		dec_unacked(mdev);
	}
	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
		D_ASSERT(!hlist_unhashed(&e->colision));
		hlist_del_init(&e->colision);
		spin_unlock_irq(&mdev->req_lock);
	} else {
		D_ASSERT(hlist_unhashed(&e->colision));
	}

	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

	return ok;
}

static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	int ok;

	D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
	ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);

	spin_lock_irq(&mdev->req_lock);
	D_ASSERT(!hlist_unhashed(&e->colision));
	hlist_del_init(&e->colision);
	spin_unlock_irq(&mdev->req_lock);

	dec_unacked(mdev);

	return ok;
}

/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq number, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
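
/*
 * The wrap-around safe comparison used below boils down to a signed
 * 32bit difference; a sketch of the presumed helpers defined earlier
 * in this file (correct as long as the two sequence numbers are less
 * than 2^31 apart):
 */
#if 0
static inline int seq_cmp(u32 a, u32 b)
{
	return (s32)a - (s32)b;
}
#define seq_le(a, b) (seq_cmp((a), (b)) <= 0)
#endif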

static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
{
	DEFINE_WAIT(wait);
	unsigned int p_seq;
	long timeout;
	int ret = 0;

	spin_lock(&mdev->peer_seq_lock);
	for (;;) {
		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
		if (seq_le(packet_seq, mdev->peer_seq+1))
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		p_seq = mdev->peer_seq;
		spin_unlock(&mdev->peer_seq_lock);
		timeout = schedule_timeout(30*HZ);
		spin_lock(&mdev->peer_seq_lock);
		if (timeout == 0 && p_seq == mdev->peer_seq) {
			ret = -ETIMEDOUT;
			dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
			break;
		}
	}
	finish_wait(&mdev->seq_wait, &wait);
	if (mdev->peer_seq+1 == packet_seq)
		mdev->peer_seq++;
	spin_unlock(&mdev->peer_seq_lock);
	return ret;
}

static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
{
	if (mdev->agreed_pro_version >= 95)
		return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
			(dpf & DP_FUA ? REQ_FUA : 0) |
			(dpf & DP_FLUSH ? REQ_FLUSH : 0) |
			(dpf & DP_DISCARD ? REQ_DISCARD : 0);
	else
		return dpf & DP_RW_SYNC ? REQ_SYNC : 0;
}

/* mirrored write */
static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	sector_t sector;
	struct drbd_epoch_entry *e;
	struct p_data *p = &mdev->data.rbuf.data;
	int rw = WRITE;
	u32 dp_flags;

	if (!get_ldev(mdev)) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write mirrored data block "
			    "to local disk.\n");
		spin_lock(&mdev->peer_seq_lock);
		if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
			mdev->peer_seq++;
		spin_unlock(&mdev->peer_seq_lock);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
		atomic_inc(&mdev->current_epoch->epoch_size);
		return drbd_drain_block(mdev, data_size);
	}

	/* get_ldev(mdev) successful.
	 * Corresponding put_ldev done either below (on various errors),
	 * or in drbd_endio_write_sec, if we successfully submit the data at
	 * the end of this function. */

	sector = be64_to_cpu(p->sector);
	e = read_in_block(mdev, p->block_id, sector, data_size);
	if (!e) {
		put_ldev(mdev);
		return 0;
	}

	e->w.cb = e_end_block;

	spin_lock(&mdev->epoch_lock);
	e->epoch = mdev->current_epoch;
	atomic_inc(&e->epoch->epoch_size);
	atomic_inc(&e->epoch->active);
	spin_unlock(&mdev->epoch_lock);

	dp_flags = be32_to_cpu(p->dp_flags);
	rw |= write_flags_to_bio(mdev, dp_flags);

	if (dp_flags & DP_MAY_SET_IN_SYNC)
		e->flags |= EE_MAY_SET_IN_SYNC;

	/* I'm the receiver, I do hold a net_cnt reference. */
	if (!mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
	} else {
		/* don't get the req_lock yet,
		 * we may sleep in drbd_wait_peer_seq */
		const int size = e->size;
		const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
		DEFINE_WAIT(wait);
		struct drbd_request *i;
		struct hlist_node *n;
		struct hlist_head *slot;
		int first;

		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		BUG_ON(mdev->ee_hash == NULL);
		BUG_ON(mdev->tl_hash == NULL);

		/* conflict detection and handling:
		 * 1. wait on the sequence number,
		 *    in case this data packet overtook ACK packets.
		 * 2. check our hash tables for conflicting requests.
		 *    we only need to walk the tl_hash, since an ee can not
		 *    have a conflict with an other ee: on the submitting
		 *    node, the corresponding req had already been conflicting,
		 *    and a conflicting req is never sent.
		 *
		 * Note: for two_primaries, we are protocol C,
		 * so there cannot be any request that is DONE
		 * but still on the transfer log.
		 *
		 * unconditionally add to the ee_hash.
		 *
		 * if no conflicting request is found:
		 *    submit.
		 *
		 * if any conflicting request is found
		 * that has not yet been acked,
		 * AND I have the "discard concurrent writes" flag:
		 *	 queue (via done_ee) the P_DISCARD_ACK; OUT.
		 *
		 * if any conflicting request is found:
		 *	 block the receiver, waiting on misc_wait
		 *	 until no more conflicting requests are there,
		 *	 or we get interrupted (disconnect).
		 *
		 *	 we do not just write after local io completion of those
		 *	 requests, but only after req is done completely, i.e.
		 *	 we wait for the P_DISCARD_ACK to arrive!
		 *
		 *	 then proceed normally, i.e. submit.
		 */
		if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
			goto out_interrupted;

		spin_lock_irq(&mdev->req_lock);

		hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));

#define OVERLAPS overlaps(i->sector, i->size, sector, size)
		slot = tl_hash_slot(mdev, sector);
		first = 1;
		for (;;) {
			int have_unacked = 0;
			int have_conflict = 0;
			prepare_to_wait(&mdev->misc_wait, &wait,
				TASK_INTERRUPTIBLE);
			hlist_for_each_entry(i, n, slot, colision) {
				if (OVERLAPS) {
					/* only ALERT on first iteration,
					 * we may be woken up early... */
					if (first)
						dev_alert(DEV, "%s[%u] Concurrent local write detected!"
						      " new: %llus +%u; pending: %llus +%u\n",
						      current->comm, current->pid,
						      (unsigned long long)sector, size,
						      (unsigned long long)i->sector, i->size);
					if (i->rq_state & RQ_NET_PENDING)
						++have_unacked;
					++have_conflict;
				}
			}
#undef OVERLAPS
			if (!have_conflict)
				break;

			/* Discard Ack only for the _first_ iteration */
			if (first && discard && have_unacked) {
				dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
				     (unsigned long long)sector);
				inc_unacked(mdev);
				e->w.cb = e_send_discard_ack;
				list_add_tail(&e->w.list, &mdev->done_ee);

				spin_unlock_irq(&mdev->req_lock);

				/* we could probably send that P_DISCARD_ACK ourselves,
				 * but I don't like the receiver using the msock */

				put_ldev(mdev);
				wake_asender(mdev);
				finish_wait(&mdev->misc_wait, &wait);
				return 1;
			}

			if (signal_pending(current)) {
				hlist_del_init(&e->colision);

				spin_unlock_irq(&mdev->req_lock);

				finish_wait(&mdev->misc_wait, &wait);
				goto out_interrupted;
			}

			spin_unlock_irq(&mdev->req_lock);
			if (first) {
				first = 0;
				dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
				     "sec=%llus\n", (unsigned long long)sector);
			} else if (discard) {
				/* we had none on the first iteration.
				 * there must be none now. */
				D_ASSERT(have_unacked == 0);
			}
			schedule();
			spin_lock_irq(&mdev->req_lock);
		}
		finish_wait(&mdev->misc_wait, &wait);
	}

	list_add(&e->w.list, &mdev->active_ee);
	spin_unlock_irq(&mdev->req_lock);

	switch (mdev->net_conf->wire_protocol) {
	case DRBD_PROT_C:
		inc_unacked(mdev);
		/* corresponding dec_unacked() in e_end_block()
		 * respective _drbd_clear_done_ee */
		break;
	case DRBD_PROT_B:
		/* I really don't like it that the receiver thread
		 * sends on the msock, but anyways */
		drbd_send_ack(mdev, P_RECV_ACK, e);
		break;
	case DRBD_PROT_A:
		/* nothing to do */
		break;
	}

	if (mdev->state.pdsk < D_INCONSISTENT) {
		/* In case we have the only disk of the cluster, */
		drbd_set_out_of_sync(mdev, e->sector, e->size);
		e->flags |= EE_CALL_AL_COMPLETE_IO;
		e->flags &= ~EE_MAY_SET_IN_SYNC;
		drbd_al_begin_io(mdev, e->sector);
	}

	if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
		return 1;

	/* drbd_submit_ee currently fails for one reason only:
	 * not being able to allocate enough bios.
	 * Is dropping the connection going to help? */
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	hlist_del_init(&e->colision);
	spin_unlock_irq(&mdev->req_lock);
	if (e->flags & EE_CALL_AL_COMPLETE_IO)
		drbd_al_complete_io(mdev, e->sector);

out_interrupted:
	/* yes, the epoch_size now is imbalanced.
	 * but we drop the connection anyways, so we don't have a chance to
	 * receive a barrier... atomic_inc(&mdev->epoch_size); */
	put_ldev(mdev);
	drbd_free_ee(mdev, e);
	return 0;
}
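
/*
 * The OVERLAPS test in the conflict loop above compares sector ranges;
 * its presumed expansion from drbd_int.h, for orientation (sizes are in
 * bytes, sectors are 512 bytes; illustrative, not authoritative):
 */
#if 0
#define overlaps(s1, l1, s2, l2) \
	(((s1) + ((l1) >> 9) > (s2)) && ((s1) < ((s2) + ((l2) >> 9))))
#endif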

/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
 * (more than 64 sectors) of activity we cannot account for with our own resync
 * activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 */
int drbd_rs_should_slow_down(struct drbd_conf *mdev)
{
	struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
	unsigned long db, dt, dbdt;
	int curr_events;
	int throttle = 0;

	/* feature disabled? */
	if (mdev->sync_conf.c_min_rate == 0)
		return 0;

	curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
		      (int)part_stat_read(&disk->part0, sectors[1]) -
			atomic_read(&mdev->rs_sect_ev);
	if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
		unsigned long rs_left;
		int i;

		mdev->rs_last_events = curr_events;

		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
		 * approx. */
		i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;

		if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
			rs_left = mdev->ov_left;
		else
			rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;

		dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
		if (!dt)
			dt++;
		db = mdev->rs_mark_left[i] - rs_left;
		dbdt = Bit2KB(db/dt);

		if (dbdt > mdev->sync_conf.c_min_rate)
			throttle = 1;
	}
	return throttle;
}
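
/*
 * Worked example (numbers purely illustrative, assuming the usual
 * 4 KiB of disk per bitmap bit): if the mark we compare against is
 * dt = 6 seconds old and db = 3000 bitmap bits were cleared since,
 * then db/dt = 500 bits/s, i.e. dbdt = Bit2KB(500) = 2000 KiB/s.
 * With c_min_rate configured to 1000 KiB/s, 2000 > 1000, so the
 * function reports "throttle" while the lower device looks busy.
 */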

static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
{
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct digest_info *di = NULL;
	int size, verb;
	unsigned int fault_type;
	struct p_block_req *p = &mdev->data.rbuf.block_req;

	sector = be64_to_cpu(p->sector);
	size   = be32_to_cpu(p->blksize);

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return 0;
	}
	if (sector + (size>>9) > capacity) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return 0;
	}

	if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
		verb = 1;
		switch (cmd) {
		case P_DATA_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
			break;
		case P_RS_DATA_REQUEST:
		case P_CSUM_RS_REQUEST:
		case P_OV_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY, p);
			break;
		case P_OV_REPLY:
			verb = 0;
			dec_rs_pending(mdev);
			drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
			break;
		default:
			dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
				cmdname(cmd));
		}
		if (verb && __ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not satisfy peer's read request, "
			    "no local data.\n");

		/* drain possibly payload */
		return drbd_drain_block(mdev, digest_size);
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
	if (!e) {
		put_ldev(mdev);
		return 0;
	}

	switch (cmd) {
	case P_DATA_REQUEST:
		e->w.cb = w_e_end_data_req;
		fault_type = DRBD_FAULT_DT_RD;
		/* application IO, don't drbd_rs_begin_io */
		goto submit;

	case P_RS_DATA_REQUEST:
		e->w.cb = w_e_end_rsdata_req;
		fault_type = DRBD_FAULT_RS_RD;
		break;

	case P_OV_REPLY:
	case P_CSUM_RS_REQUEST:
		fault_type = DRBD_FAULT_RS_RD;
		di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
		if (!di)
			goto out_free_e;

		di->digest_size = digest_size;
		di->digest = (((char *)di)+sizeof(struct digest_info));

		e->digest = di;
		e->flags |= EE_HAS_DIGEST;

		if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
			goto out_free_e;

		if (cmd == P_CSUM_RS_REQUEST) {
			D_ASSERT(mdev->agreed_pro_version >= 89);
			e->w.cb = w_e_end_csum_rs_req;
		} else if (cmd == P_OV_REPLY) {
			/* track progress, we may need to throttle */
			atomic_add(size >> 9, &mdev->rs_sect_in);
			e->w.cb = w_e_end_ov_reply;
			dec_rs_pending(mdev);
			/* drbd_rs_begin_io done when we sent this request,
			 * but accounting still needs to be done. */
			goto submit_for_resync;
		}
		break;

	case P_OV_REQUEST:
		if (mdev->ov_start_sector == ~(sector_t)0 &&
		    mdev->agreed_pro_version >= 90) {
			unsigned long now = jiffies;
			int i;
			mdev->ov_start_sector = sector;
			mdev->ov_position = sector;
			mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
			mdev->rs_total = mdev->ov_left;
			for (i = 0; i < DRBD_SYNC_MARKS; i++) {
				mdev->rs_mark_left[i] = mdev->ov_left;
				mdev->rs_mark_time[i] = now;
			}
			dev_info(DEV, "Online Verify start sector: %llu\n",
					(unsigned long long)sector);
		}
		e->w.cb = w_e_end_ov_req;
		fault_type = DRBD_FAULT_RS_RD;
		break;

	default:
		dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
		    cmdname(cmd));
		fault_type = DRBD_FAULT_MAX;
		goto out_free_e;
	}

	/* Throttle, drbd_rs_begin_io and submit should become asynchronous
	 * wrt the receiver, but it is not as straightforward as it may seem.
	 * Various places in the resync start and stop logic assume resync
	 * requests are processed in order, requeuing this on the worker thread
	 * introduces a bunch of new code for synchronization between threads.
	 *
	 * Unlimited throttling before drbd_rs_begin_io may stall the resync
	 * "forever", throttling after drbd_rs_begin_io will lock that extent
	 * for application writes for the same time. For now, just throttle
	 * here, where the rest of the code expects the receiver to sleep for
	 * a while, anyways. */

	/* Throttle before drbd_rs_begin_io, as that locks out application IO;
	 * this defers syncer requests for some time, before letting at least
	 * one request through. The resync controller on the receiving side
	 * will adapt to the incoming rate accordingly.
	 *
	 * We cannot throttle here if remote is Primary/SyncTarget:
	 * we would also throttle its application reads.
	 * In that case, throttling is done on the SyncTarget only.
	 */
	if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev))
		msleep(100);
	if (drbd_rs_begin_io(mdev, e->sector))
		goto out_free_e;

submit_for_resync:
	atomic_add(size >> 9, &mdev->rs_sect_ev);

submit:
	inc_unacked(mdev);
	spin_lock_irq(&mdev->req_lock);
	list_add_tail(&e->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
		return 1;

	/* drbd_submit_ee currently fails for one reason only:
	 * not being able to allocate enough bios.
	 * Is dropping the connection going to help? */
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);
	/* no drbd_rs_complete_io(), we are dropping the connection anyways */

out_free_e:
	put_ldev(mdev);
	drbd_free_ee(mdev, e);
	return 0;
}

static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
{
	int self, peer, rv = -100;
	unsigned long ch_self, ch_peer;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	ch_peer = mdev->p_uuid[UI_SIZE];
	ch_self = mdev->comm_bm_set;

	switch (mdev->net_conf->after_sb_0p) {
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
	case ASB_CALL_HELPER:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_DISCARD_YOUNGER_PRI:
		if (self == 0 && peer == 1) {
			rv = -1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv =  1;
			break;
		}
		/* Else fall through to one of the other strategies... */
	case ASB_DISCARD_OLDER_PRI:
		if (self == 0 && peer == 1) {
			rv = 1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv = -1;
			break;
		}
		/* Else fall through to one of the other strategies... */
		dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
		     "Using discard-least-changes instead\n");
	case ASB_DISCARD_ZERO_CHG:
		if (ch_peer == 0 && ch_self == 0) {
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
				? -1 : 1;
			break;
		} else {
			if (ch_peer == 0) { rv =  1; break; }
			if (ch_self == 0) { rv = -1; break; }
		}
		if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
			break;
	case ASB_DISCARD_LEAST_CHG:
		if	(ch_self < ch_peer)
			rv = -1;
		else if (ch_self > ch_peer)
			rv =  1;
		else /* ( ch_self == ch_peer ) */
		     /* Well, then use something else. */
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
				? -1 : 1;
		break;
	case ASB_DISCARD_LOCAL:
		rv = -1;
		break;
	case ASB_DISCARD_REMOTE:
		rv =  1;
	}

	return rv;
}

static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
{
	int self, peer, hg, rv = -100;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	switch (mdev->net_conf->after_sb_1p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CONSENSUS:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_SECONDARY)
			rv = hg;
		if (hg == 1  && mdev->state.role == R_PRIMARY)
			rv = hg;
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCARD_SECONDARY:
		return mdev->state.role == R_PRIMARY ? 1 : -1;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_PRIMARY) {
			self = drbd_set_role(mdev, R_SECONDARY, 0);
			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (self != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}

static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
{
	int self, peer, hg, rv = -100;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	switch (mdev->net_conf->after_sb_2p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1) {
			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (self != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}

static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
			   u64 bits, u64 flags)
{
	if (!uuid) {
		dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
		return;
	}
	dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
	     text,
	     (unsigned long long)uuid[UI_CURRENT],
	     (unsigned long long)uuid[UI_BITMAP],
	     (unsigned long long)uuid[UI_HISTORY_START],
	     (unsigned long long)uuid[UI_HISTORY_END],
	     (unsigned long long)bits,
	     (unsigned long long)flags);
}

/*
  100	after split brain try auto recover
    2	C_SYNC_SOURCE set BitMap
    1	C_SYNC_SOURCE use BitMap
    0	no Sync
   -1	C_SYNC_TARGET use BitMap
   -2	C_SYNC_TARGET set BitMap
 -100	after split brain, disconnect
-1000	unrelated data
 */
static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
{
	u64 self, peer;
	int i, j;

	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);

	*rule_nr = 10;
	if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
		return 0;

	*rule_nr = 20;
	if ((self == UUID_JUST_CREATED || self == (u64)0) &&
	     peer != UUID_JUST_CREATED)
		return -2;

	*rule_nr = 30;
	if (self != UUID_JUST_CREATED &&
	    (peer == UUID_JUST_CREATED || peer == (u64)0))
		return 2;

	if (self == peer) {
		int rct, dc; /* roles at crash time */

		if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {

			if (mdev->agreed_pro_version < 91)
				return -1001;

			if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
				dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
				drbd_uuid_set_bm(mdev, 0UL);

				drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
					       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
			} else {
				dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
			}

			return 1;
		}

		if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {

			if (mdev->agreed_pro_version < 91)
				return -1001;

			if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
				dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");

				mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
				mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
				mdev->p_uuid[UI_BITMAP] = 0UL;

				drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
			} else {
				dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
			}

			return -1;
		}

		/* Common power [off|failure] */
		rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
			(mdev->p_uuid[UI_FLAGS] & 2);
		/* lowest bit is set when we were primary,
		 * next bit (weight 2) is set when peer was primary */
		*rule_nr = 40;

		switch (rct) {
		case 0: /* !self_pri && !peer_pri */ return 0;
		case 1: /*  self_pri && !peer_pri */ return 1;
		case 2: /* !self_pri &&  peer_pri */ return -1;
		case 3: /*  self_pri &&  peer_pri */
			dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
			return dc ? -1 : 1;
		}
	}

	*rule_nr = 50;
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer)
		return -1;

	*rule_nr = 51;
	peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
		peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1);
		if (self == peer) {
			/* The last P_SYNC_UUID did not get through. Undo the last start of
			   resync as sync source modifications of the peer's UUIDs. */

			if (mdev->agreed_pro_version < 91)
				return -1001;

			mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
			mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
			return -1;
		}
	}

	*rule_nr = 60;
	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		peer = mdev->p_uuid[i] & ~((u64)1);
		if (self == peer)
			return -2;
	}

	*rule_nr = 70;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	if (self == peer)
		return 1;

	*rule_nr = 71;
	self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1);
		peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
		if (self == peer) {
			/* The last P_SYNC_UUID did not get through. Undo the last start of
			   resync as sync source modifications of our UUIDs. */

			if (mdev->agreed_pro_version < 91)
				return -1001;

			_drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
			_drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);

			dev_info(DEV, "Undid last start of resync:\n");

			drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
				       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);

			return 1;
		}
	}

	*rule_nr = 80;
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		if (self == peer)
			return 2;
	}

	*rule_nr = 90;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer && self != ((u64)0))
		return 100;

	*rule_nr = 100;
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
			peer = mdev->p_uuid[j] & ~((u64)1);
			if (self == peer)
				return -100;
		}
	}

	return -1000;
}

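/*
 * Illustration only (editor's sketch, not part of the driver): the
 * comparison above works on the generation-UUID slots UI_CURRENT,
 * UI_BITMAP, UI_HISTORY_START and UI_HISTORY_END, always masking out
 * bit 0 before comparing.  A tiny user-space sketch of the rule 60/80
 * style matching ("my current UUID appears in the peer's history, so
 * the peer has seen my data and moved on"); only the slot layout is
 * taken from this file.
 */
#if 0
#include <stdint.h>

enum { UI_CURRENT, UI_BITMAP, UI_HISTORY_START, UI_HISTORY_END, UI_SIZE };

static int peer_is_ahead(const uint64_t *self_uuid, const uint64_t *peer_uuid)
{
	uint64_t self_cur = self_uuid[UI_CURRENT] & ~(uint64_t)1;
	int i;

	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++)
		if (self_cur == (peer_uuid[i] & ~(uint64_t)1))
			return 1;	/* peer's history contains my current UUID */
	return 0;
}
#endif
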
/* drbd_sync_handshake() returns the new conn state on success, or
   CONN_MASK (-1) on failure.
 */
static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
					   enum drbd_disk_state peer_disk) __must_hold(local)
{
	int hg, rule_nr;
	enum drbd_conns rv = C_MASK;
	enum drbd_disk_state mydisk;

	mydisk = mdev->state.disk;
	if (mydisk == D_NEGOTIATING)
		mydisk = mdev->new_state_tmp.disk;

	dev_info(DEV, "drbd_sync_handshake:\n");
	drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
	drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
		       mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

	hg = drbd_uuid_compare(mdev, &rule_nr);

	dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);

	if (hg == -1000) {
		dev_alert(DEV, "Unrelated data, aborting!\n");
		return C_MASK;
	}
	if (hg == -1001) {
		dev_alert(DEV, "To resolve this both sides have to support at least protocol 91\n");
		return C_MASK;
	}

	if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
	    (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
		int f = (hg == -100) || abs(hg) == 2;
		hg = mydisk > D_INCONSISTENT ? 1 : -1;
		if (f)
			hg = hg*2;
		dev_info(DEV, "Becoming sync %s due to disk states.\n",
		     hg > 0 ? "source" : "target");
	}

	if (abs(hg) == 100)
		drbd_khelper(mdev, "initial-split-brain");

	if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
		int pcount = (mdev->state.role == R_PRIMARY)
			   + (peer_role == R_PRIMARY);
		int forced = (hg == -100);

		switch (pcount) {
		case 0:
			hg = drbd_asb_recover_0p(mdev);
			break;
		case 1:
			hg = drbd_asb_recover_1p(mdev);
			break;
		case 2:
			hg = drbd_asb_recover_2p(mdev);
			break;
		}
		if (abs(hg) < 100) {
			dev_warn(DEV, "Split-Brain detected, %d primaries, "
			     "automatically solved. Sync from %s node\n",
			     pcount, (hg < 0) ? "peer" : "this");
			if (forced) {
				dev_warn(DEV, "Doing a full sync, since"
				     " UUIDs were ambiguous.\n");
				hg = hg*2;
			}
		}
	}

	if (hg == -100) {
		if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
			hg = -1;
		if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
			hg = 1;

		if (abs(hg) < 100)
			dev_warn(DEV, "Split-Brain detected, manually solved. "
			     "Sync from %s node\n",
			     (hg < 0) ? "peer" : "this");
	}

	if (hg == -100) {
		/* FIXME this log message is not correct if we end up here
		 * after an attempted attach on a diskless node.
		 * We just refuse to attach -- well, we drop the "connection"
		 * to that disk, in a way... */
		dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
		drbd_khelper(mdev, "split-brain");
		return C_MASK;
	}

	if (hg > 0 && mydisk <= D_INCONSISTENT) {
		dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
		return C_MASK;
	}

	if (hg < 0 && /* by intention we do not use mydisk here. */
	    mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
		switch (mdev->net_conf->rr_conflict) {
		case ASB_CALL_HELPER:
			drbd_khelper(mdev, "pri-lost");
			/* fall through */
		case ASB_DISCONNECT:
			dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
			return C_MASK;
		case ASB_VIOLENTLY:
			dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
			     " assumption\n");
		}
	}

	if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
		if (hg == 0)
			dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
		else
			dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
				 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
				 abs(hg) >= 2 ? "full" : "bit-map based");
		return C_MASK;
	}

	if (abs(hg) >= 2) {
		dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
			return C_MASK;
	}

	if (hg > 0) { /* become sync source. */
		rv = C_WF_BITMAP_S;
	} else if (hg < 0) { /* become sync target */
		rv = C_WF_BITMAP_T;
	} else {
		rv = C_CONNECTED;
		if (drbd_bm_total_weight(mdev)) {
			dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
			     drbd_bm_total_weight(mdev));
		}
	}

	return rv;
}

/* returns 1 if invalid */
static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
{
	/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
	if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
	    (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
		return 0;

	/* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
	if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
	    self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
		return 1;

	/* everything else is valid if they are equal on both sides. */
	if (peer == self)
		return 0;

	/* everything else is invalid. */
	return 1;
}

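/*
 * Illustration only (editor's sketch, not part of the driver): the only
 * asymmetric pair cmp_after_sb() accepts is discard-remote on one side
 * combined with discard-local on the other; everything else must match
 * exactly.  A hypothetical check of two configurations:
 */
#if 0
	int ok, bad;

	/* valid: the two sides complement each other */
	ok  = !cmp_after_sb(ASB_DISCARD_REMOTE, ASB_DISCARD_LOCAL);
	/* invalid: discard-local on both sides would discard both datasets */
	bad =  cmp_after_sb(ASB_DISCARD_LOCAL, ASB_DISCARD_LOCAL);
#endif
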
static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_protocol *p = &mdev->data.rbuf.protocol;
	int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
	int p_want_lose, p_two_primaries, cf;
	char p_integrity_alg[SHARED_SECRET_MAX] = "";

	p_proto		= be32_to_cpu(p->protocol);
	p_after_sb_0p	= be32_to_cpu(p->after_sb_0p);
	p_after_sb_1p	= be32_to_cpu(p->after_sb_1p);
	p_after_sb_2p	= be32_to_cpu(p->after_sb_2p);
	p_two_primaries = be32_to_cpu(p->two_primaries);
	cf		= be32_to_cpu(p->conn_flags);
	p_want_lose	= cf & CF_WANT_LOSE;

	clear_bit(CONN_DRY_RUN, &mdev->flags);

	if (cf & CF_DRY_RUN)
		set_bit(CONN_DRY_RUN, &mdev->flags);

	if (p_proto != mdev->net_conf->wire_protocol) {
		dev_err(DEV, "incompatible communication protocols\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
		dev_err(DEV, "incompatible after-sb-0pri settings\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
		dev_err(DEV, "incompatible after-sb-1pri settings\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
		dev_err(DEV, "incompatible after-sb-2pri settings\n");
		goto disconnect;
	}

	if (p_want_lose && mdev->net_conf->want_lose) {
		dev_err(DEV, "both sides have the 'want_lose' flag set\n");
		goto disconnect;
	}

	if (p_two_primaries != mdev->net_conf->two_primaries) {
		dev_err(DEV, "incompatible setting of the two-primaries options\n");
		goto disconnect;
	}

	if (mdev->agreed_pro_version >= 87) {
		unsigned char *my_alg = mdev->net_conf->integrity_alg;

		if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
			return FALSE;

		p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
		if (strcmp(p_integrity_alg, my_alg)) {
			dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
			goto disconnect;
		}
		dev_info(DEV, "data-integrity-alg: %s\n",
		     my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
	}

	return TRUE;

disconnect:
	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	return FALSE;
}

/* helper function
 * input: alg name, feature name
 * return: NULL (alg name was "")
 *         ERR_PTR(error) if something goes wrong
 *         or the crypto hash ptr, if it worked out ok. */
struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
		const char *alg, const char *name)
{
	struct crypto_hash *tfm;

	if (!alg[0])
		return NULL;

	tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
			alg, name, PTR_ERR(tfm));
		return tfm;
	}
	if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
		crypto_free_hash(tfm);
		dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
		return ERR_PTR(-EINVAL);
	}
	return tfm;
}

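/*
 * Illustration only (editor's sketch, not part of the driver): a typical
 * caller follows the three-way contract documented above.  The variable
 * names and the "sha1" algorithm string are assumptions for the example.
 */
#if 0
	struct crypto_hash *tfm;

	tfm = drbd_crypto_alloc_digest_safe(mdev, "sha1", "verify-alg");
	if (IS_ERR(tfm))
		goto disconnect;	/* allocation failed, or not a digest */
	/* tfm may also be NULL here, meaning the algorithm name was "" */
#endif
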
static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
{
	int ok = TRUE;
	struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
	unsigned int header_size, data_size, exp_max_sz;
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	const int apv = mdev->agreed_pro_version;
	int *rs_plan_s = NULL;
	int fifo_size = 0;

	exp_max_sz  = apv <= 87 ? sizeof(struct p_rs_param)
		    : apv == 88 ? sizeof(struct p_rs_param)
					+ SHARED_SECRET_MAX
		    : apv <= 94 ? sizeof(struct p_rs_param_89)
		    : /* apv >= 95 */ sizeof(struct p_rs_param_95);

	if (packet_size > exp_max_sz) {
		dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
		    packet_size, exp_max_sz);
		return FALSE;
	}

	if (apv <= 88) {
		header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
		data_size   = packet_size - header_size;
	} else if (apv <= 94) {
		header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
		data_size   = packet_size - header_size;
		D_ASSERT(data_size == 0);
	} else {
		header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
		data_size   = packet_size - header_size;
		D_ASSERT(data_size == 0);
	}

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
		return FALSE;

	mdev->sync_conf.rate = be32_to_cpu(p->rate);

	if (apv >= 88) {
		if (apv == 88) {
			if (data_size > SHARED_SECRET_MAX) {
				dev_err(DEV, "verify-alg too long, "
				    "peer wants %u, accepting only %u byte\n",
						data_size, SHARED_SECRET_MAX);
				return FALSE;
			}

			if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
				return FALSE;

			/* we expect NUL terminated string */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[data_size-1] == 0);
			p->verify_alg[data_size-1] = 0;

		} else /* apv >= 89 */ {
			/* we still expect NUL terminated strings */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
			D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
			p->verify_alg[SHARED_SECRET_MAX-1] = 0;
			p->csums_alg[SHARED_SECRET_MAX-1] = 0;
		}

		if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
				    mdev->sync_conf.verify_alg, p->verify_alg);
				goto disconnect;
			}
			verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->verify_alg, "verify-alg");
			if (IS_ERR(verify_tfm)) {
				verify_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
				    mdev->sync_conf.csums_alg, p->csums_alg);
				goto disconnect;
			}
			csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->csums_alg, "csums-alg");
			if (IS_ERR(csums_tfm)) {
				csums_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv > 94) {
			mdev->sync_conf.rate = be32_to_cpu(p->rate);
			mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
			mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
			mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
			mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);

			fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
			if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
				rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
				if (!rs_plan_s) {
					dev_err(DEV, "kmalloc of fifo_buffer failed");
					goto disconnect;
				}
			}
		}

		spin_lock(&mdev->peer_seq_lock);
		/* lock against drbd_nl_syncer_conf() */
		if (verify_tfm) {
			strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
			mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
			crypto_free_hash(mdev->verify_tfm);
			mdev->verify_tfm = verify_tfm;
			dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
		}
		if (csums_tfm) {
			strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
			mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
			crypto_free_hash(mdev->csums_tfm);
			mdev->csums_tfm = csums_tfm;
			dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
		}
		if (fifo_size != mdev->rs_plan_s.size) {
			kfree(mdev->rs_plan_s.values);
			mdev->rs_plan_s.values = rs_plan_s;
			mdev->rs_plan_s.size = fifo_size;
			mdev->rs_planed = 0;
		}
		spin_unlock(&mdev->peer_seq_lock);
	}

	return ok;

disconnect:
	/* just for completeness: actually not needed,
	 * as this is not reached if csums_tfm was ok. */
	crypto_free_hash(csums_tfm);
	/* but free the verify_tfm again, if csums_tfm did not work out */
	crypto_free_hash(verify_tfm);
	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	return FALSE;
}

static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
{
	/* sorry, we currently have no working implementation
	 * of distributed TCQ */
}

/* warn if the arguments differ by more than 12.5% */
static void warn_if_differ_considerably(struct drbd_conf *mdev,
	const char *s, sector_t a, sector_t b)
{
	sector_t d;

	if (a == 0 || b == 0)
		return;
	d = (a > b) ? (a - b) : (b - a);
	if (d > (a>>3) || d > (b>>3))
		dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
		     (unsigned long long)a, (unsigned long long)b);
}

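/*
 * Editor's note, illustration only (not part of the driver): a>>3 is a/8,
 * so the warning above fires when the absolute difference exceeds 12.5%
 * of either argument.  A quick worked example:
 */
#if 0
#include <assert.h>

static void example(void)
{
	unsigned long a = 1000, b = 1200, d = b - a;

	assert(d > (a >> 3));	/* 200 > 125: differs by more than 12.5% */
	/* with b = 1100 instead, d = 100 <= 125: no warning */
}
#endif
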
static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_sizes *p = &mdev->data.rbuf.sizes;
	enum determine_dev_size dd = unchanged;
	unsigned int max_seg_s;
	sector_t p_size, p_usize, my_usize;
	int ldsc = 0; /* local disk size changed */
	enum dds_flags ddsf;

	p_size = be64_to_cpu(p->d_size);
	p_usize = be64_to_cpu(p->u_size);

	if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
		dev_err(DEV, "some backing storage is needed\n");
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return FALSE;
	}

	/* just store the peer's disk size for now.
	 * we still need to figure out whether we accept that. */
	mdev->p_size = p_size;

	if (get_ldev(mdev)) {
		warn_if_differ_considerably(mdev, "lower level device sizes",
			   p_size, drbd_get_max_capacity(mdev->ldev));
		warn_if_differ_considerably(mdev, "user requested size",
					    p_usize, mdev->ldev->dc.disk_size);

		/* if this is the first connect, or an otherwise expected
		 * param exchange, choose the minimum */
		if (mdev->state.conn == C_WF_REPORT_PARAMS)
			p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
					       p_usize);

		my_usize = mdev->ldev->dc.disk_size;

		if (mdev->ldev->dc.disk_size != p_usize) {
			mdev->ldev->dc.disk_size = p_usize;
			dev_info(DEV, "Peer sets u_size to %lu sectors\n",
				 (unsigned long)mdev->ldev->dc.disk_size);
		}

		/* Never shrink a device with usable data during connect.
		   But allow online shrinking if we are connected. */
		if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
		    drbd_get_capacity(mdev->this_bdev) &&
		    mdev->state.disk >= D_OUTDATED &&
		    mdev->state.conn < C_CONNECTED) {
			dev_err(DEV, "The peer's disk size is too small!\n");
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			mdev->ldev->dc.disk_size = my_usize;
			put_ldev(mdev);
			return FALSE;
		}
		put_ldev(mdev);
	}

	ddsf = be16_to_cpu(p->dds_flags);
	if (get_ldev(mdev)) {
		dd = drbd_determin_dev_size(mdev, ddsf);
		put_ldev(mdev);
		if (dd == dev_size_error)
			return FALSE;
		drbd_md_sync(mdev);
	} else {
		/* I am diskless, need to accept the peer's size. */
		drbd_set_my_capacity(mdev, p_size);
	}

	if (get_ldev(mdev)) {
		if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
			mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
			ldsc = 1;
		}

		if (mdev->agreed_pro_version < 94)
			max_seg_s = be32_to_cpu(p->max_segment_size);
		else if (mdev->agreed_pro_version == 94)
			max_seg_s = DRBD_MAX_SIZE_H80_PACKET;
		else /* drbd 8.3.8 onwards */
			max_seg_s = DRBD_MAX_SEGMENT_SIZE;

		if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
			drbd_setup_queue_param(mdev, max_seg_s);

		drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
		put_ldev(mdev);
	}

	if (mdev->state.conn > C_WF_REPORT_PARAMS) {
		if (be64_to_cpu(p->c_size) !=
		    drbd_get_capacity(mdev->this_bdev) || ldsc) {
			/* we have different sizes, probably peer
			 * needs to know my new size... */
			drbd_send_sizes(mdev, 0, ddsf);
		}
		if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
		    (dd == grew && mdev->state.conn == C_CONNECTED)) {
			if (mdev->state.pdsk >= D_INCONSISTENT &&
			    mdev->state.disk >= D_INCONSISTENT) {
				if (ddsf & DDSF_NO_RESYNC)
					dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
				else
					resync_after_online_grow(mdev);
			} else
				set_bit(RESYNC_AFTER_NEG, &mdev->flags);
		}
	}

	return TRUE;
}

static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_uuids *p = &mdev->data.rbuf.uuids;
	u64 *p_uuid;
	int i;

	p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);

	for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
		p_uuid[i] = be64_to_cpu(p->uuid[i]);

	kfree(mdev->p_uuid);
	mdev->p_uuid = p_uuid;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.disk < D_INCONSISTENT &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return FALSE;
	}

	if (get_ldev(mdev)) {
		int skip_initial_sync =
			mdev->state.conn == C_CONNECTED &&
			mdev->agreed_pro_version >= 90 &&
			mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
			(p_uuid[UI_FLAGS] & 8);
		if (skip_initial_sync) {
			dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
			drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
					"clear_n_write from receive_uuids");
			_drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			drbd_md_sync(mdev);
		}
		put_ldev(mdev);
	} else if (mdev->state.disk < D_INCONSISTENT &&
		   mdev->state.role == R_PRIMARY) {
		/* I am a diskless primary, the peer just created a new current UUID
		   for me. */
		drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
	}

	/* Before we test for the disk state, we should wait until an eventually
	   ongoing cluster wide state change is finished. That is important if
	   we are primary and are detaching from our disk. We need to see the
	   new disk state... */
	wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
	if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
		drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);

	return TRUE;
}

/**
 * convert_state() - Converts the peer's view of the cluster state to our point of view
 * @ps:		The state as seen by the peer.
 */
static union drbd_state convert_state(union drbd_state ps)
{
	union drbd_state ms;

	static enum drbd_conns c_tab[] = {
		[C_CONNECTED] = C_CONNECTED,

		[C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
		[C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
		[C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
		[C_VERIFY_S] = C_VERIFY_T,
	};

	ms.i = ps.i;

	ms.conn = c_tab[ps.conn];
	ms.peer = ps.role;
	ms.role = ps.peer;
	ms.pdsk = ps.disk;
	ms.disk = ps.pdsk;
	ms.peer_isp = (ps.aftr_isp | ps.user_isp);

	return ms;
}

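/*
 * Illustration only (editor's sketch, not part of the driver):
 * convert_state() mirrors a state packet: the peer's "role" becomes my
 * "peer", its "disk" becomes my "pdsk", and asymmetric connection states
 * swap sides via c_tab.  Hypothetical reading of one packet:
 */
#if 0
	/* Peer reports: "I am Primary, you are Secondary, I am
	 * StartingSyncS".  After convert_state() we read the same facts
	 * from our side: "peer is Primary, I am Secondary, and the
	 * connection is StartingSyncT". */
	union drbd_state ms = convert_state(ps);
#endif
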
static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_req_state *p = &mdev->data.rbuf.req_state;
	union drbd_state mask, val;
	int rv;

	mask.i = be32_to_cpu(p->mask);
	val.i = be32_to_cpu(p->val);

	if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
	    test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
		drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
		return TRUE;
	}

	mask = convert_state(mask);
	val = convert_state(val);

	rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);

	drbd_send_sr_reply(mdev, rv);
	drbd_md_sync(mdev);

	return TRUE;
}

static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_state *p = &mdev->data.rbuf.state;
	union drbd_state os, ns, peer_state;
	enum drbd_disk_state real_peer_disk;
	enum chg_state_flags cs_flags;
	int rv;

	peer_state.i = be32_to_cpu(p->state);

	real_peer_disk = peer_state.disk;
	if (peer_state.disk == D_NEGOTIATING) {
		real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
		dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
	}

	spin_lock_irq(&mdev->req_lock);
 retry:
	os = ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	/* peer says his disk is uptodate, while we think it is inconsistent,
	 * and this happens while we think we have a sync going on. */
	if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
	    os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
		/* If we are (becoming) SyncSource, but peer is still in sync
		 * preparation, ignore its uptodate-ness to avoid flapping, it
		 * will change to inconsistent once the peer reaches active
		 * syncing states.
		 * It may have changed syncer-paused flags, however, so we
		 * cannot ignore this completely. */
		if (peer_state.conn > C_CONNECTED &&
		    peer_state.conn < C_SYNC_SOURCE)
			real_peer_disk = D_INCONSISTENT;

		/* if peer_state changes to connected at the same time,
		 * it explicitly notifies us that it finished resync.
		 * Maybe we should finish it up, too? */
		else if (os.conn >= C_SYNC_SOURCE &&
			 peer_state.conn == C_CONNECTED) {
			if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
				drbd_resync_finished(mdev);
			return TRUE;
		}
	}

	/* peer says his disk is inconsistent, while we think it is uptodate,
	 * and this happens while the peer still thinks we have a sync going on,
	 * but we think we are already done with the sync.
	 * We ignore this to avoid flapping pdsk.
	 * This should not happen, if the peer is a recent version of drbd. */
	if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
	    os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
		real_peer_disk = D_UP_TO_DATE;

	if (ns.conn == C_WF_REPORT_PARAMS)
		ns.conn = C_CONNECTED;

	if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
		int cr; /* consider resync */

		/* if we established a new connection */
		cr  = (os.conn < C_CONNECTED);
		/* if we had an established connection
		 * and one of the nodes newly attaches a disk */
		cr |= (os.conn == C_CONNECTED &&
		       (peer_state.disk == D_NEGOTIATING ||
			os.disk == D_NEGOTIATING));
		/* if we have both been inconsistent, and the peer has been
		 * forced to be UpToDate with --overwrite-data */
		cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
		/* if we had been plain connected, and the admin requested to
		 * start a sync by "invalidate" or "invalidate-remote" */
		cr |= (os.conn == C_CONNECTED &&
		       (peer_state.conn >= C_STARTING_SYNC_S &&
			peer_state.conn <= C_WF_BITMAP_T));

		if (cr)
			ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);

		put_ldev(mdev);
		if (ns.conn == C_MASK) {
			ns.conn = C_CONNECTED;
			if (mdev->state.disk == D_NEGOTIATING) {
				drbd_force_state(mdev, NS(disk, D_FAILED));
			} else if (peer_state.disk == D_NEGOTIATING) {
				dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
				peer_state.disk = D_DISKLESS;
				real_peer_disk = D_DISKLESS;
			} else {
				if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
					return FALSE;
				D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
				return FALSE;
			}
		}
	}

	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.i != os.i)
		goto retry;
	clear_bit(CONSIDER_RESYNC, &mdev->flags);
	ns.peer = peer_state.role;
	ns.pdsk = real_peer_disk;
	ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
	if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
		ns.disk = mdev->new_state_tmp.disk;
	cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
	if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
	    test_bit(NEW_CUR_UUID, &mdev->flags)) {
		/* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
		   for temporary network outages! */
		spin_unlock_irq(&mdev->req_lock);
		dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
		tl_clear(mdev);
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
		drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
		return FALSE;
	}
	rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
	ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	if (rv < SS_SUCCESS) {
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return FALSE;
	}

	if (os.conn > C_WF_REPORT_PARAMS) {
		if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
		    peer_state.disk != D_NEGOTIATING) {
			/* we want resync, peer has not yet decided to sync... */
			/* Nowadays only used when forcing a node into primary role and
			   setting its disk to UpToDate with that */
			drbd_send_uuids(mdev);
			drbd_send_state(mdev);
		}
	}

	mdev->net_conf->want_lose = 0;

	drbd_md_sync(mdev); /* update connected indicator, la_size, ... */

	return TRUE;
}

static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;

	wait_event(mdev->misc_wait,
		   mdev->state.conn == C_WF_SYNC_UUID ||
		   mdev->state.conn < C_CONNECTED ||
		   mdev->state.disk < D_NEGOTIATING);

	/* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */

	/* Here the _drbd_uuid_ functions are right, current should
	   _not_ be rotated into the history */
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		_drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
		_drbd_uuid_set(mdev, UI_BITMAP, 0UL);

		drbd_start_resync(mdev, C_SYNC_TARGET);

		put_ldev(mdev);
	} else
		dev_err(DEV, "Ignoring SyncUUID packet!\n");

	return TRUE;
}

enum receive_bitmap_ret { OK, DONE, FAILED };

static enum receive_bitmap_ret
receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
	unsigned long *buffer, struct bm_xfer_ctx *c)
{
	unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
	unsigned want = num_words * sizeof(long);

	if (want != data_size) {
		dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
		return FAILED;
	}
	if (want == 0)
		return DONE;
	if (drbd_recv(mdev, buffer, want) != want)
		return FAILED;

	drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);

	c->word_offset += num_words;
	c->bit_offset = c->word_offset * BITS_PER_LONG;
	if (c->bit_offset > c->bm_bits)
		c->bit_offset = c->bm_bits;

	return OK;
}

static enum receive_bitmap_ret
recv_bm_rle_bits(struct drbd_conf *mdev,
		struct p_compressed_bm *p,
		struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	u64 look_ahead;
	u64 rl;
	u64 tmp;
	unsigned long s = c->bit_offset;
	unsigned long e;
	int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
	int toggle = DCBP_get_start(p);
	int have;
	int bits;

	bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));

	bits = bitstream_get_bits(&bs, &look_ahead, 64);
	if (bits < 0)
		return FAILED;

	for (have = bits; have > 0; s += rl, toggle = !toggle) {
		bits = vli_decode_bits(&rl, look_ahead);
		if (bits <= 0)
			return FAILED;

		if (toggle) {
			e = s + rl - 1;
			if (e >= c->bm_bits) {
				dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
				return FAILED;
			}
			_drbd_bm_set_bits(mdev, s, e);
		}

		if (have < bits) {
			dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
				have, bits, look_ahead,
				(unsigned int)(bs.cur.b - p->code),
				(unsigned int)bs.buf_len);
			return FAILED;
		}
		look_ahead >>= bits;
		have -= bits;

		bits = bitstream_get_bits(&bs, &tmp, 64 - have);
		if (bits < 0)
			return FAILED;
		look_ahead |= tmp << have;
		have += bits;
	}

	c->bit_offset = s;
	bm_xfer_ctx_bit_to_word_offset(c);

	return (s == c->bm_bits) ? DONE : OK;
}

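/*
 * Illustration only (editor's sketch, not part of the driver): the decoder
 * above consumes variable-length-coded run lengths and alternates between
 * runs of set bits and runs of clear bits (the toggle).  A minimal
 * user-space sketch of that toggle scheme, with plain integer run lengths
 * standing in for DRBD's VLI bitstream:
 */
#if 0
#include <stddef.h>
#include <stdint.h>

/* decode runs[] into bit ranges; only runs with toggle==1 set bits */
static size_t decode_runs(const uint64_t *runs, size_t n, int toggle,
			  void (*set_bits)(size_t s, size_t e))
{
	size_t s = 0, i;

	for (i = 0; i < n; i++, toggle = !toggle) {
		if (toggle && runs[i])
			set_bits(s, s + runs[i] - 1);
		s += runs[i];
	}
	return s;	/* total number of bits covered */
}
#endif
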
static enum receive_bitmap_ret
decode_bitmap_c(struct drbd_conf *mdev,
		struct p_compressed_bm *p,
		struct bm_xfer_ctx *c)
{
	if (DCBP_get_code(p) == RLE_VLI_Bits)
		return recv_bm_rle_bits(mdev, p, c);

	/* other variants had been implemented for evaluation,
	 * but have been dropped as this one turned out to be "best"
	 * during all our tests. */

	dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
	return FAILED;
}

void INFO_bm_xfer_stats(struct drbd_conf *mdev,
		const char *direction, struct bm_xfer_ctx *c)
{
	/* what would it take to transfer it "plaintext" */
	unsigned plain = sizeof(struct p_header80) *
		((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
		+ c->bm_words * sizeof(long);
	unsigned total = c->bytes[0] + c->bytes[1];
	unsigned r;

	/* total can not be zero. but just in case: */
	if (total == 0)
		return;

	/* don't report if not compressed */
	if (total >= plain)
		return;

	/* total < plain. check for overflow, still */
	r = (total > UINT_MAX/1000) ? (total / (plain/1000))
				    : (1000 * total / plain);

	if (r > 1000)
		r = 1000;

	r = 1000 - r;
	dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
	     "total %u; compression: %u.%u%%\n",
			direction,
			c->bytes[1], c->packets[1],
			c->bytes[0], c->packets[0],
			total, r/10, r % 10);
}

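/*
 * Editor's worked example, illustration only (not part of the driver):
 * with plain = 4096 bytes and total = 512 bytes on the wire,
 * r = 1000*512/4096 = 125, then r = 1000 - 125 = 875, which is printed
 * as "compression: 87.5%" (r/10 "." r%10).
 */
#if 0
	unsigned plain = 4096, total = 512;
	unsigned r = 1000 * total / plain;	/* 125 */

	r = 1000 - r;				/* 875 -> "87.5%" */
#endif
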
/* Since we are processing the bitfield from lower addresses to higher,
   it does not matter if we process it in 32 bit chunks or 64 bit
   chunks as long as it is little endian. (Understand it as byte stream,
   beginning with the lowest byte...) If we would use big endian
   we would need to process it from the highest address to the lowest,
   in order to be agnostic to the 32 vs 64 bits issue.

   returns 0 on failure, 1 if we successfully received it. */
static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct bm_xfer_ctx c;
	unsigned long *buffer;
	enum receive_bitmap_ret ret;
	int ok = FALSE;
	struct p_header80 *h = &mdev->data.rbuf.header.h80;

	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));

	drbd_bm_lock(mdev, "receive bitmap");

	/* maybe we should use some per thread scratch page,
	 * and allocate that during initial device creation? */
	buffer = (unsigned long *) __get_free_page(GFP_NOIO);
	if (!buffer) {
		dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
		goto out;
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),
	};

	do {
		if (cmd == P_BITMAP) {
			ret = receive_bitmap_plain(mdev, data_size, buffer, &c);
		} else if (cmd == P_COMPRESSED_BITMAP) {
			/* MAYBE: sanity check that we speak proto >= 90,
			 * and the feature is enabled! */
			struct p_compressed_bm *p;

			if (data_size > BM_PACKET_PAYLOAD_BYTES) {
				dev_err(DEV, "ReportCBitmap packet too large\n");
				goto out;
			}
			/* use the page buff */
			p = (void *)buffer;
			memcpy(p, h, sizeof(*h));
			if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
				goto out;
			if (data_size <= (sizeof(*p) - sizeof(p->head))) {
				dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
				goto out;
			}
			ret = decode_bitmap_c(mdev, p, &c);
		} else {
			dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
			goto out;
		}

		c.packets[cmd == P_BITMAP]++;
		c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;

		if (ret != OK)
			break;

		if (!drbd_recv_header(mdev, &cmd, &data_size))
			goto out;
	} while (ret == OK);
	if (ret == FAILED)
		goto out;

	INFO_bm_xfer_stats(mdev, "receive", &c);

	if (mdev->state.conn == C_WF_BITMAP_T) {
		ok = !drbd_send_bitmap(mdev);
		if (!ok)
			goto out;
		/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
		ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		D_ASSERT(ok == SS_SUCCESS);
	} else if (mdev->state.conn != C_WF_BITMAP_S) {
		/* admin may have requested C_DISCONNECTING,
		 * other threads may have noticed network errors */
		dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
		    drbd_conn_str(mdev->state.conn));
	}

	ok = TRUE;
 out:
	drbd_bm_unlock(mdev);
	if (ok && mdev->state.conn == C_WF_BITMAP_S)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	free_page((unsigned long) buffer);
	return ok;
}

static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	/* TODO zero copy sink :) */
	static char sink[128];
	int size, want, r;

	dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
		 cmd, data_size);

	size = data_size;
	while (size > 0) {
		want = min_t(int, size, sizeof(sink));
		r = drbd_recv(mdev, sink, want);
		ERR_IF(r <= 0) break;
		size -= r;
	}
	return size == 0;
}

static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	/* Make sure we've acked all the TCP data associated
	 * with the data requests being unplugged */
	drbd_tcp_quickack(mdev->data.socket);

	return TRUE;
}

typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);

struct data_cmd {
	int expect_payload;
	size_t pkt_size;
	drbd_cmd_handler_f function;
};

static struct data_cmd drbd_cmd_handler[] = {
	[P_DATA]	    = { 1, sizeof(struct p_data), receive_Data },
	[P_DATA_REPLY]	    = { 1, sizeof(struct p_data), receive_DataReply },
	[P_RS_DATA_REPLY]   = { 1, sizeof(struct p_data), receive_RSDataReply } ,
	[P_BARRIER]	    = { 0, sizeof(struct p_barrier), receive_Barrier } ,
	[P_BITMAP]	    = { 1, sizeof(struct p_header80), receive_bitmap } ,
	[P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
	[P_UNPLUG_REMOTE]   = { 0, sizeof(struct p_header80), receive_UnplugRemote },
	[P_DATA_REQUEST]    = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_SYNC_PARAM]	    = { 1, sizeof(struct p_header80), receive_SyncParam },
	[P_SYNC_PARAM89]    = { 1, sizeof(struct p_header80), receive_SyncParam },
	[P_PROTOCOL]	    = { 1, sizeof(struct p_protocol), receive_protocol },
	[P_UUIDS]	    = { 0, sizeof(struct p_uuids), receive_uuids },
	[P_SIZES]	    = { 0, sizeof(struct p_sizes), receive_sizes },
	[P_STATE]	    = { 0, sizeof(struct p_state), receive_state },
	[P_STATE_CHG_REQ]   = { 0, sizeof(struct p_req_state), receive_req_state },
	[P_SYNC_UUID]	    = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
	[P_OV_REQUEST]	    = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_OV_REPLY]	    = { 1, sizeof(struct p_block_req), receive_DataRequest },
	[P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
	[P_DELAY_PROBE]	    = { 0, sizeof(struct p_delay_probe93), receive_skip },
	/* anything missing from this table is in
	 * the asender_tbl, see get_asender_cmd */
	[P_MAX_CMD]	    = { 0, 0, NULL },
};

/* All handler functions that expect a sub-header get that sub-header in
   mdev->data.rbuf.header.head.payload.

   Usually in mdev->data.rbuf.header.head the callback can find the usual
   p_header, but they may not rely on that. Since there is also p_header95 !
 */
static void drbdd(struct drbd_conf *mdev)
{
	union p_header *header = &mdev->data.rbuf.header;
	unsigned int packet_size;
	enum drbd_packets cmd;
	size_t shs; /* sub header size */
	int rv;

	while (get_t_state(&mdev->receiver) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (!drbd_recv_header(mdev, &cmd, &packet_size))
			goto err_out;

		if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
			dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
			goto err_out;
		}

		shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
		if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
			dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
			goto err_out;
		}

		if (shs) {
			rv = drbd_recv(mdev, &header->h80.payload, shs);
			if (unlikely(rv != shs)) {
				dev_err(DEV, "short read while reading sub header: rv=%d\n", rv);
				goto err_out;
			}
		}

		rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);

		if (unlikely(!rv)) {
			dev_err(DEV, "error receiving %s, l: %d!\n",
			    cmdname(cmd), packet_size);
			goto err_out;
		}
	}

	if (0) {
	err_out:
		drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
	}
	/* If we leave here, we probably want to update at least the
	 * "Connected" indicator on stable storage. Do so explicitly here. */
	drbd_md_sync(mdev);
}

void drbd_flush_workqueue(struct drbd_conf *mdev)
{
	struct drbd_wq_barrier barr;

	barr.w.cb = w_prev_work_done;
	init_completion(&barr.done);
	drbd_queue_work(&mdev->data.work, &barr.w);
	wait_for_completion(&barr.done);
}

void drbd_free_tl_hash(struct drbd_conf *mdev)
{
	struct hlist_head *h;

	spin_lock_irq(&mdev->req_lock);

	if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
		spin_unlock_irq(&mdev->req_lock);
		return;
	}

	/* paranoia code */
	for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
		if (h->first)
			dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
				(int)(h - mdev->ee_hash), h->first);
	kfree(mdev->ee_hash);
	mdev->ee_hash = NULL;
	mdev->ee_hash_s = 0;

	/* paranoia code */
	for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
		if (h->first)
			dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
				(int)(h - mdev->tl_hash), h->first);
	kfree(mdev->tl_hash);
	mdev->tl_hash = NULL;
	mdev->tl_hash_s = 0;
	spin_unlock_irq(&mdev->req_lock);
}

static void drbd_disconnect(struct drbd_conf *mdev)
{
	enum drbd_fencing_p fp;
	union drbd_state os, ns;
	int rv = SS_UNKNOWN_ERROR;
	unsigned int i;

	if (mdev->state.conn == C_STANDALONE)
		return;
	if (mdev->state.conn >= C_WF_CONNECTION)
		dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n",
				drbd_conn_str(mdev->state.conn));

	/* asender does not clean up anything. it must not interfere, either */
	drbd_thread_stop(&mdev->asender);
	drbd_free_sock(mdev);

	/* wait for current activity to cease. */
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	/* We do not have data structures that would allow us to
	 * get the rs_pending_cnt down to 0 again.
	 *  * On C_SYNC_TARGET we do not have any data structures describing
	 *    the pending RSDataRequest's we have sent.
	 *  * On C_SYNC_SOURCE there is no data structure that tracks
	 *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
	 *  And no, it is not the sum of the reference counts in the
	 *  resync_LRU. The resync_LRU tracks the whole operation including
	 *  the disk-IO, while the rs_pending_cnt only tracks the blocks
	 *  on the fly. */
	drbd_rs_cancel_all(mdev);
	mdev->rs_total = 0;
	mdev->rs_failed = 0;
	atomic_set(&mdev->rs_pending_cnt, 0);
	wake_up(&mdev->misc_wait);

	/* make sure syncer is stopped and w_resume_next_sg queued */
	del_timer_sync(&mdev->resync_timer);
	resync_timer_fn((unsigned long)mdev);

	/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
	 * w_make_resync_request etc. which may still be on the worker queue
	 * to be "canceled" */
	drbd_flush_workqueue(mdev);

	/* This also does reclaim_net_ee(). If we do this too early, we might
	 * miss some resync ee and pages.*/
	drbd_process_done_ee(mdev);

	kfree(mdev->p_uuid);
	mdev->p_uuid = NULL;

	if (!is_susp(mdev->state))
		tl_clear(mdev);

	dev_info(DEV, "Connection closed\n");

	drbd_md_sync(mdev);

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
		drbd_try_outdate_peer_async(mdev);

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;
	if (os.conn >= C_UNCONNECTED) {
		/* Do not restart in case we are C_DISCONNECTING */
		ns = os;
		ns.conn = C_UNCONNECTED;
		rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	}
	spin_unlock_irq(&mdev->req_lock);

	if (os.conn == C_DISCONNECTING) {
		wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);

		if (!is_susp(mdev->state)) {
			/* we must not free the tl_hash
			 * while application io is still on the fly */
			wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
			drbd_free_tl_hash(mdev);
		}

		crypto_free_hash(mdev->cram_hmac_tfm);
		mdev->cram_hmac_tfm = NULL;

		kfree(mdev->net_conf);
		mdev->net_conf = NULL;
		drbd_request_state(mdev, NS(conn, C_STANDALONE));
	}

	/* tcp_close and release of sendpage pages can be deferred. I don't
	 * want to use SO_LINGER, because apparently it can be deferred for
	 * more than 20 seconds (longest time I checked).
	 *
	 * Actually we don't care for exactly when the network stack does its
	 * put_page(), but release our reference on these pages right here.
	 */
	i = drbd_release_ee(mdev, &mdev->net_ee);
	if (i)
		dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
	i = atomic_read(&mdev->pp_in_use_by_net);
	if (i)
		dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
	i = atomic_read(&mdev->pp_in_use);
	if (i)
		dev_info(DEV, "pp_in_use = %d, expected 0\n", i);

	D_ASSERT(list_empty(&mdev->read_ee));
	D_ASSERT(list_empty(&mdev->active_ee));
	D_ASSERT(list_empty(&mdev->sync_ee));
	D_ASSERT(list_empty(&mdev->done_ee));

	/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
	atomic_set(&mdev->current_epoch->epoch_size, 0);
	D_ASSERT(list_empty(&mdev->current_epoch->list));
}

/*
 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
 * we can agree on is stored in agreed_pro_version.
 *
 * feature flags and the reserved array should be enough room for future
 * enhancements of the handshake protocol, and possible plugins...
 *
 * for now, they are expected to be zero, but ignored.
 */
static int drbd_send_handshake(struct drbd_conf *mdev)
{
	/* ASSERT current == mdev->receiver ... */
	struct p_handshake *p = &mdev->data.sbuf.handshake;
	int ok;

	if (mutex_lock_interruptible(&mdev->data.mutex)) {
		dev_err(DEV, "interrupted during initial handshake\n");
		return 0; /* interrupted. not ok. */
	}

	if (mdev->data.socket == NULL) {
		mutex_unlock(&mdev->data.mutex);
		return 0;
	}

	memset(p, 0, sizeof(*p));
	p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
	p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
	ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
			     (struct p_header80 *)p, sizeof(*p), 0 );
	mutex_unlock(&mdev->data.mutex);
	return ok;
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 */
static int drbd_do_handshake(struct drbd_conf *mdev)
{
	/* ASSERT current == mdev->receiver ... */
	struct p_handshake *p = &mdev->data.rbuf.handshake;
	const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
	unsigned int length;
	enum drbd_packets cmd;
	int rv;

	rv = drbd_send_handshake(mdev);
	if (!rv)
		return 0;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		return 0;

	if (cmd != P_HAND_SHAKE) {
		dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
		     cmdname(cmd), cmd);
		return -1;
	}

	if (length != expect) {
		dev_err(DEV, "expected HandShake length: %u, received: %u\n",
		     expect, length);
		return -1;
	}

	rv = drbd_recv(mdev, &p->head.payload, expect);

	if (rv != expect) {
		dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv);
		return 0;
	}

	p->protocol_min = be32_to_cpu(p->protocol_min);
	p->protocol_max = be32_to_cpu(p->protocol_max);
	if (p->protocol_max == 0)
		p->protocol_max = p->protocol_min;

	if (PRO_VERSION_MAX < p->protocol_min ||
	    PRO_VERSION_MIN > p->protocol_max)
		goto incompat;

	mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);

	dev_info(DEV, "Handshake successful: "
	     "Agreed network protocol version %d\n", mdev->agreed_pro_version);

	return 1;

 incompat:
	dev_err(DEV, "incompatible DRBD dialects: "
	    "I support %d-%d, peer supports %d-%d\n",
	    PRO_VERSION_MIN, PRO_VERSION_MAX,
	    p->protocol_min, p->protocol_max);
	return -1;
}

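/*
 * Editor's worked example, illustration only (not part of the driver):
 * if this node supports protocol versions 86-95 and the peer advertises
 * 88-100, the ranges overlap and both ends agree on min(95, 100) = 95.
 * A hedged sketch of that negotiation rule:
 */
#if 0
static int agree_version(int my_min, int my_max, int peer_min, int peer_max)
{
	if (my_max < peer_min || my_min > peer_max)
		return -1;			/* incompatible dialects */
	return my_max < peer_max ? my_max : peer_max;
}
#endif
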
#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
static int drbd_do_auth(struct drbd_conf *mdev)
{
	dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
	dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
	return -1;
}
#else
#define CHALLENGE_LEN 64

/* Return value:
	1 - auth succeeded,
	0 - failed, try again (network error),
	-1 - auth failed, don't try again.
*/
static int drbd_do_auth(struct drbd_conf *mdev)
{
	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
	struct scatterlist sg;
	char *response = NULL;
	char *right_response = NULL;
	char *peers_ch = NULL;
	unsigned int key_len = strlen(mdev->net_conf->shared_secret);
	unsigned int resp_size;
	struct hash_desc desc;
	enum drbd_packets cmd;
	unsigned int length;
	int rv;

	desc.tfm = mdev->cram_hmac_tfm;
	desc.flags = 0;

	rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
				(u8 *)mdev->net_conf->shared_secret, key_len);
	if (rv) {
		dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	get_random_bytes(my_challenge, CHALLENGE_LEN);

	rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
	if (!rv)
		goto fail;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		goto fail;

	if (cmd != P_AUTH_CHALLENGE) {
		dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
		    cmdname(cmd), cmd);
		rv = 0;
		goto fail;
	}

	if (length > CHALLENGE_LEN * 2) {
		dev_err(DEV, "expected AuthChallenge payload too big.\n");
		rv = -1;
		goto fail;
	}

	peers_ch = kmalloc(length, GFP_NOIO);
	if (peers_ch == NULL) {
		dev_err(DEV, "kmalloc of peers_ch failed\n");
		rv = -1;
		goto fail;
	}

	rv = drbd_recv(mdev, peers_ch, length);

	if (rv != length) {
		dev_err(DEV, "short read AuthChallenge: l=%u\n", rv);
		rv = 0;
		goto fail;
	}

	resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
	response = kmalloc(resp_size, GFP_NOIO);
	if (response == NULL) {
		dev_err(DEV, "kmalloc of response failed\n");
		rv = -1;
		goto fail;
	}

	sg_init_table(&sg, 1);
	sg_set_buf(&sg, peers_ch, length);

	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
	if (rv) {
		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
	if (!rv)
		goto fail;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		goto fail;

	if (cmd != P_AUTH_RESPONSE) {
		dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
		    cmdname(cmd), cmd);
		rv = 0;
		goto fail;
	}

	if (length != resp_size) {
		dev_err(DEV, "expected AuthResponse payload of wrong size\n");
		rv = 0;
		goto fail;
	}

	rv = drbd_recv(mdev, response, resp_size);

	if (rv != resp_size) {
		dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv);
		rv = 0;
		goto fail;
	}

	right_response = kmalloc(resp_size, GFP_NOIO);
	if (right_response == NULL) {
		dev_err(DEV, "kmalloc of right_response failed\n");
		rv = -1;
		goto fail;
	}

	sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);

	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
	if (rv) {
		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = !memcmp(response, right_response, resp_size);

	if (rv)
		dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
		     resp_size, mdev->net_conf->cram_hmac_alg);
	else
		rv = -1;

 fail:
	kfree(peers_ch);
	kfree(response);
	kfree(right_response);

	return rv;
}
#endif

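/*
 * Illustration only (editor's sketch, not part of the driver): the
 * exchange above is a symmetric CRAM-style handshake: each side sends a
 * random challenge and must return HMAC(shared_secret, peer's challenge).
 * A user-space sketch of the verification step using OpenSSL's HMAC()
 * (an assumption made for the example; the kernel code uses the
 * crypto_hash API instead):
 */
#if 0
#include <string.h>
#include <openssl/hmac.h>

/* returns 1 if the peer's response matches our expectation */
static int verify_response(const unsigned char *secret, int secret_len,
			   const unsigned char *my_challenge, int chall_len,
			   const unsigned char *response, unsigned int resp_len)
{
	unsigned char expected[EVP_MAX_MD_SIZE];
	unsigned int expected_len;

	HMAC(EVP_sha1(), secret, secret_len, my_challenge, chall_len,
	     expected, &expected_len);
	return resp_len == expected_len &&
	       memcmp(response, expected, resp_len) == 0;
}
#endif
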
int drbdd_init(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	unsigned int minor = mdev_to_minor(mdev);
	int h;

	sprintf(current->comm, "drbd%d_receiver", minor);

	dev_info(DEV, "receiver (re)started\n");

	do {
		h = drbd_connect(mdev);
		if (h == 0) {
			drbd_disconnect(mdev);
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ);
		}
		if (h == -1) {
			dev_warn(DEV, "Discarding network configuration.\n");
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	} while (h == 0);

	if (h > 0) {
		if (get_net_conf(mdev)) {
			drbdd(mdev);
			put_net_conf(mdev);
		}
	}

	drbd_disconnect(mdev);

	dev_info(DEV, "receiver terminated\n");
	return 0;
}

/* ********* acknowledge sender ******** */

static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_req_state_reply *p = (struct p_req_state_reply *)h;

	int retcode = be32_to_cpu(p->retcode);

	if (retcode >= SS_SUCCESS) {
		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
	} else {
		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
			drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&mdev->state_wait);

	return TRUE;
}

static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
{
	return drbd_send_ping_ack(mdev);
}

static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	/* restore idle timeout */
	mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
	if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
		wake_up(&mdev->misc_wait);

	return TRUE;
}

static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);

	D_ASSERT(mdev->agreed_pro_version >= 89);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, sector);
		drbd_set_in_sync(mdev, sector, blksize);
		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
		mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
		put_ldev(mdev);
	}
	dec_rs_pending(mdev);
	atomic_add(blksize >> 9, &mdev->rs_sect_in);

	return TRUE;
}

/* when we receive the ACK for a write request,
 * verify that we actually know about it */
static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
	u64 id, sector_t sector)
{
	struct hlist_head *slot = tl_hash_slot(mdev, sector);
	struct hlist_node *n;
	struct drbd_request *req;

	hlist_for_each_entry(req, n, slot, colision) {
		if ((unsigned long)req == (unsigned long)id) {
			if (req->sector != sector) {
				dev_err(DEV, "_ack_id_to_req: found req %p but it has "
				    "wrong sector (%llus versus %llus)\n", req,
				    (unsigned long long)req->sector,
				    (unsigned long long)sector);
				break;
			}
			return req;
		}
	}
	dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n",
		(void *)(unsigned long)id, (unsigned long long)sector);
	return NULL;
}

typedef struct drbd_request *(req_validator_fn)
	(struct drbd_conf *mdev, u64 id, sector_t sector);

static int validate_req_change_req_state(struct drbd_conf *mdev,
	u64 id, sector_t sector, req_validator_fn validator,
	const char *func, enum drbd_req_event what)
{
	struct drbd_request *req;
	struct bio_and_error m;

	spin_lock_irq(&mdev->req_lock);
	req = validator(mdev, id, sector);
	if (unlikely(!req)) {
		spin_unlock_irq(&mdev->req_lock);
		dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func);
		return FALSE;
	}
	__req_mod(req, what, &m);
	spin_unlock_irq(&mdev->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return TRUE;
}

static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);
	enum drbd_req_event what;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		drbd_set_in_sync(mdev, sector, blksize);
		dec_rs_pending(mdev);
		return TRUE;
	}
	switch (be16_to_cpu(h->command)) {
	case P_RS_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer_and_sis;
		break;
	case P_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer;
		break;
	case P_RECV_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
		what = recv_acked_by_peer;
		break;
	case P_DISCARD_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = conflict_discarded_by_peer;
		break;
	default:
		D_ASSERT(0);
		return FALSE;
	}

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ack_id_to_req, __func__, what);
}

static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);

	if (__ratelimit(&drbd_ratelimit_state))
		dev_warn(DEV, "Got NegAck packet. Peer is in trouble?\n");

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		int size = be32_to_cpu(p->blksize);
		dec_rs_pending(mdev);
		drbd_rs_failed_io(mdev, sector, size);
		return TRUE;
	}
	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ack_id_to_req, __func__, neg_acked);
}

static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
	dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
	    (unsigned long long)sector, be32_to_cpu(p->blksize));

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ar_id_to_req, __func__, neg_acked);
}

static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	sector_t sector;
	int size;
	struct p_block_ack *p = (struct p_block_ack *)h;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dec_rs_pending(mdev);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, sector);
		drbd_rs_failed_io(mdev, sector, size);
		put_ldev(mdev);
	}

	return true;
}
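
/* The peer has written everything up to this barrier: release the
 * corresponding section of the transfer log. */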
static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_barrier_ack *p = (struct p_barrier_ack *)h;

	tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));

	return true;
}
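
/*
 * One online-verify reply; block_id == ID_OUT_OF_SYNC flags a mismatch.
 * Once the last sector has been verified, queue w_ov_finished to wrap
 * up, or, should the work allocation fail, wrap up synchronously.
 */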
static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	struct drbd_work *w;
	sector_t sector;
	int size;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_oos_found(mdev, sector, size);
	else
		ov_oos_print(mdev);

	if (!get_ldev(mdev))
		return true;

	drbd_rs_complete_io(mdev, sector);
	dec_rs_pending(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		w = kmalloc(sizeof(*w), GFP_NOIO);
		if (w) {
			w->cb = w_ov_finished;
			drbd_queue_work_front(&mdev->data.work, w);
		} else {
			dev_err(DEV, "kmalloc(w) failed.\n");
			ov_oos_print(mdev);
			drbd_resync_finished(mdev);
		}
	}
	put_ldev(mdev);
	return true;
}
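
/* Packet is known but carries nothing we need to act on; the payload
 * has already been received into the buffer, so just discard it. */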
static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
{
	return true;
}
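
/*
 * Dispatch table for the meta socket: each command the asender
 * understands has a fixed packet size and a process() handler.
 */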
struct asender_cmd {
	size_t pkt_size;
	int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
};

static struct asender_cmd *get_asender_cmd(int cmd)
{
	static struct asender_cmd asender_tbl[] = {
		/* anything missing from this table is in
		 * the drbd_cmd_handler (drbd_default_handler) table,
		 * see the beginning of drbdd() */
	[P_PING]	    = { sizeof(struct p_header80), got_Ping },
	[P_PING_ACK]	    = { sizeof(struct p_header80), got_PingAck },
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
	[P_MAX_CMD]	    = { 0, NULL },
	};
	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
		return NULL;
	return &asender_tbl[cmd];
}
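
/*
 * The asender thread: sends pings and all acknowledgments for this
 * device, and receives the small, fixed size packets arriving on the
 * meta socket, dispatching them through get_asender_cmd().
 */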
int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct p_header80 *h = &mdev->meta.rbuf.header.h80;
	struct asender_cmd *cmd = NULL;

	int rv, len;
	void *buf = h;
	int received = 0;
	int expect = sizeof(struct p_header80);
	int empty;

	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */
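
	/* Each iteration: send a ping if requested, flush the done_ee
	 * list (whose callbacks send the pending acks), then read the
	 * next packet from the meta socket, possibly in several pieces. */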
	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
			mdev->meta.socket->sk->sk_rcvtimeo =
				mdev->net_conf->ping_timeo*HZ/10;
		}

		/* conditionally cork;
		 * it may hurt latency if we cork without much to send */
		if (!mdev->net_conf->no_cork &&
			3 < atomic_read(&mdev->unacked_cnt))
			drbd_tcp_cork(mdev->meta.socket);
		while (1) {
			clear_bit(SIGNAL_ASENDER, &mdev->flags);
			flush_signals(current);
			if (!drbd_process_done_ee(mdev))
				goto reconnect;
			/* to avoid race with newly queued ACKs */
			set_bit(SIGNAL_ASENDER, &mdev->flags);
			spin_lock_irq(&mdev->req_lock);
			empty = list_empty(&mdev->done_ee);
			spin_unlock_irq(&mdev->req_lock);
			/* new ack may have been queued right here,
			 * but then there is also a signal pending,
			 * and we start over... */
			if (empty)
				break;
		}
		/* but unconditionally uncork unless disabled */
		if (!mdev->net_conf->no_cork)
			drbd_tcp_uncork(mdev->meta.socket);

		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(mdev, mdev->meta.socket,
				     buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &mdev->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS  (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf += rv;
		} else if (rv == 0) {
			dev_err(DEV, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			if (mdev->meta.socket->sk->sk_rcvtimeo ==
			    mdev->net_conf->ping_timeo*HZ/10) {
				dev_err(DEV, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &mdev->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}
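
		/* A complete header has arrived: validate the magic, look
		 * up the handler and widen 'expect' to the full packet
		 * size.  Once the full packet is in, process it and reset
		 * for the next header. */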
		if (received == expect && cmd == NULL) {
			if (unlikely(h->magic != BE_DRBD_MAGIC)) {
				dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto reconnect;
			}
			cmd = get_asender_cmd(be16_to_cpu(h->command));
			len = be16_to_cpu(h->length);
			if (unlikely(cmd == NULL)) {
				dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto disconnect;
			}
			expect = cmd->pkt_size;
			ERR_IF(len != expect-sizeof(struct p_header80))
				goto reconnect;
		}
		if (received == expect) {
			D_ASSERT(cmd != NULL);
			if (!cmd->process(mdev, h))
				goto reconnect;

			buf = h;
			received = 0;
			expect = sizeof(struct p_header80);
			cmd = NULL;
		}
	}
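
	/* The two if (0) blocks below are reachable only through their
	 * labels; they select the new connection state for the two error
	 * classes and share the cleanup that follows. */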
	if (0) {
reconnect:
		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
		drbd_md_sync(mdev);
	}
	if (0) {
disconnect:
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		drbd_md_sync(mdev);
	}
	clear_bit(SIGNAL_ASENDER, &mdev->flags);

	D_ASSERT(mdev->state.conn < C_CONNECTED);
	dev_info(DEV, "asender terminated\n");

	return 0;
}