/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"
enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

struct flush_work {
	struct drbd_work w;
	struct drbd_epoch *epoch;
};
static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
{
	struct drbd_epoch *prev;
	spin_lock(&mdev->epoch_lock);
	prev = list_entry(epoch->list.prev, struct drbd_epoch, list);
	if (prev == epoch || prev == mdev->current_epoch)
		prev = NULL;
	spin_unlock(&mdev->epoch_lock);
	return prev;
}
#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

static struct page *drbd_pp_first_page_or_try_alloc(struct drbd_conf *mdev)
{
	struct page *page = NULL;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant > 0) {
		spin_lock(&drbd_pp_lock);
		page = drbd_pp_pool;
		if (page) {
			drbd_pp_pool = (struct page *)page_private(page);
			set_page_private(page, 0); /* just to be polite */
			drbd_pp_vacant--;
		}
		spin_unlock(&drbd_pp_lock);
	}
	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	if (!page)
		page = alloc_page(GFP_TRY);
	if (page)
		atomic_inc(&mdev->pp_in_use);
	return page;
}
/* kick lower level device, if we have more than (arbitrary number)
 * reference counts on it, which typically are locally submitted io
 * requests. don't use unacked_cnt, so we speed up proto A and B, too. */
static void maybe_kick_lo(struct drbd_conf *mdev)
{
	if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
		drbd_kick_lo(mdev);
}
static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first not finished we can
	   stop to examine the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_bio_has_active_page(e->private_bio))
			break;
		list_move(le, to_be_freed);
	}
}
static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	maybe_kick_lo(mdev);
	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_ee(mdev, e);
}
/**
 * drbd_pp_alloc() - Returns a page, fails only if a signal comes in
 * @mdev:	DRBD device.
 * @retry:	whether or not to retry allocation forever (or until signalled)
 *
 * Tries to allocate a page, first from our own page pool, then from the
 * kernel, unless this allocation would exceed the max_buffers setting.
 * If @retry is non-zero, retry until DRBD frees a page somewhere else.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, int retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
		page = drbd_pp_first_page_or_try_alloc(mdev);
		if (page)
			return page;
	}

	for (;;) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_page_or_try_alloc(mdev);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	return page;
}
/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside an other spin_lock_irq(&mdev->req_lock) */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page)
{
	int free_it;

	spin_lock(&drbd_pp_lock);
	if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) {
		free_it = 1;
	} else {
		set_page_private(page, (unsigned long)drbd_pp_pool);
		drbd_pp_pool = page;
		drbd_pp_vacant++;
		free_it = 0;
	}
	spin_unlock(&drbd_pp_lock);

	atomic_dec(&mdev->pp_in_use);

	if (free_it)
		__free_page(page);

	wake_up(&drbd_pp_wait);
}
static void drbd_pp_free_bio_pages(struct drbd_conf *mdev, struct bio *bio)
{
	struct page *p_to_be_freed = NULL;
	struct page *page;
	struct bio_vec *bvec;
	int i;

	spin_lock(&drbd_pp_lock);
	__bio_for_each_segment(bvec, bio, i, 0) {
		if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) {
			set_page_private(bvec->bv_page, (unsigned long)p_to_be_freed);
			p_to_be_freed = bvec->bv_page;
		} else {
			set_page_private(bvec->bv_page, (unsigned long)drbd_pp_pool);
			drbd_pp_pool = bvec->bv_page;
			drbd_pp_vacant++;
		}
	}
	spin_unlock(&drbd_pp_lock);
	atomic_sub(bio->bi_vcnt, &mdev->pp_in_use);

	while (p_to_be_freed) {
		page = p_to_be_freed;
		p_to_be_freed = (struct page *)page_private(page);
		set_page_private(page, 0); /* just to be polite */
		put_page(page);
	}

	wake_up(&drbd_pp_wait);
}
/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_ee()
 drbd_alloc_ee()
 drbd_release_ee()
 drbd_process_done_ee()
 drbd_wait_ee_list_empty()
*/
struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				     u64 id,
				     sector_t sector,
				     unsigned int data_size,
				     gfp_t gfp_mask) __must_hold(local)
{
	struct request_queue *q;
	struct drbd_epoch_entry *e;
	struct page *page;
	struct bio *bio;
	unsigned int ds;

	if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!e) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	bio = bio_alloc(gfp_mask & ~__GFP_HIGHMEM, div_ceil(data_size, PAGE_SIZE));
	if (!bio) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of a bio failed\n");
		goto fail1;
	}

	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_sector = sector;

	ds = data_size;
	while (ds) {
		page = drbd_pp_alloc(mdev, (gfp_mask & __GFP_WAIT));
		if (!page) {
			if (!(gfp_mask & __GFP_NOWARN))
				dev_err(DEV, "alloc_ee: Allocation of a page failed\n");
			goto fail2;
		}
		if (!bio_add_page(bio, page, min_t(int, ds, PAGE_SIZE), 0)) {
			drbd_pp_free(mdev, page);
			dev_err(DEV, "alloc_ee: bio_add_page(s=%llu,"
				"data_size=%u,ds=%u) failed\n",
				(unsigned long long)sector, data_size, ds);

			q = bdev_get_queue(bio->bi_bdev);
			if (q->merge_bvec_fn) {
				struct bvec_merge_data bvm = {
					.bi_bdev = bio->bi_bdev,
					.bi_sector = bio->bi_sector,
					.bi_size = bio->bi_size,
					.bi_rw = bio->bi_rw,
				};
				int l = q->merge_bvec_fn(q, &bvm,
						&bio->bi_io_vec[bio->bi_vcnt]);
				dev_err(DEV, "merge_bvec_fn() = %d\n", l);
			}

			/* dump more of the bio. */
			dev_err(DEV, "bio->bi_max_vecs = %d\n", bio->bi_max_vecs);
			dev_err(DEV, "bio->bi_vcnt = %d\n", bio->bi_vcnt);
			dev_err(DEV, "bio->bi_size = %d\n", bio->bi_size);
			dev_err(DEV, "bio->bi_phys_segments = %d\n", bio->bi_phys_segments);

			goto fail2;
		}
		ds -= min_t(int, ds, PAGE_SIZE);
	}

	D_ASSERT(data_size == bio->bi_size);

	bio->bi_private = e;
	e->mdev = mdev;
	e->sector = sector;
	e->size = bio->bi_size;

	e->private_bio = bio;
	e->block_id = id;
	INIT_HLIST_NODE(&e->colision);
	e->epoch = NULL;
	e->flags = 0;

	return e;

 fail2:
	drbd_pp_free_bio_pages(mdev, bio);
	bio_put(bio);
 fail1:
	mempool_free(e, drbd_ee_mempool);

	return NULL;
}
void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
{
	struct bio *bio = e->private_bio;
	drbd_pp_free_bio_pages(mdev, bio);
	bio_put(bio);
	D_ASSERT(hlist_unhashed(&e->colision));
	mempool_free(e, drbd_ee_mempool);
}
int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int count = 0;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_ee(mdev, e);
		count++;
	}
	return count;
}
/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);
	}
	wake_up(&mdev->ee_wait);

	return ok;
}
void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		drbd_kick_lo(mdev);
		schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
	}
}
void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
}
/* see also kernel_accept; which is only present since 2.6.18.
 * also we want to log which part of it failed, exactly */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops  = sock->ops;

out:
	return err;
}
static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
		    void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}
static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on  sock) we got a signal
		 */
		if (rv < 0) {
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	}

	set_fs(oldfs);

	if (rv != size)
		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

	return rv;
}
static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo =  mdev->net_conf->try_connect_int*HZ;

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->net_conf->my_addr,
	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->net_conf->peer_addr,
				 mdev->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			dev_err(DEV, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	put_net_conf(mdev);
	return sock;
}
static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = mdev->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) mdev->net_conf->my_addr,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			dev_err(DEV, "%s failed, err = %d\n", what, err);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	}
	put_net_conf(mdev);

	return s_estab;
}
static int drbd_send_fp(struct drbd_conf *mdev,
	struct socket *sock, enum drbd_packets cmd)
{
	struct p_header *h = (struct p_header *) &mdev->data.sbuf.header;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}
static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
	struct p_header *h = (struct p_header *) &mdev->data.sbuf.header;
	int rr;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
		return be16_to_cpu(h->command);

	return 0xffff;
}
/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return FALSE;

	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return TRUE;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return FALSE;
	}
}
/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	D_ASSERT(!mdev->data.socket);

	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags))
		dev_err(DEV, "CREATE_BARRIER flag was set in drbd_connect - now cleared!\n");

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);

	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(mdev);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(mdev);
		if (s) {
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					dev_warn(DEV, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					dev_warn(DEV, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				break;
			default:
				dev_warn(DEV, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&mdev->receiver) == Exiting)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	if (mdev->net_conf->sndbuf_size) {
		sock->sk->sk_sndbuf = mdev->net_conf->sndbuf_size;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}

	if (mdev->net_conf->rcvbuf_size) {
		sock->sk->sk_rcvbuf = mdev->net_conf->rcvbuf_size;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where apropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);
	if (h <= 0)
		return h;

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
		case -1:
			dev_err(DEV, "Authentication of peer failed\n");
			return -1;
		case 0:
			dev_err(DEV, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
		return 0;

	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	drbd_thread_start(&mdev->asender);

	if (!drbd_send_protocol(mdev))
		return -1;
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);

	return 1;

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}
static int drbd_recv_header(struct drbd_conf *mdev, struct p_header *h)
{
	int r;

	r = drbd_recv(mdev, h, sizeof(*h));

	if (unlikely(r != sizeof(*h))) {
		dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
		return FALSE;
	}
	h->command = be16_to_cpu(h->command);
	h->length  = be16_to_cpu(h->length);
	if (unlikely(h->magic != BE_DRBD_MAGIC)) {
		dev_err(DEV, "magic?? on data m: 0x%lx c: %d l: %d\n",
		    (long)be32_to_cpu(h->magic),
		    h->command, h->length);
		return FALSE;
	}
	mdev->last_received = jiffies;

	return TRUE;
}
static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL, BLKDEV_IFL_WAIT);
		if (rv) {
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}

	return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
}
static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct flush_work *fw = (struct flush_work *)w;
	struct drbd_epoch *epoch = fw->epoch;

	kfree(w);

	if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
		drbd_flush_after_epoch(mdev, epoch);

	drbd_may_finish_epoch(mdev, epoch, EV_PUT |
			      (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));

	return 1;
}
/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int finish, epoch_size;
	struct drbd_epoch *next_epoch;
	int schedule_flush = 0;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;
		finish = 0;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);

			/* Special case: If we just switched from WO_bio_barrier to
			   WO_bdev_flush we should not finish the current epoch */
			if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 &&
			    mdev->write_ordering != WO_bio_barrier &&
			    epoch == mdev->current_epoch)
				clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
			break;
		case EV_BARRIER_DONE:
			set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do */
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
		    epoch->list.prev == &mdev->current_epoch->list &&
		    !test_bit(DE_IS_FINISHING, &epoch->flags)) {
			/* Nearly all conditions are met to finish that epoch... */
			if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
			    mdev->write_ordering == WO_none ||
			    (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
			    ev & EV_CLEANUP) {
				finish = 1;
				set_bit(DE_IS_FINISHING, &epoch->flags);
			} else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
				 mdev->write_ordering == WO_bio_barrier) {
				atomic_inc(&epoch->active);
				schedule_flush = 1;
			}
		}
		if (finish) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is alrady zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	if (schedule_flush) {
		struct flush_work *fw;
		fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
		if (fw) {
			fw->w.cb = w_flush;
			fw->epoch = epoch;
			drbd_queue_work(&mdev->data.work, &fw->w);
		} else {
			dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
			/* That is not a recursion, only one level */
			drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
			drbd_may_finish_epoch(mdev, epoch, EV_PUT);
		}
	}

	return rv;
}
/**
 * drbd_bump_write_ordering() - Fall back to an other write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
		[WO_bio_barrier] = "barrier",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier)
		wo = WO_bdev_flush;
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bio_barrier)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}
/**
 * w_e_reissue() - Worker callback; Resubmit a bio, without BIO_RW_BARRIER set
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways (unused in this callback)
 */
int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	struct bio *bio = e->private_bio;

	/* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
	   (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
	   so that we can finish that epoch in drbd_may_finish_epoch().
	   That is necessary if we already have a long chain of Epochs, before
	   we realize that BIO_RW_BARRIER is actually not supported */

	/* As long as the -ENOTSUPP on the barrier is reported immediately
	   that will never trigger. If it is reported late, we will just
	   print that warning and continue correctly for all future requests
	   with WO_bdev_flush */
	if (previous_epoch(mdev, e->epoch))
		dev_warn(DEV, "Write ordering was not enforced (one time event)\n");

	/* prepare bio for re-submit,
	 * re-init volatile members */
	/* we still have a local reference,
	 * get_ldev was done in receive_Data. */
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_sector = e->sector;
	bio->bi_size = e->size;
	bio->bi_idx = 0;

	bio->bi_flags &= ~(BIO_POOL_MASK - 1);
	bio->bi_flags |= 1 << BIO_UPTODATE;

	/* don't know whether this is necessary: */
	bio->bi_phys_segments = 0;
	bio->bi_next = NULL;

	/* these should be unchanged: */
	/* bio->bi_end_io = drbd_endio_write_sec; */
	/* bio->bi_vcnt = whatever; */

	e->w.cb = e_end_block;

	/* This is no longer a barrier request. */
	bio->bi_rw &= ~(1UL << BIO_RW_BARRIER);

	drbd_generic_make_request(mdev, DRBD_FAULT_DT_WR, bio);

	return 1;
}
static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
{
	int rv, issue_flush;
	struct p_barrier *p = (struct p_barrier *)h;
	struct drbd_epoch *epoch;

	ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;

	rv = drbd_recv(mdev, h->payload, h->length);
	ERR_IF(rv != h->length) return FALSE;

	inc_unacked(mdev);

	if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
		drbd_kick_lo(mdev);

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
	case WO_bio_barrier:
	case WO_none:
		if (rv == FE_RECYCLED)
			return TRUE;
		break;

	case WO_bdev_flush:
	case WO_drain_io:
		if (rv == FE_STILL_LIVE) {
			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
			drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
		}
		if (rv == FE_RECYCLED)
			return TRUE;

		/* The asender will send all the ACKs and barrier ACKs out, since
		   all EEs moved from the active_ee to the done_ee. We need to
		   provide a new epoch object for the EEs that come in soon */
		break;
	}

	/* receiver context, in the writeout path of the other node.
	 * avoid potential distributed deadlock */
	epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
	if (!epoch) {
		dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
		issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		if (issue_flush) {
			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
			if (rv == FE_RECYCLED)
				return TRUE;
		}

		drbd_wait_ee_list_empty(mdev, &mdev->done_ee);

		return TRUE;
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		mdev->epochs++;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
	spin_unlock(&mdev->epoch_lock);

	return TRUE;
}
/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
{
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct bio_vec *bvec;
	struct page *page;
	struct bio *bio;
	int dgs, ds, i, rr;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
			     rr, dgs);
			return NULL;
		}
	}

	data_size -= dgs;

	ERR_IF(data_size &  0x1ff) return NULL;
	ERR_IF(data_size >  DRBD_MAX_SEGMENT_SIZE) return NULL;

	/* even though we trust out peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);
		return NULL;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
	if (!e)
		return NULL;
	bio = e->private_bio;
	ds = data_size;
	bio_for_each_segment(bvec, bio, i) {
		page = bvec->bv_page;
		rr = drbd_recv(mdev, kmap(page), min_t(int, ds, PAGE_SIZE));
		kunmap(page);
		if (rr != min_t(int, ds, PAGE_SIZE)) {
			drbd_free_ee(mdev, e);
			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
			     rr, min_t(int, ds, PAGE_SIZE));
			return NULL;
		}
		ds -= rr;
	}

	if (dgs) {
		drbd_csum(mdev, mdev->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED.\n");
			drbd_bcast_ee(mdev, "digest failed",
					dgs, dig_in, dig_vv, e);
			drbd_free_ee(mdev, e);
			return NULL;
		}
	}
	mdev->recv_cnt += data_size>>9;
	return e;
}
/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
	struct page *page;
	int rr, rv = 1;
	void *data;

	page = drbd_pp_alloc(mdev, 1);

	data = kmap(page);
	while (data_size) {
		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
		if (rr != min_t(int, data_size, PAGE_SIZE)) {
			rv = 0;
			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
			     rr, min_t(int, data_size, PAGE_SIZE));
			break;
		}
		data_size -= rr;
	}
	kunmap(page);
	drbd_pp_free(mdev, page);
	return rv;
}
static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
{
	struct bio_vec *bvec;
	struct bio *bio;
	int dgs, rr, i, expect;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
			     rr, dgs);
			return 0;
		}
	}

	data_size -= dgs;

	/* optimistically update recv_cnt.  if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		expect = min_t(int, data_size, bvec->bv_len);
		rr = drbd_recv(mdev,
			     kmap(bvec->bv_page)+bvec->bv_offset,
			     expect);
		kunmap(bvec->bv_page);
		if (rr != expect) {
			dev_warn(DEV, "short read receiving data reply: "
			     "read %d expected %d\n",
			     rr, expect);
			return 0;
		}
		data_size -= rr;
	}

	if (dgs) {
		drbd_csum(mdev, mdev->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
			return 0;
		}
	}

	D_ASSERT(data_size == 0);
	return 1;
}
/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok;

	D_ASSERT(hlist_unhashed(&e->colision));

	if (likely(drbd_bio_uptodate(e->private_bio))) {
		drbd_set_in_sync(mdev, sector, e->size);
		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, e->size);

		ok = drbd_send_ack(mdev, P_NEG_ACK, e);
	}
	dec_unacked(mdev);

	return ok;
}
static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
	struct drbd_epoch_entry *e;

	e = read_in_block(mdev, ID_SYNCER, sector, data_size);
	if (!e) {
		put_ldev(mdev);
		return FALSE;
	}

	dec_rs_pending(mdev);

	e->private_bio->bi_end_io = drbd_endio_write_sec;
	e->private_bio->bi_rw = WRITE;
	e->w.cb = e_end_resync_block;

	inc_unacked(mdev);
	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */

	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->req_lock);

	drbd_generic_make_request(mdev, DRBD_FAULT_RS_WR, e->private_bio);
	/* accounting done in endio */

	maybe_kick_lo(mdev);
	return TRUE;
}
static int receive_DataReply(struct drbd_conf *mdev, struct p_header *h)
{
	struct drbd_request *req;
	sector_t sector;
	unsigned int header_size, data_size;
	int ok;
	struct p_data *p = (struct p_data *)h;

	header_size = sizeof(*p) - sizeof(*h);
	data_size   = h->length  - header_size;

	ERR_IF(data_size == 0) return FALSE;

	if (drbd_recv(mdev, h->payload, header_size) != header_size)
		return FALSE;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->req_lock);
	req = _ar_id_to_req(mdev, p->block_id, sector);
	spin_unlock_irq(&mdev->req_lock);
	if (unlikely(!req)) {
		dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
		return FALSE;
	}

	/* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	ok = recv_dless_read(mdev, req, sector, data_size);

	if (ok)
		req_mod(req, data_received);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

	return ok;
}
static int receive_RSDataReply(struct drbd_conf *mdev, struct p_header *h)
{
	sector_t sector;
	int ok;
	unsigned int header_size, data_size;
	struct p_data *p = (struct p_data *)h;

	header_size = sizeof(*p) - sizeof(*h);
	data_size   = h->length  - header_size;

	ERR_IF(data_size == 0) return FALSE;

	if (drbd_recv(mdev, h->payload, header_size) != header_size)
		return FALSE;

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_endio_write_sec. */
		ok = recv_resync_read(mdev, sector, data_size);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		ok = drbd_drain_block(mdev, data_size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p);
	}

	return ok;
}
/* e_end_block() is called via drbd_process_done_ee().
 * this means this function only runs in the asender thread
 */
static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	struct drbd_epoch *epoch;
	int ok = 1, pcmd;

	if (e->flags & EE_IS_BARRIER) {
		epoch = previous_epoch(mdev, e->epoch);
		if (epoch)
			drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0));
	}

	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
		if (likely(drbd_bio_uptodate(e->private_bio))) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				e->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			ok &= drbd_send_ack(mdev, pcmd, e);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, e->size);
		} else {
			ok = drbd_send_ack(mdev, P_NEG_ACK, e);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */
		}
		dec_unacked(mdev);
	}
	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
		D_ASSERT(!hlist_unhashed(&e->colision));
		hlist_del_init(&e->colision);
		spin_unlock_irq(&mdev->req_lock);
	} else {
		D_ASSERT(hlist_unhashed(&e->colision));
	}

	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

	return ok;
}
static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	int ok = 1;

	D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
	ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);

	spin_lock_irq(&mdev->req_lock);
	D_ASSERT(!hlist_unhashed(&e->colision));
	hlist_del_init(&e->colision);
	spin_unlock_irq(&mdev->req_lock);

	dec_unacked(mdev);

	return ok;
}
/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq number, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
{
	DEFINE_WAIT(wait);
	unsigned int p_seq;
	long timeout;
	int ret = 0;

	spin_lock(&mdev->peer_seq_lock);
	for (;;) {
		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
		if (seq_le(packet_seq, mdev->peer_seq+1))
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		p_seq = mdev->peer_seq;
		spin_unlock(&mdev->peer_seq_lock);
		timeout = schedule_timeout(30*HZ);
		spin_lock(&mdev->peer_seq_lock);
		if (timeout == 0 && p_seq == mdev->peer_seq) {
			ret = -ETIMEDOUT;
			dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
			break;
		}
	}
	finish_wait(&mdev->seq_wait, &wait);
	if (mdev->peer_seq+1 == packet_seq)
		mdev->peer_seq++;
	spin_unlock(&mdev->peer_seq_lock);

	return ret;
}
/* mirrored write */
static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
{
	sector_t sector;
	struct drbd_epoch_entry *e;
	struct p_data *p = (struct p_data *)h;
	int header_size, data_size;
	int rw = WRITE;
	u32 dp_flags;

	header_size = sizeof(*p) - sizeof(*h);
	data_size   = h->length  - header_size;

	ERR_IF(data_size == 0) return FALSE;

	if (drbd_recv(mdev, h->payload, header_size) != header_size)
		return FALSE;

	if (!get_ldev(mdev)) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write mirrored data block "
			    "to local disk.\n");
		spin_lock(&mdev->peer_seq_lock);
		if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
			mdev->peer_seq++;
		spin_unlock(&mdev->peer_seq_lock);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p);
		atomic_inc(&mdev->current_epoch->epoch_size);
		return drbd_drain_block(mdev, data_size);
	}

	/* get_ldev(mdev) successful.
	 * Corresponding put_ldev done either below (on various errors),
	 * or in drbd_endio_write_sec, if we successfully submit the data at
	 * the end of this function. */

	sector = be64_to_cpu(p->sector);
	e = read_in_block(mdev, p->block_id, sector, data_size);
	if (!e) {
		put_ldev(mdev);
		return FALSE;
	}

	e->private_bio->bi_end_io = drbd_endio_write_sec;
	e->w.cb = e_end_block;

	spin_lock(&mdev->epoch_lock);
	e->epoch = mdev->current_epoch;
	atomic_inc(&e->epoch->epoch_size);
	atomic_inc(&e->epoch->active);

	if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) {
		struct drbd_epoch *epoch;
		/* Issue a barrier if we start a new epoch, and the previous epoch
		   was not a epoch containing a single request which already was
		   a Barrier. */
		epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
		if (epoch == e->epoch) {
			set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
			rw |= (1<<BIO_RW_BARRIER);
			e->flags |= EE_IS_BARRIER;
		} else {
			if (atomic_read(&epoch->epoch_size) > 1 ||
			    !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
				set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
				set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
				rw |= (1<<BIO_RW_BARRIER);
				e->flags |= EE_IS_BARRIER;
			}
		}
	}
	spin_unlock(&mdev->epoch_lock);

	dp_flags = be32_to_cpu(p->dp_flags);
	if (dp_flags & DP_HARDBARRIER) {
		dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n");
		/* rw |= (1<<BIO_RW_BARRIER); */
	}
	if (dp_flags & DP_RW_SYNC)
		rw |= (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
	if (dp_flags & DP_MAY_SET_IN_SYNC)
		e->flags |= EE_MAY_SET_IN_SYNC;

	/* I'm the receiver, I do hold a net_cnt reference. */
	if (!mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
	} else {
		/* don't get the req_lock yet,
		 * we may sleep in drbd_wait_peer_seq */
		const int size = e->size;
		const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
		DEFINE_WAIT(wait);
		struct drbd_request *i;
		struct hlist_node *n;
		struct hlist_head *slot;
		int first;

		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		BUG_ON(mdev->ee_hash == NULL);
		BUG_ON(mdev->tl_hash == NULL);

		/* conflict detection and handling:
		 * 1. wait on the sequence number,
		 *    in case this data packet overtook ACK packets.
		 * 2. check our hash tables for conflicting requests.
		 *    we only need to walk the tl_hash, since an ee can not
		 *    have a conflict with an other ee: on the submitting
		 *    node, the corresponding req had already been conflicting,
		 *    and a conflicting req is never sent.
		 *
		 * Note: for two_primaries, we are protocol C,
		 * so there cannot be any request that is DONE
		 * but still on the transfer log.
		 *
		 * unconditionally add to the ee_hash.
		 *
		 * if no conflicting request is found:
		 *    submit.
		 *
		 * if any conflicting request is found
		 * that has not yet been acked,
		 * AND I have the "discard concurrent writes" flag:
		 *	 queue (via done_ee) the P_DISCARD_ACK; OUT.
		 *
		 * if any conflicting request is found:
		 *	 block the receiver, waiting on misc_wait
		 *	 until no more conflicting requests are there,
		 *	 or we get interrupted (disconnect).
		 *
		 *	 we do not just write after local io completion of those
		 *	 requests, but only after req is done completely, i.e.
		 *	 we wait for the P_DISCARD_ACK to arrive!
		 *
		 *	 then proceed normally, i.e. submit.
		 */
		if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
			goto out_interrupted;

		spin_lock_irq(&mdev->req_lock);

		hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));

#define OVERLAPS overlaps(i->sector, i->size, sector, size)
		slot = tl_hash_slot(mdev, sector);
		first = 1;
		for (;;) {
			int have_unacked = 0;
			int have_conflict = 0;
			prepare_to_wait(&mdev->misc_wait, &wait,
				TASK_INTERRUPTIBLE);
			hlist_for_each_entry(i, n, slot, colision) {
				if (OVERLAPS) {
					/* only ALERT on first iteration,
					 * we may be woken up early... */
					if (first)
						dev_alert(DEV, "%s[%u] Concurrent local write detected!"
						      " new: %llus +%u; pending: %llus +%u\n",
						      current->comm, current->pid,
						      (unsigned long long)sector, size,
						      (unsigned long long)i->sector, i->size);
					if (i->rq_state & RQ_NET_PENDING)
						++have_unacked;
					++have_conflict;
				}
			}
#undef OVERLAPS
			if (!have_conflict)
				break;

			/* Discard Ack only for the _first_ iteration */
			if (first && discard && have_unacked) {
				dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
				     (unsigned long long)sector);
				inc_unacked(mdev);
				e->w.cb = e_send_discard_ack;
				list_add_tail(&e->w.list, &mdev->done_ee);

				spin_unlock_irq(&mdev->req_lock);

				/* we could probably send that P_DISCARD_ACK ourselves,
				 * but I don't like the receiver using the msock */

				put_ldev(mdev);
				wake_asender(mdev);
				finish_wait(&mdev->misc_wait, &wait);
				return TRUE;
			}

			if (signal_pending(current)) {
				hlist_del_init(&e->colision);

				spin_unlock_irq(&mdev->req_lock);

				finish_wait(&mdev->misc_wait, &wait);
				goto out_interrupted;
			}

			spin_unlock_irq(&mdev->req_lock);
			if (first) {
				first = 0;
				dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
				     "sec=%llus\n", (unsigned long long)sector);
			} else if (discard) {
				/* we had none on the first iteration.
				 * there must be none now. */
				D_ASSERT(have_unacked == 0);
			}
			schedule();
			spin_lock_irq(&mdev->req_lock);
		}
		finish_wait(&mdev->misc_wait, &wait);
	}

	list_add(&e->w.list, &mdev->active_ee);
	spin_unlock_irq(&mdev->req_lock);

	switch (mdev->net_conf->wire_protocol) {
	case DRBD_PROT_C:
		inc_unacked(mdev);
		/* corresponding dec_unacked() in e_end_block()
		 * respective _drbd_clear_done_ee */
		break;
	case DRBD_PROT_B:
		/* I really don't like it that the receiver thread
		 * sends on the msock, but anyways */
		drbd_send_ack(mdev, P_RECV_ACK, e);
		break;
	case DRBD_PROT_A:
		/* nothing to do */
		break;
	}

	if (mdev->state.pdsk == D_DISKLESS) {
		/* In case we have the only disk of the cluster, */
		drbd_set_out_of_sync(mdev, e->sector, e->size);
		e->flags |= EE_CALL_AL_COMPLETE_IO;
		drbd_al_begin_io(mdev, e->sector);
	}

	e->private_bio->bi_rw = rw;
	drbd_generic_make_request(mdev, DRBD_FAULT_DT_WR, e->private_bio);
	/* accounting done in endio */

	maybe_kick_lo(mdev);
	return TRUE;

out_interrupted:
	/* yes, the epoch_size now is imbalanced.
	 * but we drop the connection anyways, so we don't have a chance to
	 * receive a barrier... atomic_inc(&mdev->epoch_size); */
	put_ldev(mdev);
	drbd_free_ee(mdev, e);
	return FALSE;
}
static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
{
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct digest_info *di = NULL;
	int size, digest_size;
	unsigned int fault_type;
	struct p_block_req *p = (struct p_block_req *)h;
	const int brps = sizeof(*p)-sizeof(*h);

	if (drbd_recv(mdev, h->payload, brps) != brps)
		return FALSE;

	sector = be64_to_cpu(p->sector);
	size   = be32_to_cpu(p->blksize);

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return FALSE;
	}
	if (sector + (size>>9) > capacity) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return FALSE;
	}

	if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not satisfy peer's read request, "
			    "no local data.\n");
		drbd_send_ack_rp(mdev, h->command == P_DATA_REQUEST ? P_NEG_DREPLY :
				 P_NEG_RS_DREPLY, p);
		return drbd_drain_block(mdev, h->length - brps);
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
	if (!e) {
		put_ldev(mdev);
		return FALSE;
	}

	e->private_bio->bi_rw = READ;
	e->private_bio->bi_end_io = drbd_endio_read_sec;

	switch (h->command) {
	case P_DATA_REQUEST:
		e->w.cb = w_e_end_data_req;
		fault_type = DRBD_FAULT_DT_RD;
		break;
	case P_RS_DATA_REQUEST:
		e->w.cb = w_e_end_rsdata_req;
		fault_type = DRBD_FAULT_RS_RD;
		/* Eventually this should become asynchronously. Currently it
		 * blocks the whole receiver just to delay the reading of a
		 * resync data block.
		 * the drbd_work_queue mechanism is made for this...
		 */
		if (!drbd_rs_begin_io(mdev, sector)) {
			/* we have been interrupted,
			 * probably connection lost! */
			D_ASSERT(signal_pending(current));
			goto out_free_e;
		}
		break;

	case P_OV_REPLY:
	case P_CSUM_RS_REQUEST:
		fault_type = DRBD_FAULT_RS_RD;
		digest_size = h->length - brps;
		di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
		if (!di)
			goto out_free_e;

		di->digest_size = digest_size;
		di->digest = (((char *)di)+sizeof(struct digest_info));

		if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
			goto out_free_e;

		e->block_id = (u64)(unsigned long)di;
		if (h->command == P_CSUM_RS_REQUEST) {
			D_ASSERT(mdev->agreed_pro_version >= 89);
			e->w.cb = w_e_end_csum_rs_req;
		} else if (h->command == P_OV_REPLY) {
			e->w.cb = w_e_end_ov_reply;
			dec_rs_pending(mdev);
			break;
		}

		if (!drbd_rs_begin_io(mdev, sector)) {
			/* we have been interrupted, probably connection lost! */
			D_ASSERT(signal_pending(current));
			goto out_free_e;
		}
		break;

	case P_OV_REQUEST:
		if (mdev->state.conn >= C_CONNECTED &&
		    mdev->state.conn != C_VERIFY_T)
			dev_warn(DEV, "ASSERT FAILED: got P_OV_REQUEST while being %s\n",
				drbd_conn_str(mdev->state.conn));
		if (mdev->ov_start_sector == ~(sector_t)0 &&
		    mdev->agreed_pro_version >= 90) {
			mdev->ov_start_sector = sector;
			mdev->ov_position = sector;
			mdev->ov_left = mdev->rs_total - BM_SECT_TO_BIT(sector);
			dev_info(DEV, "Online Verify start sector: %llu\n",
					(unsigned long long)sector);
		}
		e->w.cb = w_e_end_ov_req;
		fault_type = DRBD_FAULT_RS_RD;
		/* Eventually this should become asynchronous. Currently it
		 * blocks the whole receiver just to delay the reading of a
		 * resync data block.
		 * the drbd_work_queue mechanism is made for this...
		 */
		if (!drbd_rs_begin_io(mdev, sector)) {
			/* we have been interrupted,
			 * probably connection lost! */
			D_ASSERT(signal_pending(current));
			goto out_free_e;
		}
		break;

	default:
		dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
		    cmdname(h->command));
		fault_type = DRBD_FAULT_MAX;
	}

	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	inc_unacked(mdev);

	drbd_generic_make_request(mdev, fault_type, e->private_bio);
	maybe_kick_lo(mdev);

	return TRUE;

out_free_e:
	kfree(di);
	put_ldev(mdev);
	drbd_free_ee(mdev, e);
	return FALSE;
}
static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
{
	int self, peer, rv = -100;
	unsigned long ch_self, ch_peer;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	ch_peer = mdev->p_uuid[UI_SIZE];
	ch_self = mdev->comm_bm_set;

	switch (mdev->net_conf->after_sb_0p) {
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
	case ASB_CALL_HELPER:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_DISCARD_YOUNGER_PRI:
		if (self == 0 && peer == 1) {
			rv = -1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv =  1;
			break;
		}
		/* Else fall through to one of the other strategies... */
	case ASB_DISCARD_OLDER_PRI:
		if (self == 0 && peer == 1) {
			rv = 1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv = -1;
			break;
		}
		/* Else fall through to one of the other strategies... */
		dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
		     "Using discard-least-changes instead\n");
	case ASB_DISCARD_ZERO_CHG:
		if (ch_peer == 0 && ch_self == 0) {
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
				? -1 : 1;
			break;
		} else {
			if (ch_peer == 0) { rv =  1; break; }
			if (ch_self == 0) { rv = -1; break; }
		}
		if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
			break;
	case ASB_DISCARD_LEAST_CHG:
		if	(ch_self < ch_peer)
			rv = -1;
		else if (ch_self > ch_peer)
			rv =  1;
		else /* ( ch_self == ch_peer ) */
		     /* Well, then use something else. */
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
				? -1 : 1;
		break;
	case ASB_DISCARD_LOCAL:
		rv = -1;
		break;
	case ASB_DISCARD_REMOTE:
		rv =  1;
	}

	return rv;
}
static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
{
	int self, peer, hg, rv = -100;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	switch (mdev->net_conf->after_sb_1p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CONSENSUS:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_SECONDARY)
			rv = hg;
		if (hg == 1  && mdev->state.role == R_PRIMARY)
			rv = hg;
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCARD_SECONDARY:
		return mdev->state.role == R_PRIMARY ? 1 : -1;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_PRIMARY) {
			self = drbd_set_role(mdev, R_SECONDARY, 0);
			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (self != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}
static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
{
	int self, peer, hg, rv = -100;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	switch (mdev->net_conf->after_sb_2p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1) {
			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (self != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}
static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
			   u64 bits, u64 flags)
{
	if (!uuid) {
		dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
		return;
	}
	dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
	     text,
	     (unsigned long long)uuid[UI_CURRENT],
	     (unsigned long long)uuid[UI_BITMAP],
	     (unsigned long long)uuid[UI_HISTORY_START],
	     (unsigned long long)uuid[UI_HISTORY_END],
	     (unsigned long long)bits,
	     (unsigned long long)flags);
}
/*
  100	after split brain try auto recover
    2	C_SYNC_SOURCE set BitMap
    1	C_SYNC_SOURCE use BitMap
    0	no Sync
   -1	C_SYNC_TARGET use BitMap
   -2	C_SYNC_TARGET set BitMap
 -100	after split brain, disconnect
-1000	unrelated data
 */
static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
{
	u64 self, peer;
	int i, j;

	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);

	*rule_nr = 10;
	if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
		return 0;

	*rule_nr = 20;
	if ((self == UUID_JUST_CREATED || self == (u64)0) &&
	     peer != UUID_JUST_CREATED)
		return -2;

	*rule_nr = 30;
	if (self != UUID_JUST_CREATED &&
	    (peer == UUID_JUST_CREATED || peer == (u64)0))
		return 2;

	if (self == peer) {
		int rct, dc; /* roles at crash time */

		if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {

			if (mdev->agreed_pro_version < 91)
				return -1001;

			if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
				dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
				drbd_uuid_set_bm(mdev, 0UL);

				drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
					       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
				*rule_nr = 34;
			} else {
				dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
				*rule_nr = 36;
			}

			return 1;
		}

		if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {

			if (mdev->agreed_pro_version < 91)
				return -1001;

			if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
				dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");

				mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
				mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
				mdev->p_uuid[UI_BITMAP] = 0UL;

				drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
				*rule_nr = 35;
			} else {
				dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
				*rule_nr = 37;
			}

			return -1;
		}

		/* Common power [off|failure] */
		rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
			(mdev->p_uuid[UI_FLAGS] & 2);
		/* lowest bit is set when we were primary,
		 * next bit (weight 2) is set when peer was primary */
		*rule_nr = 40;

		switch (rct) {
		case 0: /* !self_pri && !peer_pri */ return 0;
		case 1: /*  self_pri && !peer_pri */ return 1;
		case 2: /* !self_pri &&  peer_pri */ return -1;
		case 3: /*  self_pri &&  peer_pri */
			dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
			return dc ? -1 : 1;
		}
	}

	*rule_nr = 50;
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer)
		return -1;

	*rule_nr = 51;
	peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
		peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1);
		if (self == peer) {
			/* The last P_SYNC_UUID did not get through. Undo the last start of
			   resync as sync source modifications of the peer's UUIDs. */

			if (mdev->agreed_pro_version < 91)
				return -1001;

			mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
			mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
			return -1;
		}
	}

	*rule_nr = 60;
	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		peer = mdev->p_uuid[i] & ~((u64)1);
		if (self == peer)
			return -2;
	}

	*rule_nr = 70;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	if (self == peer)
		return 1;

	*rule_nr = 71;
	self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1);
		peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
		if (self == peer) {
			/* The last P_SYNC_UUID did not get through. Undo the last start of
			   resync as sync source modifications of our UUIDs. */

			if (mdev->agreed_pro_version < 91)
				return -1001;

			_drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
			_drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);

			dev_info(DEV, "Undid last start of resync:\n");

			drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
				       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);

			return 1;
		}
	}

	*rule_nr = 80;
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		if (self == peer)
			return 2;
	}

	*rule_nr = 90;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer && self != ((u64)0))
		return 100;

	*rule_nr = 100;
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
			peer = mdev->p_uuid[j] & ~((u64)1);
			if (self == peer)
				return -100;
		}
	}

	return -1000;
}
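/* Illustrative example (values made up): suppose our UUID set is
 *   current: 4711, bitmap: 4242
 * and the peer reports
 *   current: 4242, bitmap: 0.
 * The peer's current UUID equals our bitmap UUID, i.e. the peer still
 * sits on the data generation we started to diverge from: we become
 * sync source (return 1 above).  The mirrored case, our current UUID
 * matching the peer's bitmap UUID, makes us sync target (return -1). */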
/* drbd_sync_handshake() returns the new conn state on success, or
   CONN_MASK (-1) on failure.
 */
static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
					   enum drbd_disk_state peer_disk) __must_hold(local)
{
	int hg, rule_nr;
	enum drbd_conns rv = C_MASK;
	enum drbd_disk_state mydisk;

	mydisk = mdev->state.disk;
	if (mydisk == D_NEGOTIATING)
		mydisk = mdev->new_state_tmp.disk;

	dev_info(DEV, "drbd_sync_handshake:\n");
	drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
	drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
		       mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

	hg = drbd_uuid_compare(mdev, &rule_nr);

	dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);

	if (hg == -1000) {
		dev_alert(DEV, "Unrelated data, aborting!\n");
		return C_MASK;
	}
	if (hg == -1001) {
		dev_alert(DEV, "To resolve this both sides have to support at least protocol\n");
		return C_MASK;
	}

	if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
	    (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
		int f = (hg == -100) || abs(hg) == 2;
		hg = mydisk > D_INCONSISTENT ? 1 : -1;
		if (f)
			hg = hg*2;
		dev_info(DEV, "Becoming sync %s due to disk states.\n",
		     hg > 0 ? "source" : "target");
	}

	if (abs(hg) == 100)
		drbd_khelper(mdev, "initial-split-brain");

	if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
		int pcount = (mdev->state.role == R_PRIMARY)
			   + (peer_role == R_PRIMARY);
		int forced = (hg == -100);

		switch (pcount) {
		case 0:
			hg = drbd_asb_recover_0p(mdev);
			break;
		case 1:
			hg = drbd_asb_recover_1p(mdev);
			break;
		case 2:
			hg = drbd_asb_recover_2p(mdev);
			break;
		}
		if (abs(hg) < 100) {
			dev_warn(DEV, "Split-Brain detected, %d primaries, "
			     "automatically solved. Sync from %s node\n",
			     pcount, (hg < 0) ? "peer" : "this");
			if (forced) {
				dev_warn(DEV, "Doing a full sync, since"
				     " UUIDs were ambiguous.\n");
				hg = hg*2;
			}
		}
	}

	if (hg == -100) {
		if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
			hg = -1;
		if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
			hg = 1;

		if (abs(hg) < 100)
			dev_warn(DEV, "Split-Brain detected, manually solved. "
			     "Sync from %s node\n",
			     (hg < 0) ? "peer" : "this");
	}

	if (hg == -100) {
		/* FIXME this log message is not correct if we end up here
		 * after an attempted attach on a diskless node.
		 * We just refuse to attach -- well, we drop the "connection"
		 * to that disk, in a way... */
		dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
		drbd_khelper(mdev, "split-brain");
		return C_MASK;
	}

	if (hg > 0 && mydisk <= D_INCONSISTENT) {
		dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
		return C_MASK;
	}

	if (hg < 0 && /* by intention we do not use mydisk here. */
	    mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
		switch (mdev->net_conf->rr_conflict) {
		case ASB_CALL_HELPER:
			drbd_khelper(mdev, "pri-lost");
			/* fall through */
		case ASB_DISCONNECT:
			dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
			return C_MASK;
		default:
			dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
			     " assumption\n");
		}
	}

	if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
		if (hg == 0)
			dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
		else
			dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
				 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
				 abs(hg) >= 2 ? "full" : "bit-map based");
		return C_MASK;
	}

	if (abs(hg) >= 2) {
		dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
			return C_MASK;
	}

	if (hg > 0) { /* become sync source. */
		rv = C_WF_BITMAP_S;
	} else if (hg < 0) { /* become sync target */
		rv = C_WF_BITMAP_T;
	} else {
		rv = C_CONNECTED;
		if (drbd_bm_total_weight(mdev)) {
			dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
			     drbd_bm_total_weight(mdev));
		}
	}

	return rv;
}
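/* Rough mapping of the handshake result (a sketch; the full value
 * table is the comment block before drbd_uuid_compare()):
 *   hg >  0  ->  C_WF_BITMAP_S, we send our bitmap, become SyncSource
 *   hg <  0  ->  C_WF_BITMAP_T, we receive the bitmap, become SyncTarget
 *   hg == 0  ->  C_CONNECTED, no resync
 * |hg| >= 2 additionally forces a full sync by setting all bitmap bits. */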
/* returns 1 if invalid */
static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
{
	/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
	if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
	    (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
		return 0;

	/* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
	if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
	    self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
		return 1;

	/* everything else is valid if they are equal on both sides. */
	if (peer == self)
		return 0;

	/* everything else is invalid. */
	return 1;
}
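/* Example: after-sb-0pri "discard-local" on this side is only
 * compatible with "discard-remote" on the peer (and vice versa);
 * "discard-local" configured on both sides would make both nodes
 * throw their data away, so cmp_after_sb() flags it as invalid. */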
static int receive_protocol(struct drbd_conf *mdev, struct p_header *h)
{
	struct p_protocol *p = (struct p_protocol *)h;
	int header_size, data_size;
	int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
	int p_want_lose, p_two_primaries, cf;
	char p_integrity_alg[SHARED_SECRET_MAX] = "";

	header_size = sizeof(*p) - sizeof(*h);
	data_size   = h->length  - header_size;

	if (drbd_recv(mdev, h->payload, header_size) != header_size)
		return FALSE;

	p_proto		= be32_to_cpu(p->protocol);
	p_after_sb_0p	= be32_to_cpu(p->after_sb_0p);
	p_after_sb_1p	= be32_to_cpu(p->after_sb_1p);
	p_after_sb_2p	= be32_to_cpu(p->after_sb_2p);
	p_two_primaries = be32_to_cpu(p->two_primaries);
	cf		= be32_to_cpu(p->conn_flags);
	p_want_lose = cf & CF_WANT_LOSE;

	clear_bit(CONN_DRY_RUN, &mdev->flags);

	if (cf & CF_DRY_RUN)
		set_bit(CONN_DRY_RUN, &mdev->flags);

	if (p_proto != mdev->net_conf->wire_protocol) {
		dev_err(DEV, "incompatible communication protocols\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
		dev_err(DEV, "incompatible after-sb-0pri settings\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
		dev_err(DEV, "incompatible after-sb-1pri settings\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
		dev_err(DEV, "incompatible after-sb-2pri settings\n");
		goto disconnect;
	}

	if (p_want_lose && mdev->net_conf->want_lose) {
		dev_err(DEV, "both sides have the 'want_lose' flag set\n");
		goto disconnect;
	}

	if (p_two_primaries != mdev->net_conf->two_primaries) {
		dev_err(DEV, "incompatible setting of the two-primaries options\n");
		goto disconnect;
	}

	if (mdev->agreed_pro_version >= 87) {
		unsigned char *my_alg = mdev->net_conf->integrity_alg;

		if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
			return FALSE;

		p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
		if (strcmp(p_integrity_alg, my_alg)) {
			dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
			goto disconnect;
		}
		dev_info(DEV, "data-integrity-alg: %s\n",
		     my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
	}

	return TRUE;

disconnect:
	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	return FALSE;
}
/* helper function
 * input: alg name, feature name
 * return: NULL (alg name was "")
 *         ERR_PTR(error) if something goes wrong
 *         or the crypto hash ptr, if it worked out ok. */
struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
		const char *alg, const char *name)
{
	struct crypto_hash *tfm;

	if (!alg[0])
		return NULL;

	tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
			alg, name, PTR_ERR(tfm));
		return tfm;
	}
	if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
		crypto_free_hash(tfm);
		dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
		return ERR_PTR(-EINVAL);
	}
	return tfm;
}
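/* Callers are expected to distinguish the three outcomes, roughly:
 *
 *	tfm = drbd_crypto_alloc_digest_safe(mdev, alg, "verify-alg");
 *	if (IS_ERR(tfm))
 *		goto disconnect;	// allocation or type check failed
 *	if (tfm == NULL)
 *		;			// alg was "", feature stays disabled
 *
 * (sketch only; see receive_SyncParam() below for the real usage) */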
static int receive_SyncParam(struct drbd_conf *mdev, struct p_header *h)
{
	int ok = TRUE;
	struct p_rs_param_89 *p = (struct p_rs_param_89 *)h;
	unsigned int header_size, data_size, exp_max_sz;
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	const int apv = mdev->agreed_pro_version;

	exp_max_sz  = apv <= 87 ? sizeof(struct p_rs_param)
		    : apv == 88 ? sizeof(struct p_rs_param)
					+ SHARED_SECRET_MAX
		    : /* 89 */    sizeof(struct p_rs_param_89);

	if (h->length > exp_max_sz) {
		dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
		    h->length, exp_max_sz);
		return FALSE;
	}

	if (apv <= 88) {
		header_size = sizeof(struct p_rs_param) - sizeof(*h);
		data_size   = h->length  - header_size;
	} else /* apv >= 89 */ {
		header_size = sizeof(struct p_rs_param_89) - sizeof(*h);
		data_size   = h->length  - header_size;
		D_ASSERT(data_size == 0);
	}

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (drbd_recv(mdev, h->payload, header_size) != header_size)
		return FALSE;

	mdev->sync_conf.rate	  = be32_to_cpu(p->rate);

	if (apv >= 88) {
		if (apv == 88) {
			if (data_size > SHARED_SECRET_MAX) {
				dev_err(DEV, "verify-alg too long, "
				    "peer wants %u, accepting only %u byte\n",
						data_size, SHARED_SECRET_MAX);
				return FALSE;
			}

			if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
				return FALSE;

			/* we expect NUL terminated string */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[data_size-1] == 0);
			p->verify_alg[data_size-1] = 0;

		} else /* apv >= 89 */ {
			/* we still expect NUL terminated strings */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
			D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
			p->verify_alg[SHARED_SECRET_MAX-1] = 0;
			p->csums_alg[SHARED_SECRET_MAX-1] = 0;
		}

		if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
				    mdev->sync_conf.verify_alg, p->verify_alg);
				goto disconnect;
			}
			verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->verify_alg, "verify-alg");
			if (IS_ERR(verify_tfm)) {
				verify_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
				    mdev->sync_conf.csums_alg, p->csums_alg);
				goto disconnect;
			}
			csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->csums_alg, "csums-alg");
			if (IS_ERR(csums_tfm)) {
				csums_tfm = NULL;
				goto disconnect;
			}
		}

		spin_lock(&mdev->peer_seq_lock);
		/* lock against drbd_nl_syncer_conf() */
		if (verify_tfm) {
			strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
			mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
			crypto_free_hash(mdev->verify_tfm);
			mdev->verify_tfm = verify_tfm;
			dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
		}
		if (csums_tfm) {
			strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
			mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
			crypto_free_hash(mdev->csums_tfm);
			mdev->csums_tfm = csums_tfm;
			dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
		}
		spin_unlock(&mdev->peer_seq_lock);
	}

	return ok;
disconnect:
	/* just for completeness: actually not needed,
	 * as this is not reached if csums_tfm was ok. */
	crypto_free_hash(csums_tfm);
	/* but free the verify_tfm again, if csums_tfm did not work out */
	crypto_free_hash(verify_tfm);
	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	return FALSE;
}
static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
{
	/* sorry, we currently have no working implementation
	 * of distributed TCQ */
}

/* warn if the arguments differ by more than 12.5% */
static void warn_if_differ_considerably(struct drbd_conf *mdev,
	const char *s, sector_t a, sector_t b)
{
	sector_t d;
	if (a == 0 || b == 0)
		return;
	d = (a > b) ? (a - b) : (b - a);
	if (d > (a>>3) || d > (b>>3))
		dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
		     (unsigned long long)a, (unsigned long long)b);
}
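/* The shift does the percentage math: a>>3 is a/8, i.e. 12.5% of a.
 * E.g. a = 1000s, b = 1150s gives d = 150 > 125 = a>>3, so we warn;
 * b = 1100s gives d = 100 and stays quiet.  (Illustrative numbers.) */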
static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
{
	struct p_sizes *p = (struct p_sizes *)h;
	enum determine_dev_size dd = unchanged;
	unsigned int max_seg_s;
	sector_t p_size, p_usize, my_usize;
	int ldsc = 0; /* local disk size changed */
	enum dds_flags ddsf;

	ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
	if (drbd_recv(mdev, h->payload, h->length) != h->length)
		return FALSE;

	p_size = be64_to_cpu(p->d_size);
	p_usize = be64_to_cpu(p->u_size);

	if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
		dev_err(DEV, "some backing storage is needed\n");
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return FALSE;
	}

	/* just store the peer's disk size for now.
	 * we still need to figure out whether we accept that. */
	mdev->p_size = p_size;

#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
	if (get_ldev(mdev)) {
		warn_if_differ_considerably(mdev, "lower level device sizes",
			   p_size, drbd_get_max_capacity(mdev->ldev));
		warn_if_differ_considerably(mdev, "user requested size",
					    p_usize, mdev->ldev->dc.disk_size);

		/* if this is the first connect, or an otherwise expected
		 * param exchange, choose the minimum */
		if (mdev->state.conn == C_WF_REPORT_PARAMS)
			p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
					       p_usize);

		my_usize = mdev->ldev->dc.disk_size;

		if (mdev->ldev->dc.disk_size != p_usize) {
			mdev->ldev->dc.disk_size = p_usize;
			dev_info(DEV, "Peer sets u_size to %lu sectors\n",
			     (unsigned long)mdev->ldev->dc.disk_size);
		}

		/* Never shrink a device with usable data during connect.
		   But allow online shrinking if we are connected. */
		if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
		    drbd_get_capacity(mdev->this_bdev) &&
		    mdev->state.disk >= D_OUTDATED &&
		    mdev->state.conn < C_CONNECTED) {
			dev_err(DEV, "The peer's disk size is too small!\n");
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			mdev->ldev->dc.disk_size = my_usize;
			put_ldev(mdev);
			return FALSE;
		}
		put_ldev(mdev);
	}
#undef min_not_zero

	ddsf = be16_to_cpu(p->dds_flags);
	if (get_ldev(mdev)) {
		dd = drbd_determin_dev_size(mdev, ddsf);
		put_ldev(mdev);
		if (dd == dev_size_error)
			return FALSE;
		drbd_md_sync(mdev);
	} else {
		/* I am diskless, need to accept the peer's size. */
		drbd_set_my_capacity(mdev, p_size);
	}

	if (get_ldev(mdev)) {
		if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
			mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
			ldsc = 1;
		}

		max_seg_s = be32_to_cpu(p->max_segment_size);
		if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
			drbd_setup_queue_param(mdev, max_seg_s);

		drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
		put_ldev(mdev);
	}

	if (mdev->state.conn > C_WF_REPORT_PARAMS) {
		if (be64_to_cpu(p->c_size) !=
		    drbd_get_capacity(mdev->this_bdev) || ldsc) {
			/* we have different sizes, probably peer
			 * needs to know my new size... */
			drbd_send_sizes(mdev, 0, ddsf);
		}
		if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
		    (dd == grew && mdev->state.conn == C_CONNECTED)) {
			if (mdev->state.pdsk >= D_INCONSISTENT &&
			    mdev->state.disk >= D_INCONSISTENT) {
				if (ddsf & DDSF_NO_RESYNC)
					dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
				else
					resync_after_online_grow(mdev);
			} else
				set_bit(RESYNC_AFTER_NEG, &mdev->flags);
		}
	}

	return TRUE;
}
static int receive_uuids(struct drbd_conf *mdev, struct p_header *h)
{
	struct p_uuids *p = (struct p_uuids *)h;
	u64 *p_uuid;
	int i;

	ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
	if (drbd_recv(mdev, h->payload, h->length) != h->length)
		return FALSE;

	p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);

	for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
		p_uuid[i] = be64_to_cpu(p->uuid[i]);

	kfree(mdev->p_uuid);
	mdev->p_uuid = p_uuid;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.disk < D_INCONSISTENT &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return FALSE;
	}

	if (get_ldev(mdev)) {
		int skip_initial_sync =
			mdev->state.conn == C_CONNECTED &&
			mdev->agreed_pro_version >= 90 &&
			mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
			(p_uuid[UI_FLAGS] & 8);
		if (skip_initial_sync) {
			dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
			drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
					"clear_n_write from receive_uuids");
			_drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			drbd_md_sync(mdev);
		}
		put_ldev(mdev);
	}

	/* Before we test for the disk state, we should wait until an eventually
	   ongoing cluster wide state change is finished. That is important if
	   we are primary and are detaching from our disk. We need to see the
	   new disk state... */
	wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
	if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
		drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);

	return TRUE;
}
/**
 * convert_state() - Converts the peer's view of the cluster state to our point of view
 * @ps:		The state as seen by the peer.
 */
static union drbd_state convert_state(union drbd_state ps)
{
	union drbd_state ms;

	static enum drbd_conns c_tab[] = {
		[C_CONNECTED] = C_CONNECTED,

		[C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
		[C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
		[C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
		[C_VERIFY_S]       = C_VERIFY_T,
		[C_MASK]   = C_MASK,
	};

	ms.i = ps.i;

	ms.conn = c_tab[ps.conn];
	ms.peer = ps.role;
	ms.role = ps.peer;
	ms.pdsk = ps.disk;
	ms.disk = ps.pdsk;
	ms.peer_isp = (ps.aftr_isp | ps.user_isp);

	return ms;
}
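/* Example: if the peer reports "I am Primary, my disk is UpToDate,
 * I see my peer's disk as Inconsistent", convert_state() turns that
 * into our view "peer is Primary, pdsk is UpToDate, my disk is (seen
 * as) Inconsistent" by swapping the role/peer and disk/pdsk fields. */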
static int receive_req_state(struct drbd_conf *mdev, struct p_header *h)
{
	struct p_req_state *p = (struct p_req_state *)h;
	union drbd_state mask, val;
	int rv;

	ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
	if (drbd_recv(mdev, h->payload, h->length) != h->length)
		return FALSE;

	mask.i = be32_to_cpu(p->mask);
	val.i = be32_to_cpu(p->val);

	if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
	    test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
		drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
		return TRUE;
	}

	mask = convert_state(mask);
	val = convert_state(val);

	rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);

	drbd_send_sr_reply(mdev, rv);
	drbd_md_sync(mdev);

	return TRUE;
}
static int receive_state(struct drbd_conf *mdev, struct p_header *h)
{
	struct p_state *p = (struct p_state *)h;
	enum drbd_conns nconn, oconn;
	union drbd_state ns, peer_state;
	enum drbd_disk_state real_peer_disk;
	int rv;

	ERR_IF(h->length != (sizeof(*p)-sizeof(*h)))
		return FALSE;

	if (drbd_recv(mdev, h->payload, h->length) != h->length)
		return FALSE;

	peer_state.i = be32_to_cpu(p->state);

	real_peer_disk = peer_state.disk;
	if (peer_state.disk == D_NEGOTIATING) {
		real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
		dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
	}

	spin_lock_irq(&mdev->req_lock);
 retry:
	oconn = nconn = mdev->state.conn;
	spin_unlock_irq(&mdev->req_lock);

	if (nconn == C_WF_REPORT_PARAMS)
		nconn = C_CONNECTED;

	if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
		int cr; /* consider resync */

		/* if we established a new connection */
		cr  = (oconn < C_CONNECTED);
		/* if we had an established connection
		 * and one of the nodes newly attaches a disk */
		cr |= (oconn == C_CONNECTED &&
		       (peer_state.disk == D_NEGOTIATING ||
			mdev->state.disk == D_NEGOTIATING));
		/* if we have both been inconsistent, and the peer has been
		 * forced to be UpToDate with --overwrite-data */
		cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
		/* if we had been plain connected, and the admin requested to
		 * start a sync by "invalidate" or "invalidate-remote" */
		cr |= (oconn == C_CONNECTED &&
		       (peer_state.conn >= C_STARTING_SYNC_S &&
			peer_state.conn <= C_WF_BITMAP_T));

		if (cr)
			nconn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);

		put_ldev(mdev);
		if (nconn == C_MASK) {
			nconn = C_CONNECTED;
			if (mdev->state.disk == D_NEGOTIATING) {
				drbd_force_state(mdev, NS(disk, D_DISKLESS));
			} else if (peer_state.disk == D_NEGOTIATING) {
				dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
				peer_state.disk = D_DISKLESS;
				real_peer_disk = D_DISKLESS;
			} else {
				if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
					return FALSE;
				D_ASSERT(oconn == C_WF_REPORT_PARAMS);
				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
				return FALSE;
			}
		}
	}

	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.conn != oconn)
		goto retry;
	clear_bit(CONSIDER_RESYNC, &mdev->flags);
	ns.i = mdev->state.i;
	ns.conn = nconn;
	ns.peer = peer_state.role;
	ns.pdsk = real_peer_disk;
	ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
	if ((nconn == C_CONNECTED || nconn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
		ns.disk = mdev->new_state_tmp.disk;

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE | CS_HARD, NULL);
	ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	if (rv < SS_SUCCESS) {
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return FALSE;
	}

	if (oconn > C_WF_REPORT_PARAMS) {
		if (nconn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
		    peer_state.disk != D_NEGOTIATING) {
			/* we want resync, peer has not yet decided to sync... */
			/* Nowadays only used when forcing a node into primary role and
			   setting its disk to UpToDate with that */
			drbd_send_uuids(mdev);
			drbd_send_state(mdev);
		}
	}

	mdev->net_conf->want_lose = 0;

	drbd_md_sync(mdev); /* update connected indicator, la_size, ... */

	return TRUE;
}
static int receive_sync_uuid(struct drbd_conf *mdev, struct p_header *h)
{
	struct p_rs_uuid *p = (struct p_rs_uuid *)h;

	wait_event(mdev->misc_wait,
		   mdev->state.conn == C_WF_SYNC_UUID ||
		   mdev->state.conn < C_CONNECTED ||
		   mdev->state.disk < D_NEGOTIATING);

	/* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */

	ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
	if (drbd_recv(mdev, h->payload, h->length) != h->length)
		return FALSE;

	/* Here the _drbd_uuid_ functions are right, current should
	   _not_ be rotated into the history */
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		_drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
		_drbd_uuid_set(mdev, UI_BITMAP, 0UL);

		drbd_start_resync(mdev, C_SYNC_TARGET);

		put_ldev(mdev);
	} else
		dev_err(DEV, "Ignoring SyncUUID packet!\n");

	return TRUE;
}
enum receive_bitmap_ret { OK, DONE, FAILED };

static enum receive_bitmap_ret
receive_bitmap_plain(struct drbd_conf *mdev, struct p_header *h,
	unsigned long *buffer, struct bm_xfer_ctx *c)
{
	unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
	unsigned want = num_words * sizeof(long);

	if (want != h->length) {
		dev_err(DEV, "%s:want (%u) != h->length (%u)\n", __func__, want, h->length);
		return FAILED;
	}
	if (want == 0)
		return DONE;
	if (drbd_recv(mdev, buffer, want) != want)
		return FAILED;

	drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);

	c->word_offset += num_words;
	c->bit_offset = c->word_offset * BITS_PER_LONG;
	if (c->bit_offset > c->bm_bits)
		c->bit_offset = c->bm_bits;

	return OK;
}
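/* A plain bitmap transfer is simply the on-disk bitmap cut into
 * packets of BM_PACKET_WORDS longs each; word_offset tracks how far
 * into the bitmap the next packet belongs.  E.g. with a 4KiB payload
 * and 64 bit longs one packet covers 512 words = 32768 bits of bitmap.
 * (Numbers are illustrative; BM_PACKET_WORDS is derived from the
 * actual packet payload size.) */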
static enum receive_bitmap_ret
recv_bm_rle_bits(struct drbd_conf *mdev,
		struct p_compressed_bm *p,
		struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	u64 look_ahead;
	u64 rl;
	u64 tmp;
	unsigned long s = c->bit_offset;
	unsigned long e;
	int len = p->head.length - (sizeof(*p) - sizeof(p->head));
	int toggle = DCBP_get_start(p);
	int have;
	int bits;

	bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));

	bits = bitstream_get_bits(&bs, &look_ahead, 64);
	if (bits < 0)
		return FAILED;

	for (have = bits; have > 0; s += rl, toggle = !toggle) {
		bits = vli_decode_bits(&rl, look_ahead);
		if (bits <= 0)
			return FAILED;

		if (toggle) {
			e = s + rl - 1;
			if (e >= c->bm_bits) {
				dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
				return FAILED;
			}
			_drbd_bm_set_bits(mdev, s, e);
		}

		if (have < bits) {
			dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
				have, bits, look_ahead,
				(unsigned int)(bs.cur.b - p->code),
				(unsigned int)bs.buf_len);
			return FAILED;
		}
		look_ahead >>= bits;
		have -= bits;

		bits = bitstream_get_bits(&bs, &tmp, 64 - have);
		if (bits < 0)
			return FAILED;
		look_ahead |= tmp << have;
		have += bits;
	}

	c->bit_offset = s;
	bm_xfer_ctx_bit_to_word_offset(c);

	return (s == c->bm_bits) ? DONE : OK;
}
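/* Decoding sketch: the payload is a sequence of variable-length-coded
 * run lengths, alternating between runs of clear and set bits,
 * starting with whichever DCBP_get_start() says.  E.g. (made-up runs)
 * 5,3,7 with start=0 mean: 5 bits clear, then 3 bits set (merged in
 * via _drbd_bm_set_bits), then 7 bits clear.  look_ahead is a 64 bit
 * window that is refilled from the bitstream as runs are consumed. */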
static enum receive_bitmap_ret
decode_bitmap_c(struct drbd_conf *mdev,
		struct p_compressed_bm *p,
		struct bm_xfer_ctx *c)
{
	if (DCBP_get_code(p) == RLE_VLI_Bits)
		return recv_bm_rle_bits(mdev, p, c);

	/* other variants had been implemented for evaluation,
	 * but have been dropped as this one turned out to be "best"
	 * during all our tests. */

	dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
	return FAILED;
}
void INFO_bm_xfer_stats(struct drbd_conf *mdev,
		const char *direction, struct bm_xfer_ctx *c)
{
	/* what would it take to transfer it "plaintext" */
	unsigned plain = sizeof(struct p_header) *
		((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
		+ c->bm_words * sizeof(long);
	unsigned total = c->bytes[0] + c->bytes[1];
	unsigned r;

	/* total can not be zero. but just in case: */
	if (total == 0)
		return;

	/* don't report if not compressed */
	if (total >= plain)
		return;

	/* total < plain. check for overflow, still */
	r = (total > UINT_MAX/1000) ? (total / (plain/1000))
				    : (1000 * total / plain);

	if (r > 1000)
		r = 1000;

	r = 1000 - r;
	dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
	     "total %u; compression: %u.%u%%\n",
			direction,
			c->bytes[1], c->packets[1],
			c->bytes[0], c->packets[0],
			total, r/10, r % 10);
}
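/* The ratio is computed in tenths of a percent to avoid floating
 * point: e.g. plain = 100000 bytes and total = 1500 bytes received
 * gives r = 1000 - (1000*1500/100000) = 985, printed as
 * "compression: 98.5%".  (Illustrative numbers.) */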
/* Since we are processing the bitfield from lower addresses to higher,
   it does not matter whether we process it in 32 bit chunks or 64 bit
   chunks as long as it is little endian. (Understand it as byte stream,
   beginning with the lowest byte...) If we used big endian
   we would need to process it from the highest address to the lowest,
   in order to be agnostic to the 32 vs 64 bits issue.

   returns 0 on failure, 1 if we successfully received it. */
static int receive_bitmap(struct drbd_conf *mdev, struct p_header *h)
{
	struct bm_xfer_ctx c;
	void *buffer;
	enum receive_bitmap_ret ret;
	int ok = FALSE;

	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));

	drbd_bm_lock(mdev, "receive bitmap");

	/* maybe we should use some per thread scratch page,
	 * and allocate that during initial device creation? */
	buffer	 = (unsigned long *) __get_free_page(GFP_NOIO);
	if (!buffer) {
		dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
		goto out;
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),
	};

	do {
		if (h->command == P_BITMAP) {
			ret = receive_bitmap_plain(mdev, h, buffer, &c);
		} else if (h->command == P_COMPRESSED_BITMAP) {
			/* MAYBE: sanity check that we speak proto >= 90,
			 * and the feature is enabled! */
			struct p_compressed_bm *p;

			if (h->length > BM_PACKET_PAYLOAD_BYTES) {
				dev_err(DEV, "ReportCBitmap packet too large\n");
				goto out;
			}
			/* use the page buff */
			p = buffer;
			memcpy(p, h, sizeof(*h));
			if (drbd_recv(mdev, p->head.payload, h->length) != h->length)
				goto out;
			if (p->head.length <= (sizeof(*p) - sizeof(p->head))) {
				dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", p->head.length);
				goto out;
			}
			ret = decode_bitmap_c(mdev, p, &c);
		} else {
			dev_warn(DEV, "receive_bitmap: h->command neither ReportBitMap nor ReportCBitMap (is 0x%x)", h->command);
			goto out;
		}

		c.packets[h->command == P_BITMAP]++;
		c.bytes[h->command == P_BITMAP] += sizeof(struct p_header) + h->length;

		if (ret != OK)
			break;

		if (!drbd_recv_header(mdev, h))
			goto out;
	} while (ret == OK);
	if (ret == FAILED)
		goto out;

	INFO_bm_xfer_stats(mdev, "receive", &c);

	if (mdev->state.conn == C_WF_BITMAP_T) {
		ok = !drbd_send_bitmap(mdev);
		if (!ok)
			goto out;
		/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
		ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		D_ASSERT(ok == SS_SUCCESS);
	} else if (mdev->state.conn != C_WF_BITMAP_S) {
		/* admin may have requested C_DISCONNECTING,
		 * other threads may have noticed network errors */
		dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
		    drbd_conn_str(mdev->state.conn));
	}

	ok = TRUE;
 out:
	drbd_bm_unlock(mdev);
	if (ok && mdev->state.conn == C_WF_BITMAP_S)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	free_page((unsigned long) buffer);
	return ok;
}
static int receive_skip(struct drbd_conf *mdev, struct p_header *h)
{
	/* TODO zero copy sink :) */
	static char sink[128];
	int size, want, r;

	dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
	     h->command, h->length);

	size = h->length;
	while (size > 0) {
		want = min_t(int, size, sizeof(sink));
		r = drbd_recv(mdev, sink, want);
		ERR_IF(r <= 0) break;
		size -= r;
	}
	return size == 0;
}

static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h)
{
	if (mdev->state.disk >= D_INCONSISTENT)
		drbd_kick_lo(mdev);

	/* Make sure we've acked all the TCP data associated
	 * with the data requests being unplugged */
	drbd_tcp_quickack(mdev->data.socket);

	return TRUE;
}
typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, struct p_header *);

static drbd_cmd_handler_f drbd_default_handler[] = {
	[P_DATA]	    = receive_Data,
	[P_DATA_REPLY]	    = receive_DataReply,
	[P_RS_DATA_REPLY]   = receive_RSDataReply,
	[P_BARRIER]	    = receive_Barrier,
	[P_BITMAP]	    = receive_bitmap,
	[P_COMPRESSED_BITMAP] = receive_bitmap,
	[P_UNPLUG_REMOTE]   = receive_UnplugRemote,
	[P_DATA_REQUEST]    = receive_DataRequest,
	[P_RS_DATA_REQUEST] = receive_DataRequest,
	[P_SYNC_PARAM]	    = receive_SyncParam,
	[P_SYNC_PARAM89]    = receive_SyncParam,
	[P_PROTOCOL]	    = receive_protocol,
	[P_UUIDS]	    = receive_uuids,
	[P_SIZES]	    = receive_sizes,
	[P_STATE]	    = receive_state,
	[P_STATE_CHG_REQ]   = receive_req_state,
	[P_SYNC_UUID]	    = receive_sync_uuid,
	[P_OV_REQUEST]	    = receive_DataRequest,
	[P_OV_REPLY]	    = receive_DataRequest,
	[P_CSUM_RS_REQUEST] = receive_DataRequest,
	/* anything missing from this table is in
	 * the asender_tbl, see get_asender_cmd */
	[P_MAX_CMD]	    = NULL,
};

static drbd_cmd_handler_f *drbd_cmd_handler = drbd_default_handler;
static drbd_cmd_handler_f *drbd_opt_cmd_handler;
static void drbdd(struct drbd_conf *mdev)
{
	drbd_cmd_handler_f handler;
	struct p_header *header = &mdev->data.rbuf.header;

	while (get_t_state(&mdev->receiver) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (!drbd_recv_header(mdev, header)) {
			drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
			break;
		}

		if (header->command < P_MAX_CMD)
			handler = drbd_cmd_handler[header->command];
		else if (P_MAY_IGNORE < header->command
		     && header->command < P_MAX_OPT_CMD)
			handler = drbd_opt_cmd_handler[header->command-P_MAY_IGNORE];
		else if (header->command > P_MAX_OPT_CMD)
			handler = receive_skip;
		else
			handler = NULL;

		if (unlikely(!handler)) {
			dev_err(DEV, "unknown packet type %d, l: %d!\n",
			    header->command, header->length);
			drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
			break;
		}
		if (unlikely(!handler(mdev, header))) {
			dev_err(DEV, "error receiving %s, l: %d!\n",
			    cmdname(header->command), header->length);
			drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
			break;
		}
	}
}
static void drbd_fail_pending_reads(struct drbd_conf *mdev)
{
	struct hlist_head *slot;
	struct hlist_node *pos;
	struct hlist_node *tmp;
	struct drbd_request *req;
	int i;

	/*
	 * Application READ requests
	 */
	spin_lock_irq(&mdev->req_lock);
	for (i = 0; i < APP_R_HSIZE; i++) {
		slot = mdev->app_reads_hash+i;
		hlist_for_each_entry_safe(req, pos, tmp, slot, colision) {
			/* it may (but should not any longer!)
			 * be on the work queue; if that assert triggers,
			 * we need to also grab the
			 * spin_lock_irq(&mdev->data.work.q_lock);
			 * and list_del_init here. */
			D_ASSERT(list_empty(&req->w.list));
			/* It would be nice to complete outside of spinlock.
			 * But this is easier for now. */
			_req_mod(req, connection_lost_while_pending);
		}
	}
	for (i = 0; i < APP_R_HSIZE; i++)
		if (!hlist_empty(mdev->app_reads_hash+i))
			dev_warn(DEV, "ASSERT FAILED: app_reads_hash[%d].first: "
				"%p, should be NULL\n", i, mdev->app_reads_hash[i].first);

	memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
	spin_unlock_irq(&mdev->req_lock);
}

void drbd_flush_workqueue(struct drbd_conf *mdev)
{
	struct drbd_wq_barrier barr;

	barr.w.cb = w_prev_work_done;
	init_completion(&barr.done);
	drbd_queue_work(&mdev->data.work, &barr.w);
	wait_for_completion(&barr.done);
}
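/* This is the usual "barrier work item" trick: w_prev_work_done just
 * completes barr.done, so once wait_for_completion() returns, every
 * work item queued before the barrier has been processed (or
 * canceled) by the worker thread. */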
static void drbd_disconnect(struct drbd_conf *mdev)
{
	enum drbd_fencing_p fp;
	union drbd_state os, ns;
	int rv = SS_UNKNOWN_ERROR;
	unsigned int i;

	if (mdev->state.conn == C_STANDALONE)
		return;
	if (mdev->state.conn >= C_WF_CONNECTION)
		dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n",
				drbd_conn_str(mdev->state.conn));

	/* asender does not clean up anything. it must not interfere, either */
	drbd_thread_stop(&mdev->asender);
	drbd_free_sock(mdev);

	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	/* We do not have data structures that would allow us to
	 * get the rs_pending_cnt down to 0 again.
	 *  * On C_SYNC_TARGET we do not have any data structures describing
	 *    the pending RSDataRequest's we have sent.
	 *  * On C_SYNC_SOURCE there is no data structure that tracks
	 *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
	 *  And no, it is not the sum of the reference counts in the
	 *  resync_LRU. The resync_LRU tracks the whole operation including
	 *  the disk-IO, while the rs_pending_cnt only tracks the blocks
	 *  on the fly. */
	drbd_rs_cancel_all(mdev);
	mdev->rs_failed = 0;
	atomic_set(&mdev->rs_pending_cnt, 0);
	wake_up(&mdev->misc_wait);

	/* make sure syncer is stopped and w_resume_next_sg queued */
	del_timer_sync(&mdev->resync_timer);
	set_bit(STOP_SYNC_TIMER, &mdev->flags);
	resync_timer_fn((unsigned long)mdev);

	/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
	 * w_make_resync_request etc. which may still be on the worker queue
	 * to be "canceled" */
	drbd_flush_workqueue(mdev);

	/* This also does reclaim_net_ee().  If we do this too early, we might
	 * miss some resync ee and pages.*/
	drbd_process_done_ee(mdev);

	kfree(mdev->p_uuid);
	mdev->p_uuid = NULL;

	if (!mdev->state.susp)
		tl_clear(mdev);

	drbd_fail_pending_reads(mdev);

	dev_info(DEV, "Connection closed\n");

	drbd_md_sync(mdev);

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	if (mdev->state.role == R_PRIMARY) {
		if (fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN) {
			enum drbd_disk_state nps = drbd_try_outdate_peer(mdev);
			drbd_request_state(mdev, NS(pdsk, nps));
		}
	}

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;
	if (os.conn >= C_UNCONNECTED) {
		/* Do not restart in case we are C_DISCONNECTING */
		ns = os;
		ns.conn = C_UNCONNECTED;
		rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	}
	spin_unlock_irq(&mdev->req_lock);

	if (os.conn == C_DISCONNECTING) {
		struct hlist_head *h;
		wait_event(mdev->misc_wait, atomic_read(&mdev->net_cnt) == 0);

		/* we must not free the tl_hash
		 * while application io is still on the fly */
		wait_event(mdev->misc_wait, atomic_read(&mdev->ap_bio_cnt) == 0);

		spin_lock_irq(&mdev->req_lock);
		/* paranoia code */
		for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
			if (h->first)
				dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
						(int)(h - mdev->ee_hash), h->first);
		kfree(mdev->ee_hash);
		mdev->ee_hash = NULL;
		mdev->ee_hash_s = 0;

		/* paranoia code */
		for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
			if (h->first)
				dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
						(int)(h - mdev->tl_hash), h->first);
		kfree(mdev->tl_hash);
		mdev->tl_hash = NULL;
		mdev->tl_hash_s = 0;
		spin_unlock_irq(&mdev->req_lock);

		crypto_free_hash(mdev->cram_hmac_tfm);
		mdev->cram_hmac_tfm = NULL;

		kfree(mdev->net_conf);
		mdev->net_conf = NULL;
		drbd_request_state(mdev, NS(conn, C_STANDALONE));
	}

	/* tcp_close and release of sendpage pages can be deferred.  I don't
	 * want to use SO_LINGER, because apparently it can be deferred for
	 * more than 20 seconds (longest time I checked).
	 *
	 * Actually we don't care for exactly when the network stack does its
	 * put_page(), but release our reference on these pages right here.
	 */
	i = drbd_release_ee(mdev, &mdev->net_ee);
	if (i)
		dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
	i = atomic_read(&mdev->pp_in_use);
	if (i)
		dev_info(DEV, "pp_in_use = %u, expected 0\n", i);

	D_ASSERT(list_empty(&mdev->read_ee));
	D_ASSERT(list_empty(&mdev->active_ee));
	D_ASSERT(list_empty(&mdev->sync_ee));
	D_ASSERT(list_empty(&mdev->done_ee));

	/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
	atomic_set(&mdev->current_epoch->epoch_size, 0);
	D_ASSERT(list_empty(&mdev->current_epoch->list));
}
/*
 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
 * we can agree on is stored in agreed_pro_version.
 *
 * feature flags and the reserved array should be enough room for future
 * enhancements of the handshake protocol, and possible plugins...
 *
 * for now, they are expected to be zero, but ignored.
 */
static int drbd_send_handshake(struct drbd_conf *mdev)
{
	/* ASSERT current == mdev->receiver ... */
	struct p_handshake *p = &mdev->data.sbuf.handshake;
	int ok;

	if (mutex_lock_interruptible(&mdev->data.mutex)) {
		dev_err(DEV, "interrupted during initial handshake\n");
		return 0; /* interrupted. not ok. */
	}

	if (mdev->data.socket == NULL) {
		mutex_unlock(&mdev->data.mutex);
		return 0;
	}

	memset(p, 0, sizeof(*p));
	p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
	p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
	ok = _drbd_send_cmd(mdev, mdev->data.socket, P_HAND_SHAKE,
			    (struct p_header *)p, sizeof(*p), 0);
	mutex_unlock(&mdev->data.mutex);
	return ok;
}
/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 */
static int drbd_do_handshake(struct drbd_conf *mdev)
{
	/* ASSERT current == mdev->receiver ... */
	struct p_handshake *p = &mdev->data.rbuf.handshake;
	const int expect = sizeof(struct p_handshake)
			  -sizeof(struct p_header);
	int rv;

	rv = drbd_send_handshake(mdev);
	if (!rv)
		return 0;

	rv = drbd_recv_header(mdev, &p->head);
	if (!rv)
		return 0;

	if (p->head.command != P_HAND_SHAKE) {
		dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
		     cmdname(p->head.command), p->head.command);
		return -1;
	}

	if (p->head.length != expect) {
		dev_err(DEV, "expected HandShake length: %u, received: %u\n",
		     expect, p->head.length);
		return -1;
	}

	rv = drbd_recv(mdev, &p->head.payload, expect);

	if (rv != expect) {
		dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv);
		return 0;
	}

	p->protocol_min = be32_to_cpu(p->protocol_min);
	p->protocol_max = be32_to_cpu(p->protocol_max);
	if (p->protocol_max == 0)
		p->protocol_max = p->protocol_min;

	if (PRO_VERSION_MAX < p->protocol_min ||
	    PRO_VERSION_MIN > p->protocol_max)
		goto incompat;

	mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);

	dev_info(DEV, "Handshake successful: "
	     "Agreed network protocol version %d\n", mdev->agreed_pro_version);

	return 1;

 incompat:
	dev_err(DEV, "incompatible DRBD dialects: "
	    "I support %d-%d, peer supports %d-%d\n",
		PRO_VERSION_MIN, PRO_VERSION_MAX,
		p->protocol_min, p->protocol_max);
	return -1;
}
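/* Example negotiation (illustrative numbers): if we support protocols
 * 86-91 and the peer reports 87-94, the ranges overlap and we settle
 * on min(our max, peer max) = 91.  A peer reporting 80-85 would miss
 * our minimum entirely and we bail out via the incompat path above. */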
#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
static int drbd_do_auth(struct drbd_conf *mdev)
{
	dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
	dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
	return -1;
}
#else
#define CHALLENGE_LEN 64

/* Return value:
	1 - auth succeeded,
	0 - failed, try again (network error),
	-1 - auth failed, don't try again.
*/

static int drbd_do_auth(struct drbd_conf *mdev)
{
	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
	struct scatterlist sg;
	char *response = NULL;
	char *right_response = NULL;
	char *peers_ch = NULL;
	struct p_header p;
	unsigned int key_len = strlen(mdev->net_conf->shared_secret);
	unsigned int resp_size;
	struct hash_desc desc;
	int rv;

	desc.tfm = mdev->cram_hmac_tfm;
	desc.flags = 0;

	rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
				(u8 *)mdev->net_conf->shared_secret, key_len);
	if (rv) {
		dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	get_random_bytes(my_challenge, CHALLENGE_LEN);

	rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
	if (!rv)
		goto fail;

	rv = drbd_recv_header(mdev, &p);
	if (!rv)
		goto fail;

	if (p.command != P_AUTH_CHALLENGE) {
		dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
		    cmdname(p.command), p.command);
		rv = 0;
		goto fail;
	}

	if (p.length > CHALLENGE_LEN*2) {
		dev_err(DEV, "expected AuthChallenge payload too big.\n");
		rv = -1;
		goto fail;
	}

	peers_ch = kmalloc(p.length, GFP_NOIO);
	if (peers_ch == NULL) {
		dev_err(DEV, "kmalloc of peers_ch failed\n");
		rv = -1;
		goto fail;
	}

	rv = drbd_recv(mdev, peers_ch, p.length);

	if (rv != p.length) {
		dev_err(DEV, "short read AuthChallenge: l=%u\n", rv);
		rv = 0;
		goto fail;
	}

	resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
	response = kmalloc(resp_size, GFP_NOIO);
	if (response == NULL) {
		dev_err(DEV, "kmalloc of response failed\n");
		rv = -1;
		goto fail;
	}

	sg_init_table(&sg, 1);
	sg_set_buf(&sg, peers_ch, p.length);

	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
	if (rv) {
		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
	if (!rv)
		goto fail;

	rv = drbd_recv_header(mdev, &p);
	if (!rv)
		goto fail;

	if (p.command != P_AUTH_RESPONSE) {
		dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
		    cmdname(p.command), p.command);
		rv = 0;
		goto fail;
	}

	if (p.length != resp_size) {
		dev_err(DEV, "expected AuthResponse payload of wrong size\n");
		rv = 0;
		goto fail;
	}

	rv = drbd_recv(mdev, response, resp_size);

	if (rv != resp_size) {
		dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv);
		rv = 0;
		goto fail;
	}

	right_response = kmalloc(resp_size, GFP_NOIO);
	if (right_response == NULL) {
		dev_err(DEV, "kmalloc of right_response failed\n");
		rv = -1;
		goto fail;
	}

	sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);

	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
	if (rv) {
		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = !memcmp(response, right_response, resp_size);

	if (rv)
		dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
		     resp_size, mdev->net_conf->cram_hmac_alg);
	else
		rv = -1;

 fail:
	kfree(peers_ch);
	kfree(response);
	kfree(right_response);

	return rv;
}
#endif
*thi
)
4019 struct drbd_conf
*mdev
= thi
->mdev
;
4020 unsigned int minor
= mdev_to_minor(mdev
);
4023 sprintf(current
->comm
, "drbd%d_receiver", minor
);
4025 dev_info(DEV
, "receiver (re)started\n");
4028 h
= drbd_connect(mdev
);
4030 drbd_disconnect(mdev
);
4031 __set_current_state(TASK_INTERRUPTIBLE
);
4032 schedule_timeout(HZ
);
4035 dev_warn(DEV
, "Discarding network configuration.\n");
4036 drbd_force_state(mdev
, NS(conn
, C_DISCONNECTING
));
4041 if (get_net_conf(mdev
)) {
4047 drbd_disconnect(mdev
);
4049 dev_info(DEV
, "receiver terminated\n");
/* ********* acknowledge sender ******** */

static int got_RqSReply(struct drbd_conf *mdev, struct p_header *h)
{
	struct p_req_state_reply *p = (struct p_req_state_reply *)h;

	int retcode = be32_to_cpu(p->retcode);

	if (retcode >= SS_SUCCESS) {
		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
	} else {
		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
		    drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&mdev->state_wait);

	return TRUE;
}
static int got_Ping(struct drbd_conf *mdev, struct p_header *h)
{
	return drbd_send_ping_ack(mdev);
}

static int got_PingAck(struct drbd_conf *mdev, struct p_header *h)
{
	/* restore idle timeout */
	mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
	if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
		wake_up(&mdev->misc_wait);

	return TRUE;
}

static int got_IsInSync(struct drbd_conf *mdev, struct p_header *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);

	D_ASSERT(mdev->agreed_pro_version >= 89);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	drbd_rs_complete_io(mdev, sector);
	drbd_set_in_sync(mdev, sector, blksize);
	/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
	mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
	dec_rs_pending(mdev);

	return TRUE;
}
/* when we receive the ACK for a write request,
 * verify that we actually know about it */
static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
	u64 id, sector_t sector)
{
	struct hlist_head *slot = tl_hash_slot(mdev, sector);
	struct hlist_node *n;
	struct drbd_request *req;

	hlist_for_each_entry(req, n, slot, colision) {
		if ((unsigned long)req == (unsigned long)id) {
			if (req->sector != sector) {
				dev_err(DEV, "_ack_id_to_req: found req %p but it has "
				    "wrong sector (%llus versus %llus)\n", req,
				    (unsigned long long)req->sector,
				    (unsigned long long)sector);
				break;
			}
			return req;
		}
	}
	dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n",
		(void *)(unsigned long)id, (unsigned long long)sector);
	return NULL;
}

typedef struct drbd_request *(req_validator_fn)
	(struct drbd_conf *mdev, u64 id, sector_t sector);

static int validate_req_change_req_state(struct drbd_conf *mdev,
	u64 id, sector_t sector, req_validator_fn validator,
	const char *func, enum drbd_req_event what)
{
	struct drbd_request *req;
	struct bio_and_error m;

	spin_lock_irq(&mdev->req_lock);
	req = validator(mdev, id, sector);
	if (unlikely(!req)) {
		spin_unlock_irq(&mdev->req_lock);
		dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func);
		return FALSE;
	}
	__req_mod(req, what, &m);
	spin_unlock_irq(&mdev->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return TRUE;
}
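/* The validator indirection exists because write ACKs and read
 * (data reply) ACKs live in different hash tables: _ack_id_to_req()
 * looks in tl_hash, while _ar_id_to_req() (used by got_NegDReply
 * below) looks up pending application reads.  Both map the
 * (block_id, sector) pair from the packet back to our drbd_request
 * under req_lock. */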
static int got_BlockAck(struct drbd_conf *mdev, struct p_header *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);
	enum drbd_req_event what;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		drbd_set_in_sync(mdev, sector, blksize);
		dec_rs_pending(mdev);
		return TRUE;
	}
	switch (be16_to_cpu(h->command)) {
	case P_RS_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer_and_sis;
		break;
	case P_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer;
		break;
	case P_RECV_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
		what = recv_acked_by_peer;
		break;
	case P_DISCARD_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = conflict_discarded_by_peer;
		break;
	default:
		D_ASSERT(0);
		return FALSE;
	}

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ack_id_to_req, __func__, what);
}
static int got_NegAck(struct drbd_conf *mdev, struct p_header *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);

	if (__ratelimit(&drbd_ratelimit_state))
		dev_warn(DEV, "Got NegAck packet. Peer is in troubles?\n");

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		int size = be32_to_cpu(p->blksize);
		dec_rs_pending(mdev);
		drbd_rs_failed_io(mdev, sector, size);
		return TRUE;
	}
	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ack_id_to_req, __func__, neg_acked);
}

static int got_NegDReply(struct drbd_conf *mdev, struct p_header *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
	dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
	    (unsigned long long)sector, be32_to_cpu(p->blksize));

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ar_id_to_req, __func__, neg_acked);
}
static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header *h)
{
	sector_t sector;
	int size;
	struct p_block_ack *p = (struct p_block_ack *)h;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dec_rs_pending(mdev);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, sector);
		drbd_rs_failed_io(mdev, sector, size);
		put_ldev(mdev);
	}

	return TRUE;
}

static int got_BarrierAck(struct drbd_conf *mdev, struct p_header *h)
{
	struct p_barrier_ack *p = (struct p_barrier_ack *)h;

	tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));

	return TRUE;
}
static int got_OVResult(struct drbd_conf *mdev, struct p_header *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	struct drbd_work *w;
	sector_t sector;
	int size;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_oos_found(mdev, sector, size);
	else
		ov_oos_print(mdev);

	drbd_rs_complete_io(mdev, sector);
	dec_rs_pending(mdev);

	if (--mdev->ov_left == 0) {
		w = kmalloc(sizeof(*w), GFP_NOIO);
		if (w) {
			w->cb = w_ov_finished;
			drbd_queue_work_front(&mdev->data.work, w);
		} else {
			dev_err(DEV, "kmalloc(w) failed.");
			ov_oos_print(mdev);
			drbd_resync_finished(mdev);
		}
	}
	return TRUE;
}
4297 struct asender_cmd
{
4299 int (*process
)(struct drbd_conf
*mdev
, struct p_header
*h
);
static struct asender_cmd *get_asender_cmd(int cmd)
{
	static struct asender_cmd asender_tbl[] = {
		/* anything missing from this table is in
		 * the drbd_cmd_handler (drbd_default_handler) table,
		 * see the beginning of drbdd() */
	[P_PING]	    = { sizeof(struct p_header), got_Ping },
	[P_PING_ACK]	    = { sizeof(struct p_header), got_PingAck },
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_MAX_CMD]	    = { 0, NULL },
	};
	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
		return NULL;
	return &asender_tbl[cmd];
}
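
/* The asender ("ack sender") thread owns the meta socket: it sends pings
 * and acknowledgments for completed epoch entries (done_ee), and receives
 * the peer's acks, dispatching them through get_asender_cmd() above. */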
int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct p_header *h = &mdev->meta.rbuf.header;
	struct asender_cmd *cmd = NULL;

	int rv, len;
	void *buf    = h;
	int received = 0;
	int expect   = sizeof(struct p_header);
	int empty;

	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */

	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
			mdev->meta.socket->sk->sk_rcvtimeo =
				mdev->net_conf->ping_timeo*HZ/10;
		}
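
		/* While a ping is outstanding, sk_rcvtimeo is shortened to
		 * ping_timeo (just above); if the receive below then times
		 * out with -EAGAIN, that is taken as "PingAck did not arrive
		 * in time" and we reconnect. */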

		/* conditionally cork;
		 * it may hurt latency if we cork without much to send */
		if (!mdev->net_conf->no_cork &&
			3 < atomic_read(&mdev->unacked_cnt))
			drbd_tcp_cork(mdev->meta.socket);
		while (1) {
			clear_bit(SIGNAL_ASENDER, &mdev->flags);
			flush_signals(current);
			if (!drbd_process_done_ee(mdev)) {
				dev_err(DEV, "process_done_ee() = NOT_OK\n");
				goto reconnect;
			}
			/* to avoid race with newly queued ACKs */
			set_bit(SIGNAL_ASENDER, &mdev->flags);
			spin_lock_irq(&mdev->req_lock);
			empty = list_empty(&mdev->done_ee);
			spin_unlock_irq(&mdev->req_lock);
			/* new ack may have been queued right here,
			 * but then there is also a signal pending,
			 * and we start over... */
			if (empty)
				break;
		}
		/* but unconditionally uncork unless disabled */
		if (!mdev->net_conf->no_cork)
			drbd_tcp_uncork(mdev->meta.socket);

		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(mdev, mdev->meta.socket,
				     buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &mdev->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS  (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			dev_err(DEV, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			if (mdev->meta.socket->sk->sk_rcvtimeo ==
			    mdev->net_conf->ping_timeo*HZ/10) {
				dev_err(DEV, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &mdev->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

		if (received == expect && cmd == NULL) {
			if (unlikely(h->magic != BE_DRBD_MAGIC)) {
				dev_err(DEV, "magic?? on meta m: 0x%lx c: %d l: %d\n",
				    (long)be32_to_cpu(h->magic),
				    h->command, h->length);
				goto reconnect;
			}
			cmd = get_asender_cmd(be16_to_cpu(h->command));
			len = be16_to_cpu(h->length);
			if (unlikely(cmd == NULL)) {
				dev_err(DEV, "unknown command?? on meta m: 0x%lx c: %d l: %d\n",
				    (long)be32_to_cpu(h->magic),
				    h->command, h->length);
				goto disconnect;
			}
			expect = cmd->pkt_size;
			ERR_IF(len != expect-sizeof(struct p_header))
				goto reconnect;
		}
		if (received == expect) {
			D_ASSERT(cmd != NULL);
			if (!cmd->process(mdev, h))
				goto reconnect;

			/* packet fully consumed: reset parse state
			 * for the next header */
			buf	 = h;
			received = 0;
			expect	 = sizeof(struct p_header);
			cmd	 = NULL;
		}
	}

	if (0) {
reconnect:
		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
	}
	if (0) {
disconnect:
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	clear_bit(SIGNAL_ASENDER, &mdev->flags);

	D_ASSERT(mdev->state.conn < C_CONNECTED);
	dev_info(DEV, "asender terminated\n");

	return 0;
}