/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>

#include <linux/ctype.h>
#include <linux/mutex.h>

#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>

#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
static DEFINE_MUTEX(drbd_main_mutex);
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
static int w_md_sync(struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_work *w, int unused);
MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(proc_details, int, 0644);
#ifdef CONFIG_DRBD_FAULT_INJECTION
static int fault_count;

/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif

/* module parameter, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
int disable_sendpage;
int proc_details; /* Detail level in proc drbd */

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;" */
struct list_head drbd_tconns;  /* list of struct drbd_tconn */

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* peer requests */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
mempool_t *drbd_md_io_page_pool;
struct bio_set *drbd_md_io_bio_set;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a single linked list, the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t   drbd_pp_lock;
int          drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
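/*
 * For illustration: popping one page off this hand-rolled pool follows
 * the next pointer stashed in the page's private member, roughly
 *
 *	spin_lock(&drbd_pp_lock);
 *	page = drbd_pp_pool;
 *	if (page)
 *		drbd_pp_pool = (struct page *)page_private(page);
 *	spin_unlock(&drbd_pp_lock);
 *
 * which matches the walk drbd_destroy_mempools() below does when
 * draining it.
 */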
static const struct block_device_operations drbd_ops = {
	.owner   = THIS_MODULE,
	.open    = drbd_open,
	.release = drbd_release,
};

static void bio_destructor_drbd(struct bio *bio)
{
	bio_free(bio, drbd_md_io_bio_set);
}

struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
	struct bio *bio;

	if (!drbd_md_io_bio_set)
		return bio_alloc(gfp_mask, 1);

	bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
	if (!bio)
		return NULL;
	bio->bi_destructor = bio_destructor_drbd;
	return bio;
}
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function sparse works.
 */
int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&mdev->local_cnt))
			wake_up(&mdev->misc_wait);
	}
	return io_allowed;
}
/**
 * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
 * @tconn:	DRBD connection.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * epoch of not yet barrier-acked requests, this function will cause a
 * termination of the connection.
 */
void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_request *r;
	struct drbd_request *req = NULL;
	int expect_epoch = 0;
	int expect_size = 0;

	spin_lock_irq(&tconn->req_lock);

	/* find latest not yet barrier-acked write request,
	 * count writes in its epoch. */
	list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
		const unsigned s = r->rq_state;
		if (!req) {
			if (!(s & RQ_WRITE))
				continue;
			if (!(s & RQ_NET_MASK))
				continue;
			if (s & RQ_NET_DONE)
				continue;
			req = r;
			expect_epoch = req->epoch;
			expect_size++;
		} else {
			if (r->epoch != expect_epoch)
				break;
			if (!(s & RQ_WRITE))
				continue;
			/* if (s & RQ_DONE): not expected */
			/* if (!(s & RQ_NET_MASK)): not expected */
			expect_size++;
		}
	}

	/* first some paranoia code */
	if (req == NULL) {
		conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			 barrier_nr);
		goto bail;
	}
	if (expect_epoch != barrier_nr) {
		conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
			 barrier_nr, expect_epoch);
		goto bail;
	}

	if (expect_size != set_size) {
		conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			 barrier_nr, set_size, expect_size);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch */
	list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests) {
		if (req->epoch != expect_epoch)
			break;
		_req_mod(req, BARRIER_ACKED);
	}
	spin_unlock_irq(&tconn->req_lock);

	return;

bail:
	spin_unlock_irq(&tconn->req_lock);
	conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @mdev:	DRBD device.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
/* must hold resource->req_lock */
void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	struct drbd_request *req, *r;

	list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests)
		_req_mod(req, what);
}

void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	spin_lock_irq(&tconn->req_lock);
	_tl_restart(tconn, what);
	spin_unlock_irq(&tconn->req_lock);
}
/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @mdev:	DRBD device.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_tconn *tconn)
{
	tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
}
/**
 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
 * @mdev:	DRBD device.
 */
void tl_abort_disk_io(struct drbd_conf *mdev)
{
	struct drbd_tconn *tconn = mdev->tconn;
	struct drbd_request *req, *r;

	spin_lock_irq(&tconn->req_lock);
	list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests) {
		if (!(req->rq_state & RQ_LOCAL_PENDING))
			continue;
		if (req->w.mdev != mdev)
			continue;
		_req_mod(req, ABORT_DISK_IO);
	}
	spin_unlock_irq(&tconn->req_lock);
}
static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_tconn *tconn = thi->tconn;
	unsigned long flags;
	int retval;

	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
		 thi->name[0], thi->tconn->name);

restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "EXITING", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "RESTARTING" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees EXITING, and can remap to RESTARTING,
	 * or thread_start sees NONE, and can proceed as normal.
	 */

	if (thi->t_state == RESTARTING) {
		conn_info(tconn, "Restarting %s thread\n", thi->name);
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = NONE;
	smp_mb();
	complete_all(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	conn_info(tconn, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */

	kref_put(&tconn->kref, &conn_destroy);
	module_put(THIS_MODULE);
	return retval;
}
static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *), char *name)
{
	spin_lock_init(&thi->t_lock);
	thi->task    = NULL;
	thi->t_state = NONE;
	thi->function = func;
	thi->tconn = tconn;
	strncpy(thi->name, name, ARRAY_SIZE(thi->name));
}
int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	struct task_struct *nt;
	unsigned long flags;

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case NONE:
		conn_info(tconn, "Starting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return false;
		}

		kref_get(&thi->tconn->kref);

		init_completion(&thi->stop);
		thi->reset_cpu_mask = 1;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd_%c_%s", thi->name[0], thi->tconn->name);

		if (IS_ERR(nt)) {
			conn_err(tconn, "Couldn't start thread\n");

			kref_put(&tconn->kref, &conn_destroy);
			module_put(THIS_MODULE);
			return false;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case EXITING:
		thi->t_state = RESTARTING;
		conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);
		/* fall through */
	case RUNNING:
	case RESTARTING:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return true;
}
void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == NONE) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		smp_mb();
		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}
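/*
 * For orientation, the t_state transitions used by the thread helpers
 * above form a small state machine (a sketch; the authoritative values
 * live in the drbd headers):
 *
 *	NONE       --drbd_thread_start()--> RUNNING
 *	RUNNING    --_drbd_thread_stop()--> EXITING or RESTARTING
 *	RESTARTING --drbd_thread_setup()--> RUNNING  (function is re-run)
 *	EXITING    --drbd_thread_setup()--> NONE     (thread terminates)
 *
 * t_state is only inspected or changed under thi->t_lock.
 */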
static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi =
		task == tconn->receiver.task ? &tconn->receiver :
		task == tconn->asender.task  ? &tconn->asender :
		task == tconn->worker.task   ? &tconn->worker : NULL;

	return thi;
}

char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
	return thi ? thi->name : task->comm;
}
int conn_lowest_minor(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr = 0, m;

	rcu_read_lock();
	mdev = idr_get_next(&tconn->volumes, &vnr);
	m = mdev ? mdev_to_minor(mdev) : -1;
	rcu_read_unlock();

	return m;
}
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 * @mdev:	DRBD device.
 *
 * Forces all threads of a device onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
{
	int ord, cpu;

	/* user override. */
	if (cpumask_weight(tconn->cpu_mask))
		return;

	ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
	for_each_online_cpu(cpu) {
		if (ord-- == 0) {
			cpumask_set_cpu(cpu, tconn->cpu_mask);
			return;
		}
	}
	/* should not be reached */
	cpumask_setall(tconn->cpu_mask);
}
/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @mdev:	DRBD device.
 * @thi:	drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
	struct task_struct *p = current;

	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
}
/**
 * drbd_header_size  -  size of a packet header
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures.  (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_tconn *tconn)
{
	if (tconn->agreed_pro_version >= 100) {
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
		return sizeof(struct p_header100);
	} else {
		BUILD_BUG_ON(sizeof(struct p_header80) !=
			     sizeof(struct p_header95));
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
		return sizeof(struct p_header80);
	}
}
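/*
 * For reference (assuming the usual wire layouts): struct p_header80 and
 * struct p_header95 are 8 bytes each, struct p_header100 is 16 bytes;
 * all multiples of 8, which is exactly what the BUILD_BUG_ONs above pin
 * down so that any payload stays word aligned.
 */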
static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size);
	return sizeof(struct p_header80);
}

static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be32(size);
	return sizeof(struct p_header95);
}

static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
				      int size, int vnr)
{
	h->magic = cpu_to_be32(DRBD_MAGIC_100);
	h->volume = cpu_to_be16(vnr);
	h->command = cpu_to_be16(cmd);
	h->length = cpu_to_be32(size);
	h->pad = 0;
	return sizeof(struct p_header100);
}

static unsigned int prepare_header(struct drbd_tconn *tconn, int vnr,
				   void *buffer, enum drbd_packet cmd, int size)
{
	if (tconn->agreed_pro_version >= 100)
		return prepare_header100(buffer, cmd, size, vnr);
	else if (tconn->agreed_pro_version >= 95 &&
		 size > DRBD_MAX_SIZE_H80_PACKET)
		return prepare_header95(buffer, cmd, size);
	else
		return prepare_header80(buffer, cmd, size);
}
static void *__conn_prepare_command(struct drbd_tconn *tconn,
				    struct drbd_socket *sock)
{
	if (!sock->socket)
		return NULL;
	return sock->sbuf + drbd_header_size(tconn);
}

void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
{
	void *p;

	mutex_lock(&sock->mutex);
	p = __conn_prepare_command(tconn, sock);
	if (!p)
		mutex_unlock(&sock->mutex);

	return p;
}

void *drbd_prepare_command(struct drbd_conf *mdev, struct drbd_socket *sock)
{
	return conn_prepare_command(mdev->tconn, sock);
}
static int __send_command(struct drbd_tconn *tconn, int vnr,
			  struct drbd_socket *sock, enum drbd_packet cmd,
			  unsigned int header_size, void *data,
			  unsigned int size)
{
	int msg_flags;
	int err;

	/*
	 * Called with @data == NULL and the size of the data blocks in @size
	 * for commands that send data blocks.  For those commands, omit the
	 * MSG_MORE flag: this will increase the likelihood that data blocks
	 * which are page aligned on the sender will end up page aligned on the
	 * receiver.
	 */
	msg_flags = data ? MSG_MORE : 0;

	header_size += prepare_header(tconn, vnr, sock->sbuf, cmd,
				      header_size + size);
	err = drbd_send_all(tconn, sock->socket, sock->sbuf, header_size,
			    msg_flags);
	if (data && !err)
		err = drbd_send_all(tconn, sock->socket, data, size, 0);
	return err;
}
static int __conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
			       enum drbd_packet cmd, unsigned int header_size,
			       void *data, unsigned int size)
{
	return __send_command(tconn, 0, sock, cmd, header_size, data, size);
}

int conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __conn_send_command(tconn, sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_command(struct drbd_conf *mdev, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, header_size,
			     data, size);
	mutex_unlock(&sock->mutex);
	return err;
}
int drbd_send_ping(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;

	sock = &tconn->meta;
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, P_PING, 0, NULL, 0);
}

int drbd_send_ping_ack(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;

	sock = &tconn->meta;
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, P_PING_ACK, 0, NULL, 0);
}
int drbd_send_sync_param(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_rs_param_95 *p;
	int size;
	const int apv = mdev->tconn->agreed_pro_version;
	enum drbd_packet cmd;
	struct net_conf *nc;
	struct disk_conf *dc;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(nc->verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (get_ldev(mdev)) {
		dc = rcu_dereference(mdev->ldev->disk_conf);
		p->resync_rate = cpu_to_be32(dc->resync_rate);
		p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
		p->c_delay_target = cpu_to_be32(dc->c_delay_target);
		p->c_fill_target = cpu_to_be32(dc->c_fill_target);
		p->c_max_rate = cpu_to_be32(dc->c_max_rate);
		put_ldev(mdev);
	} else {
		p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
	}

	if (apv >= 88)
		strcpy(p->verify_alg, nc->verify_alg);
	if (apv >= 89)
		strcpy(p->csums_alg, nc->csums_alg);
	rcu_read_unlock();

	return drbd_send_command(mdev, sock, cmd, size, NULL, 0);
}
int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_protocol *p;
	struct net_conf *nc;
	int size, cf;

	sock = &tconn->data;
	p = __conn_prepare_command(tconn, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);

	if (nc->tentative && tconn->agreed_pro_version < 92) {
		rcu_read_unlock();
		mutex_unlock(&sock->mutex);
		conn_err(tconn, "--dry-run is not supported by peer");
		return -EOPNOTSUPP;
	}

	size = sizeof(*p);
	if (tconn->agreed_pro_version >= 87)
		size += strlen(nc->integrity_alg) + 1;

	p->protocol      = cpu_to_be32(nc->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(nc->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(nc->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(nc->after_sb_2p);
	p->two_primaries = cpu_to_be32(nc->two_primaries);
	cf = 0;
	if (nc->discard_my_data)
		cf |= CF_DISCARD_MY_DATA;
	if (nc->tentative)
		cf |= CF_DRY_RUN;
	p->conn_flags    = cpu_to_be32(cf);

	if (tconn->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, nc->integrity_alg);
	rcu_read_unlock();

	return __conn_send_command(tconn, sock, cmd, size, NULL, 0);
}

int drbd_send_protocol(struct drbd_tconn *tconn)
{
	int err;

	mutex_lock(&tconn->data.mutex);
	err = __drbd_send_protocol(tconn, P_PROTOCOL);
	mutex_unlock(&tconn->data.mutex);

	return err;
}
int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
{
	struct drbd_socket *sock;
	struct p_uuids *p;
	int i;

	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
		return 0;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p) {
		put_ldev(mdev);
		return -EIO;
	}
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p->uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;

	mdev->comm_bm_set = drbd_bm_total_weight(mdev);
	p->uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
	rcu_read_lock();
	uuid_flags |= rcu_dereference(mdev->tconn->net_conf)->discard_my_data ? 1 : 0;
	rcu_read_unlock();
	uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
	uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	put_ldev(mdev);
	return drbd_send_command(mdev, sock, P_UUIDS, sizeof(*p), NULL, 0);
}

int drbd_send_uuids(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 8);
}
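/*
 * For reference, the uuid_flags bits assembled in _drbd_send_uuids():
 * 1 = discard_my_data is set, 2 = we were a crashed primary,
 * 4 = our disk is D_INCONSISTENT, and 8 (passed in by
 * drbd_send_uuids_skip_initial_sync()) tells the peer the initial
 * sync is being skipped.
 */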
void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
{
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		u64 *uuid = mdev->ldev->md.uuid;
		dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
			 text,
			 (unsigned long long)uuid[UI_CURRENT],
			 (unsigned long long)uuid[UI_BITMAP],
			 (unsigned long long)uuid[UI_HISTORY_START],
			 (unsigned long long)uuid[UI_HISTORY_END]);
		put_ldev(mdev);
	} else {
		dev_info(DEV, "%s effective data uuid: %016llX\n",
			 text,
			 (unsigned long long)mdev->ed_uuid);
	}
}
void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_rs_uuid *p;
	u64 uuid;

	D_ASSERT(mdev->state.disk == D_UP_TO_DATE);

	uuid = mdev->ldev->md.uuid[UI_BITMAP];
	if (uuid && uuid != UUID_JUST_CREATED)
		uuid = uuid + UUID_NEW_BM_OFFSET;
	else
		get_random_bytes(&uuid, sizeof(u64));
	drbd_uuid_set(mdev, UI_BITMAP, uuid);
	drbd_print_uuids(mdev, "updated sync UUID");
	drbd_md_sync(mdev);

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (p) {
		p->uuid = cpu_to_be64(uuid);
		drbd_send_command(mdev, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
	}
}
int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
{
	struct drbd_socket *sock;
	struct p_sizes *p;
	sector_t d_size, u_size;
	int q_order_type, max_bio_size;

	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		D_ASSERT(mdev->ldev->backing_bdev);
		d_size = drbd_get_max_capacity(mdev->ldev);
		rcu_read_lock();
		u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
		rcu_read_unlock();
		q_order_type = drbd_queue_order_type(mdev);
		max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
		put_ldev(mdev);
	} else {
		d_size = 0;
		u_size = 0;
		q_order_type = QUEUE_ORDERED_NONE;
		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
	}

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;

	if (mdev->tconn->agreed_pro_version <= 94)
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
	else if (mdev->tconn->agreed_pro_version < 100)
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE_P95);

	p->d_size = cpu_to_be64(d_size);
	p->u_size = cpu_to_be64(u_size);
	p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
	p->max_bio_size = cpu_to_be32(max_bio_size);
	p->queue_order_type = cpu_to_be16(q_order_type);
	p->dds_flags = cpu_to_be16(flags);
	return drbd_send_command(mdev, sock, P_SIZES, sizeof(*p), NULL, 0);
}
/**
 * drbd_send_current_state() - Sends the drbd state to the peer
 * @mdev:	DRBD device.
 */
int drbd_send_current_state(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
	return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
}
/**
 * drbd_send_state() - After a state change, sends the new state to the peer
 * @mdev:	DRBD device.
 * @state:	the state to send, not necessarily the current state.
 *
 * Each state change queues an "after_state_ch" work, which will eventually
 * send the resulting new state to the peer. If more state changes happen
 * between queuing and processing of the after_state_ch work, we still
 * want to send each intermediary state in the order it occurred.
 */
int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(state.i); /* Within the send mutex */
	return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
}
int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val)
{
	struct drbd_socket *sock;
	struct p_req_state *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return drbd_send_command(mdev, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
}
int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
{
	enum drbd_packet cmd;
	struct drbd_socket *sock;
	struct p_req_state *p;

	cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
	sock = &tconn->data;
	p = conn_prepare_command(tconn, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
}
void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;

	sock = &mdev->tconn->meta;
	p = drbd_prepare_command(mdev, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		drbd_send_command(mdev, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
	}
}

void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;
	enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;

	sock = &tconn->meta;
	p = conn_prepare_command(tconn, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
	}
}
static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
	BUG_ON(code & ~0xf);
	p->encoding = (p->encoding & ~0xf) | code;
}

static void dcbp_set_start(struct p_compressed_bm *p, int set)
{
	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}

static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
{
	BUG_ON(n & ~0x7);
	p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}
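/*
 * Taken together, the three helpers above pack the p_compressed_bm
 * encoding byte like this:
 *
 *	bit 7     : whether the first run length describes set bits
 *	bits 6..4 : number of pad bits at the end of the bit stream
 *	bits 3..0 : encoding code (e.g. RLE_VLI_Bits)
 */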
int fill_bitmap_rle_bits(struct drbd_conf *mdev,
			 struct p_compressed_bm *p,
			 unsigned int size,
			 struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	unsigned long plain_bits;
	unsigned long tmp;
	unsigned long rl;
	unsigned len;
	unsigned toggle;
	int bits, use_rle;

	/* may we use this feature? */
	rcu_read_lock();
	use_rle = rcu_dereference(mdev->tconn->net_conf)->use_rle;
	rcu_read_unlock();
	if (!use_rle || mdev->tconn->agreed_pro_version < 90)
		return 0;

	if (c->bit_offset >= c->bm_bits)
		return 0; /* nothing to do. */

	/* use at most this many bytes */
	bitstream_init(&bs, p->code, size, 0);
	memset(p->code, 0, size);
	/* plain bits covered in this code string */
	plain_bits = 0;

	/* p->encoding & 0x80 stores whether the first run length is set.
	 * bit offset is implicit.
	 * start with toggle == 2 to be able to tell the first iteration */
	toggle = 2;

	/* see how much plain bits we can stuff into one packet
	 * using RLE and VLI. */
	do {
		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
				    : _drbd_bm_find_next(mdev, c->bit_offset);
		if (tmp == -1UL)
			tmp = c->bm_bits;
		rl = tmp - c->bit_offset;

		if (toggle == 2) { /* first iteration */
			if (rl == 0) {
				/* the first checked bit was set,
				 * store start value, */
				dcbp_set_start(p, 1);
				/* but skip encoding of zero run length */
				toggle = !toggle;
				continue;
			}
			dcbp_set_start(p, 0);
		}

		/* paranoia: catch zero runlength.
		 * can only happen if bitmap is modified while we scan it. */
		if (rl == 0) {
			dev_err(DEV, "unexpected zero runlength while encoding bitmap "
			    "t:%u bo:%lu\n", toggle, c->bit_offset);
			return -1;
		}

		bits = vli_encode_bits(&bs, rl);
		if (bits == -ENOBUFS) /* buffer full */
			break;
		if (bits <= 0) {
			dev_err(DEV, "error while encoding bitmap: %d\n", bits);
			return 0;
		}

		toggle = !toggle;
		plain_bits += rl;
		c->bit_offset = tmp;
	} while (c->bit_offset < c->bm_bits);

	len = bs.cur.b - p->code + !!bs.cur.bit;

	if (plain_bits < (len << 3)) {
		/* incompressible with this method.
		 * we need to rewind both word and bit position. */
		c->bit_offset -= plain_bits;
		bm_xfer_ctx_bit_to_word_offset(c);
		c->bit_offset = c->word_offset * BITS_PER_LONG;
		return 0;
	}

	/* RLE + VLI was able to compress it just fine.
	 * update c->word_offset. */
	bm_xfer_ctx_bit_to_word_offset(c);

	/* store pad_bits */
	dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);

	return len;
}
/**
 * send_bitmap_rle_or_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
{
	struct drbd_socket *sock = &mdev->tconn->data;
	unsigned int header_size = drbd_header_size(mdev->tconn);
	struct p_compressed_bm *p = sock->sbuf + header_size;
	int len, err;

	len = fill_bitmap_rle_bits(mdev, p,
			DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
	if (len < 0)
		return -EIO;

	if (len) {
		dcbp_set_code(p, RLE_VLI_Bits);
		err = __send_command(mdev->tconn, mdev->vnr, sock,
				     P_COMPRESSED_BITMAP, sizeof(*p) + len,
				     NULL, 0);
		c->packets[0]++;
		c->bytes[0] += header_size + sizeof(*p) + len;

		if (c->bit_offset >= c->bm_bits)
			len = 0; /* DONE */
	} else {
		/* was not compressible.
		 * send a buffer full of plain text bits instead. */
		unsigned int data_size;
		unsigned long num_words;
		unsigned long *p = sock->sbuf + header_size;

		data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
		num_words = min_t(size_t, data_size / sizeof(*p),
				  c->bm_words - c->word_offset);
		len = num_words * sizeof(*p);
		if (len)
			drbd_bm_get_lel(mdev, c->word_offset, num_words, p);
		err = __send_command(mdev->tconn, mdev->vnr, sock, P_BITMAP, len, NULL, 0);
		c->word_offset += num_words;
		c->bit_offset = c->word_offset * BITS_PER_LONG;

		c->packets[1]++;
		c->bytes[1] += header_size + len;

		if (c->bit_offset > c->bm_bits)
			c->bit_offset = c->bm_bits;
	}
	if (!err) {
		if (len == 0) {
			INFO_bm_xfer_stats(mdev, "send", c);
			return 0;
		} else
			return 1;
	}
	return -EIO;
}
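/*
 * Note the return convention documented above: _drbd_send_bitmap() below
 * keeps calling this in a loop while it returns 1, stops cleanly on 0,
 * and treats a negative value as a dead connection.
 */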
/* See the comment at receive_bitmap() */
static int _drbd_send_bitmap(struct drbd_conf *mdev)
{
	struct bm_xfer_ctx c;
	int err;

	if (!expect(mdev->bitmap))
		return false;

	if (get_ldev(mdev)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
			dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
			drbd_bm_set_all(mdev);
			if (drbd_bm_write(mdev)) {
				/* write_bm did fail! Leave full sync flag set in Meta P_DATA
				 * but otherwise process as per normal - need to tell other
				 * side that a full resync is required! */
				dev_err(DEV, "Failed to write bitmap to disk!\n");
			} else {
				drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
				drbd_md_sync(mdev);
			}
		}
		put_ldev(mdev);
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),
	};

	do {
		err = send_bitmap_rle_or_plain(mdev, &c);
	} while (err > 0);

	return err == 0;
}

int drbd_send_bitmap(struct drbd_conf *mdev)
{
	struct drbd_socket *sock = &mdev->tconn->data;
	int err = -1;

	mutex_lock(&sock->mutex);
	if (sock->socket)
		err = !_drbd_send_bitmap(mdev);
	mutex_unlock(&sock->mutex);
	return err;
}
void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr, u32 set_size)
{
	struct drbd_socket *sock;
	struct p_barrier_ack *p;

	if (tconn->cstate < C_WF_REPORT_PARAMS)
		return;

	sock = &tconn->meta;
	p = conn_prepare_command(tconn, sock);
	if (!p)
		return;
	p->barrier = barrier_nr;
	p->set_size = cpu_to_be32(set_size);
	conn_send_command(tconn, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
}
/**
 * _drbd_send_ack() - Sends an ack packet
 * @mdev:	DRBD device.
 * @cmd:	Packet command code.
 * @sector:	sector, needs to be in big endian byte order
 * @blksize:	size in byte, needs to be in big endian byte order
 * @block_id:	Id, big endian byte order
 */
static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
			  u64 sector, u32 blksize, u64 block_id)
{
	struct drbd_socket *sock;
	struct p_block_ack *p;

	if (mdev->state.conn < C_CONNECTED)
		return -EIO;

	sock = &mdev->tconn->meta;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = sector;
	p->block_id = block_id;
	p->blksize = blksize;
	p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
	return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
}
/* dp->sector and dp->block_id already/still in network byte order,
 * data_size is payload size according to dp->head,
 * and may need to be corrected for digest size. */
void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
		      struct p_data *dp, int data_size)
{
	if (mdev->tconn->peer_integrity_tfm)
		data_size -= crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
	_drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
		       dp->block_id);
}

void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
		      struct p_block_req *rp)
{
	_drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
}
/**
 * drbd_send_ack() - Sends an ack packet
 * @mdev:	DRBD device
 * @cmd:	packet command code
 * @peer_req:	peer request
 */
int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
		  struct drbd_peer_request *peer_req)
{
	return _drbd_send_ack(mdev, cmd,
			      cpu_to_be64(peer_req->i.sector),
			      cpu_to_be32(peer_req->i.size),
			      peer_req->block_id);
}

/* This function misuses the block_id field to signal if the blocks
 * are in sync or not. */
int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
		     sector_t sector, int blksize, u64 block_id)
{
	return _drbd_send_ack(mdev, cmd,
			      cpu_to_be64(sector),
			      cpu_to_be32(blksize),
			      cpu_to_be64(block_id));
}
*mdev
, int cmd
,
1366 sector_t sector
, int size
, u64 block_id
)
1368 struct drbd_socket
*sock
;
1369 struct p_block_req
*p
;
1371 sock
= &mdev
->tconn
->data
;
1372 p
= drbd_prepare_command(mdev
, sock
);
1375 p
->sector
= cpu_to_be64(sector
);
1376 p
->block_id
= block_id
;
1377 p
->blksize
= cpu_to_be32(size
);
1378 return drbd_send_command(mdev
, sock
, cmd
, sizeof(*p
), NULL
, 0);
int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
			    void *digest, int digest_size, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	/* FIXME: Put the digest into the preallocated socket buffer. */

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(mdev, sock, cmd, sizeof(*p),
				 digest, digest_size);
}
int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(mdev, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
}
/* called on sndtimeo
 * returns false if we should retry,
 * true if we think connection is dead
 */
static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock)
{
	int drop_it;
	/* long elapsed = (long)(jiffies - mdev->last_received); */

	drop_it =   tconn->meta.socket == sock
		|| !tconn->asender.task
		|| get_t_state(&tconn->asender) != RUNNING
		|| tconn->cstate < C_WF_REPORT_PARAMS;

	if (drop_it)
		return true;

	drop_it = !--tconn->ko_count;
	if (!drop_it) {
		conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
			 current->comm, current->pid, tconn->ko_count);
		request_ping(tconn);
	}

	return drop_it; /* && (mdev->state == R_PRIMARY) */;
}
static void drbd_update_congested(struct drbd_tconn *tconn)
{
	struct sock *sk = tconn->data.socket->sk;
	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
		set_bit(NET_CONGESTED, &tconn->flags);
}
/* The idea of sendpage seems to be to put some kind of reference
 * to the page into the skb, and to hand it over to the NIC. In
 * this process get_page() gets called.
 *
 * As soon as the page was really sent over the network put_page()
 * gets called by some part of the network layer. [ NIC driver? ]
 *
 * [ get_page() / put_page() increment/decrement the count. If count
 *   reaches 0 the page will be freed. ]
 *
 * This works nicely with pages from FSs.
 * But this means that in protocol A we might signal IO completion too early!
 *
 * In order not to corrupt data during a resync we must make sure
 * that we do not reuse our own buffer pages (EEs) too early, therefore
 * we have the net_ee list.
 *
 * XFS seems to have problems, still, it submits pages with page_count == 0!
 * As a workaround, we disable sendpage on pages
 * with page_count == 0 or PageSlab.
 */
static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
			      int offset, size_t size, unsigned msg_flags)
{
	struct socket *socket;
	void *addr;
	int err;

	socket = mdev->tconn->data.socket;
	addr = kmap(page) + offset;
	err = drbd_send_all(mdev->tconn, socket, addr, size, msg_flags);
	kunmap(page);
	if (!err)
		mdev->send_cnt += size >> 9;
	return err;
}
static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
			   int offset, size_t size, unsigned msg_flags)
{
	struct socket *socket = mdev->tconn->data.socket;
	mm_segment_t oldfs = get_fs();
	int len = size;
	int err = -EIO;

	/* e.g. XFS meta- & log-data is in slab pages, which have a
	 * page_count of 0 and/or have PageSlab() set.
	 * we cannot use send_page for those, as that does get_page();
	 * put_page(); and would cause either a VM_BUG directly, or
	 * __page_cache_release a page that would actually still be referenced
	 * by someone, leading to some obscure delayed Oops somewhere else. */
	if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
		return _drbd_no_send_page(mdev, page, offset, size, msg_flags);

	msg_flags |= MSG_NOSIGNAL;
	drbd_update_congested(mdev->tconn);
	set_fs(KERNEL_DS);
	do {
		int sent;

		sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
		if (sent <= 0) {
			if (sent == -EAGAIN) {
				if (we_should_drop_the_connection(mdev->tconn, socket))
					break;
				continue;
			}
			dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
				 __func__, (int)size, len, sent);
			if (sent < 0)
				err = sent;
			break;
		}
		len    -= sent;
		offset += sent;
	} while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
	set_fs(oldfs);
	clear_bit(NET_CONGESTED, &mdev->tconn->flags);

	if (len == 0) {
		err = 0;
		mdev->send_cnt += size >> 9;
	}
	return err;
}
static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	/* hint all but last page with MSG_MORE */
	bio_for_each_segment(bvec, bio, i) {
		int err;

		err = _drbd_no_send_page(mdev, bvec->bv_page,
					 bvec->bv_offset, bvec->bv_len,
					 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
		if (err)
			return err;
	}
	return 0;
}

static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	/* hint all but last page with MSG_MORE */
	bio_for_each_segment(bvec, bio, i) {
		int err;

		err = _drbd_send_page(mdev, bvec->bv_page,
				      bvec->bv_offset, bvec->bv_len,
				      i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
		if (err)
			return err;
	}
	return 0;
}
static int _drbd_send_zc_ee(struct drbd_conf *mdev,
			    struct drbd_peer_request *peer_req)
{
	struct page *page = peer_req->pages;
	unsigned len = peer_req->i.size;
	int err;

	/* hint all but last page with MSG_MORE */
	page_chain_for_each(page) {
		unsigned l = min_t(unsigned, len, PAGE_SIZE);

		err = _drbd_send_page(mdev, page, 0, l,
				      page_chain_next(page) ? MSG_MORE : 0);
		if (err)
			return err;
		len -= l;
	}
	return 0;
}
static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
{
	if (mdev->tconn->agreed_pro_version >= 95)
		return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
			(bi_rw & REQ_FUA ? DP_FUA : 0) |
			(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
			(bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
	else
		return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
}
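/*
 * In other words, for protocol >= 95: REQ_SYNC -> DP_RW_SYNC,
 * REQ_FUA -> DP_FUA, REQ_FLUSH -> DP_FLUSH, REQ_DISCARD -> DP_DISCARD.
 * Older peers only understand the sync hint, so the other flags are
 * dropped rather than sent.
 */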
/* Used to send write requests
 * R_PRIMARY -> Peer	(P_DATA)
 */
int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
{
	struct drbd_socket *sock;
	struct p_data *p;
	unsigned int dp_flags = 0;
	int dgs;
	int err;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;

	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(req->i.sector);
	p->block_id = (unsigned long)req;
	p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
	dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
	if (mdev->state.conn >= C_SYNC_SOURCE &&
	    mdev->state.conn <= C_PAUSED_SYNC_T)
		dp_flags |= DP_MAY_SET_IN_SYNC;
	if (mdev->tconn->agreed_pro_version >= 100) {
		if (req->rq_state & RQ_EXP_RECEIVE_ACK)
			dp_flags |= DP_SEND_RECEIVE_ACK;
		if (req->rq_state & RQ_EXP_WRITE_ACK)
			dp_flags |= DP_SEND_WRITE_ACK;
	}
	p->dp_flags = cpu_to_be32(dp_flags);
	if (dgs)
		drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, p + 1);
	err = __send_command(mdev->tconn, mdev->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
	if (!err) {
		/* For protocol A, we have to memcpy the payload into
		 * socket buffers, as we may complete right away
		 * as soon as we handed it over to tcp, at which point the data
		 * pages may become invalid.
		 *
		 * For data-integrity enabled, we copy it as well, so we can be
		 * sure that even if the bio pages may still be modified, it
		 * won't change the data on the wire, thus if the digest checks
		 * out ok after sending on this side, but does not fit on the
		 * receiving side, we sure have detected corruption elsewhere. */
		if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || dgs)
			err = _drbd_send_bio(mdev, req->master_bio);
		else
			err = _drbd_send_zc_bio(mdev, req->master_bio);

		/* double check digest, sometimes buffers have been modified in flight. */
		if (dgs > 0 && dgs <= 64) {
			/* 64 byte, 512 bit, is the largest digest size
			 * currently supported in kernel crypto. */
			unsigned char digest[64];
			drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, digest);
			if (memcmp(p + 1, digest, dgs)) {
				dev_warn(DEV,
					"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
					(unsigned long long)req->i.sector, req->i.size);
			}
		} /* else if (dgs > 64) {
		     ... Be noisy about digest too large ...
		} */
	}
	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

	return err;
}
/* answer packet, used to send data back for read requests:
 *  Peer       -> (diskless) R_PRIMARY   (P_DATA_REPLY)
 *  C_SYNC_SOURCE -> C_SYNC_TARGET       (P_RS_DATA_REPLY)
 */
int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
		    struct drbd_peer_request *peer_req)
{
	struct drbd_socket *sock;
	struct p_data *p;
	int err;
	int dgs;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);

	dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;

	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(peer_req->i.sector);
	p->block_id = peer_req->block_id;
	p->seq_num = 0;  /* unused */
	p->dp_flags = 0;
	if (dgs)
		drbd_csum_ee(mdev, mdev->tconn->integrity_tfm, peer_req, p + 1);
	err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
	if (!err)
		err = _drbd_send_zc_ee(mdev, peer_req);
	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

	return err;
}
int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
{
	struct drbd_socket *sock;
	struct p_block_desc *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(req->i.sector);
	p->blksize = cpu_to_be32(req->i.size);
	return drbd_send_command(mdev, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
}
/*
  drbd_send distinguishes two cases:

  Packets sent via the data socket "sock"
  and packets sent via the meta data socket "msock"

		    sock                      msock
  -----------------+-------------------------+------------------------------
  timeout           conf.timeout / 2          conf.timeout / 2
  timeout action    send a ping via msock     Abort communication
					      and close all sockets
*/

/*
 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
 */
int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
	      void *buf, size_t size, unsigned msg_flags)
{
	struct kvec iov;
	struct msghdr msg;
	int rv, sent = 0;

	if (!sock)
		return -EBADR;

	/* THINK  if (signal_pending) return ... ? */

	iov.iov_base = buf;
	iov.iov_len  = size;

	msg.msg_name       = NULL;
	msg.msg_namelen    = 0;
	msg.msg_control    = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags      = msg_flags | MSG_NOSIGNAL;

	if (sock == tconn->data.socket) {
		rcu_read_lock();
		tconn->ko_count = rcu_dereference(tconn->net_conf)->ko_count;
		rcu_read_unlock();
		drbd_update_congested(tconn);
	}
	do {
		/* STRANGE
		 * tcp_sendmsg does _not_ use its size parameter at all ?
		 *
		 * -EAGAIN on timeout, -EINTR on signal.
		 */
/* THINK
 * do we need to block DRBD_SIG if sock == &meta.socket ??
 * otherwise wake_asender() might interrupt some send_*Ack !
 */
		rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
		if (rv == -EAGAIN) {
			if (we_should_drop_the_connection(tconn, sock))
				break;
			else
				continue;
		}
		if (rv == -EINTR) {
			flush_signals(current);
			rv = 0;
		}
		if (rv < 0)
			break;
		sent += rv;
		iov.iov_base += rv;
		iov.iov_len  -= rv;
	} while (sent < size);

	if (sock == tconn->data.socket)
		clear_bit(NET_CONGESTED, &tconn->flags);

	if (rv <= 0) {
		if (rv != -EAGAIN) {
			conn_err(tconn, "%s_sendmsg returned %d\n",
				 sock == tconn->meta.socket ? "msock" : "sock",
				 rv);
			conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
		} else
			conn_request_state(tconn, NS(conn, C_TIMEOUT), CS_HARD);
	}

	return sent;
}
/**
 * drbd_send_all  -  Send an entire buffer
 *
 * Returns 0 upon success and a negative error value otherwise.
 */
int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
		  size_t size, unsigned msg_flags)
{
	int err;

	err = drbd_send(tconn, sock, buffer, size, msg_flags);
	if (err < 0)
		return err;
	if (err != size)
		return -EIO;
	return 0;
}
static int drbd_open(struct block_device *bdev, fmode_t mode)
{
	struct drbd_conf *mdev = bdev->bd_disk->private_data;
	unsigned long flags;
	int rv = 0;

	mutex_lock(&drbd_main_mutex);
	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	/* to have a stable mdev->state.role
	 * and no race with updating open_cnt */

	if (mdev->state.role != R_PRIMARY) {
		if (mode & FMODE_WRITE)
			rv = -EROFS;
		else if (!allow_oos)
			rv = -EMEDIUMTYPE;
	}

	if (!rv)
		mdev->open_cnt++;
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
	mutex_unlock(&drbd_main_mutex);

	return rv;
}
static int drbd_release(struct gendisk *gd, fmode_t mode)
{
	struct drbd_conf *mdev = gd->private_data;
	mutex_lock(&drbd_main_mutex);
	mdev->open_cnt--;
	mutex_unlock(&drbd_main_mutex);
	return 0;
}
static void drbd_set_defaults(struct drbd_conf *mdev)
{
	/* Beware! The actual layout differs
	 * between big endian and little endian */
	mdev->state = (union drbd_dev_state) {
		{ .role = R_SECONDARY,
		  .peer = R_UNKNOWN,
		  .conn = C_STANDALONE,
		  .disk = D_DISKLESS,
		  .pdsk = D_UNKNOWN,
		} };
}
void drbd_init_set_defaults(struct drbd_conf *mdev)
{
	/* the memset(,0,) did most of this.
	 * note: only assignments, no allocation in here */

	drbd_set_defaults(mdev);

	atomic_set(&mdev->ap_bio_cnt, 0);
	atomic_set(&mdev->ap_pending_cnt, 0);
	atomic_set(&mdev->rs_pending_cnt, 0);
	atomic_set(&mdev->unacked_cnt, 0);
	atomic_set(&mdev->local_cnt, 0);
	atomic_set(&mdev->pp_in_use_by_net, 0);
	atomic_set(&mdev->rs_sect_in, 0);
	atomic_set(&mdev->rs_sect_ev, 0);
	atomic_set(&mdev->ap_in_flight, 0);
	atomic_set(&mdev->md_io_in_use, 0);

	mutex_init(&mdev->own_state_mutex);
	mdev->state_mutex = &mdev->own_state_mutex;

	spin_lock_init(&mdev->al_lock);
	spin_lock_init(&mdev->peer_seq_lock);

	INIT_LIST_HEAD(&mdev->active_ee);
	INIT_LIST_HEAD(&mdev->sync_ee);
	INIT_LIST_HEAD(&mdev->done_ee);
	INIT_LIST_HEAD(&mdev->read_ee);
	INIT_LIST_HEAD(&mdev->net_ee);
	INIT_LIST_HEAD(&mdev->resync_reads);
	INIT_LIST_HEAD(&mdev->resync_work.list);
	INIT_LIST_HEAD(&mdev->unplug_work.list);
	INIT_LIST_HEAD(&mdev->go_diskless.list);
	INIT_LIST_HEAD(&mdev->md_sync_work.list);
	INIT_LIST_HEAD(&mdev->start_resync_work.list);
	INIT_LIST_HEAD(&mdev->bm_io_work.w.list);

	mdev->resync_work.cb  = w_resync_timer;
	mdev->unplug_work.cb  = w_send_write_hint;
	mdev->go_diskless.cb  = w_go_diskless;
	mdev->md_sync_work.cb = w_md_sync;
	mdev->bm_io_work.w.cb = w_bitmap_io;
	mdev->start_resync_work.cb = w_start_resync;

	mdev->resync_work.mdev  = mdev;
	mdev->unplug_work.mdev  = mdev;
	mdev->go_diskless.mdev  = mdev;
	mdev->md_sync_work.mdev = mdev;
	mdev->bm_io_work.w.mdev = mdev;
	mdev->start_resync_work.mdev = mdev;

	init_timer(&mdev->resync_timer);
	init_timer(&mdev->md_sync_timer);
	init_timer(&mdev->start_resync_timer);
	init_timer(&mdev->request_timer);
	mdev->resync_timer.function = resync_timer_fn;
	mdev->resync_timer.data = (unsigned long) mdev;
	mdev->md_sync_timer.function = md_sync_timer_fn;
	mdev->md_sync_timer.data = (unsigned long) mdev;
	mdev->start_resync_timer.function = start_resync_timer_fn;
	mdev->start_resync_timer.data = (unsigned long) mdev;
	mdev->request_timer.function = request_timer_fn;
	mdev->request_timer.data = (unsigned long) mdev;

	init_waitqueue_head(&mdev->misc_wait);
	init_waitqueue_head(&mdev->state_wait);
	init_waitqueue_head(&mdev->ee_wait);
	init_waitqueue_head(&mdev->al_wait);
	init_waitqueue_head(&mdev->seq_wait);

	mdev->resync_wenr = LC_FREE;
	mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
	mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
}
void drbd_mdev_cleanup(struct drbd_conf *mdev)
{
	int i;
	if (mdev->tconn->receiver.t_state != NONE)
		dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
				mdev->tconn->receiver.t_state);

	mdev->rs_failed    = 0;
	mdev->rs_last_events = 0;
	mdev->rs_last_sect_ev = 0;
	for (i = 0; i < DRBD_SYNC_MARKS; i++) {
		mdev->rs_mark_left[i] = 0;
		mdev->rs_mark_time[i] = 0;
	}
	D_ASSERT(mdev->tconn->net_conf == NULL);

	drbd_set_my_capacity(mdev, 0);
	if (mdev->bitmap) {
		/* maybe never allocated. */
		drbd_bm_resize(mdev, 0, 1);
		drbd_bm_cleanup(mdev);
	}

	drbd_free_bc(mdev->ldev);
	mdev->ldev = NULL;

	clear_bit(AL_SUSPENDED, &mdev->flags);

	D_ASSERT(list_empty(&mdev->active_ee));
	D_ASSERT(list_empty(&mdev->sync_ee));
	D_ASSERT(list_empty(&mdev->done_ee));
	D_ASSERT(list_empty(&mdev->read_ee));
	D_ASSERT(list_empty(&mdev->net_ee));
	D_ASSERT(list_empty(&mdev->resync_reads));
	D_ASSERT(list_empty(&mdev->tconn->sender_work.q));
	D_ASSERT(list_empty(&mdev->resync_work.list));
	D_ASSERT(list_empty(&mdev->unplug_work.list));
	D_ASSERT(list_empty(&mdev->go_diskless.list));

	drbd_set_defaults(mdev);
}
static void drbd_destroy_mempools(void)
{
	struct page *page;

	while (drbd_pp_pool) {
		page = drbd_pp_pool;
		drbd_pp_pool = (struct page *)page_private(page);
		__free_page(page);
		drbd_pp_vacant--;
	}

	/* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */

	if (drbd_md_io_bio_set)
		bioset_free(drbd_md_io_bio_set);
	if (drbd_md_io_page_pool)
		mempool_destroy(drbd_md_io_page_pool);
	if (drbd_ee_mempool)
		mempool_destroy(drbd_ee_mempool);
	if (drbd_request_mempool)
		mempool_destroy(drbd_request_mempool);
	if (drbd_ee_cache)
		kmem_cache_destroy(drbd_ee_cache);
	if (drbd_request_cache)
		kmem_cache_destroy(drbd_request_cache);
	if (drbd_bm_ext_cache)
		kmem_cache_destroy(drbd_bm_ext_cache);
	if (drbd_al_ext_cache)
		kmem_cache_destroy(drbd_al_ext_cache);

	drbd_md_io_bio_set   = NULL;
	drbd_md_io_page_pool = NULL;
	drbd_ee_mempool      = NULL;
	drbd_request_mempool = NULL;
	drbd_ee_cache        = NULL;
	drbd_request_cache   = NULL;
	drbd_bm_ext_cache    = NULL;
	drbd_al_ext_cache    = NULL;
}
static int drbd_create_mempools(void)
{
	struct page *page;
	const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
	int i;

	/* prepare our caches and mempools */
	drbd_request_mempool = NULL;
	drbd_ee_cache        = NULL;
	drbd_request_cache   = NULL;
	drbd_bm_ext_cache    = NULL;
	drbd_al_ext_cache    = NULL;
	drbd_pp_pool         = NULL;
	drbd_md_io_page_pool = NULL;
	drbd_md_io_bio_set   = NULL;

	/* caches */
	drbd_request_cache = kmem_cache_create(
		"drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
	if (drbd_request_cache == NULL)
		goto Enomem;

	drbd_ee_cache = kmem_cache_create(
		"drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
	if (drbd_ee_cache == NULL)
		goto Enomem;

	drbd_bm_ext_cache = kmem_cache_create(
		"drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
	if (drbd_bm_ext_cache == NULL)
		goto Enomem;

	drbd_al_ext_cache = kmem_cache_create(
		"drbd_al", sizeof(struct lc_element), 0, 0, NULL);
	if (drbd_al_ext_cache == NULL)
		goto Enomem;

	/* mempools */
	drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
	if (drbd_md_io_bio_set == NULL)
		goto Enomem;

	drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
	if (drbd_md_io_page_pool == NULL)
		goto Enomem;

	drbd_request_mempool = mempool_create(number,
		mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
	if (drbd_request_mempool == NULL)
		goto Enomem;

	drbd_ee_mempool = mempool_create(number,
		mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
	if (drbd_ee_mempool == NULL)
		goto Enomem;

	/* drbd's page pool */
	spin_lock_init(&drbd_pp_lock);

	for (i = 0; i < number; i++) {
		page = alloc_page(GFP_HIGHUSER);
		if (!page)
			goto Enomem;
		set_page_private(page, (unsigned long)drbd_pp_pool);
		drbd_pp_pool = page;
	}
	drbd_pp_vacant = number;

	return 0;

Enomem:
	drbd_destroy_mempools(); /* in case we allocated some */
	return -ENOMEM;
}
static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
			   void *unused)
{
	/* just so we have it.  you never know what interesting things we
	 * might want to do here some day...
	 */

	return NOTIFY_DONE;
}

static struct notifier_block drbd_notifier = {
	.notifier_call = drbd_notify_sys,
};
static void drbd_release_all_peer_reqs(struct drbd_conf *mdev)
{
	int rr;

	rr = drbd_free_peer_reqs(mdev, &mdev->active_ee);
	if (rr)
		dev_err(DEV, "%d EEs in active list found!\n", rr);

	rr = drbd_free_peer_reqs(mdev, &mdev->sync_ee);
	if (rr)
		dev_err(DEV, "%d EEs in sync list found!\n", rr);

	rr = drbd_free_peer_reqs(mdev, &mdev->read_ee);
	if (rr)
		dev_err(DEV, "%d EEs in read list found!\n", rr);

	rr = drbd_free_peer_reqs(mdev, &mdev->done_ee);
	if (rr)
		dev_err(DEV, "%d EEs in done list found!\n", rr);

	rr = drbd_free_peer_reqs(mdev, &mdev->net_ee);
	if (rr)
		dev_err(DEV, "%d EEs in net list found!\n", rr);
}
/* caution. no locking. */
void drbd_minor_destroy(struct kref *kref)
{
	struct drbd_conf *mdev = container_of(kref, struct drbd_conf, kref);
	struct drbd_tconn *tconn = mdev->tconn;

	del_timer_sync(&mdev->request_timer);

	/* paranoia asserts */
	D_ASSERT(mdev->open_cnt == 0);
	/* end paranoia asserts */

	/* cleanup stuff that may have been allocated during
	 * device (re-)configuration or state changes */

	if (mdev->this_bdev)
		bdput(mdev->this_bdev);

	drbd_free_bc(mdev->ldev);
	mdev->ldev = NULL;

	drbd_release_all_peer_reqs(mdev);

	lc_destroy(mdev->act_log);
	lc_destroy(mdev->resync);

	kfree(mdev->p_uuid);
	/* mdev->p_uuid = NULL; */

	if (mdev->bitmap) /* should no longer be there. */
		drbd_bm_cleanup(mdev);
	__free_page(mdev->md_io_page);
	put_disk(mdev->vdisk);
	blk_cleanup_queue(mdev->rq_queue);
	kfree(mdev->rs_plan_s);
	kfree(mdev);

	kref_put(&tconn->kref, &conn_destroy);
}

/* One global retry thread, if we need to push back some bio and have it
 * reinserted through our make request function.
 */
static struct retry_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	spinlock_t lock;
	struct list_head writes;
} retry;

static void do_retry(struct work_struct *ws)
{
	struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
	LIST_HEAD(writes);
	struct drbd_request *req, *tmp;

	spin_lock_irq(&retry->lock);
	list_splice_init(&retry->writes, &writes);
	spin_unlock_irq(&retry->lock);

	list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
		struct drbd_conf *mdev = req->w.mdev;
		struct bio *bio = req->master_bio;
		unsigned long start_time = req->start_time;

		/* We have exclusive access to this request object.
		 * If it had not been RQ_POSTPONED, the code path which queued
		 * it here would have completed and freed it already.
		 */
		mempool_free(req, drbd_request_mempool);

		/* A single suspended or otherwise blocking device may stall
		 * all others as well.  Fortunately, this code path is to
		 * recover from a situation that "should not happen":
		 * concurrent writes in multi-primary setup.
		 * In a "normal" lifecycle, this workqueue is supposed to be
		 * destroyed without ever doing anything.
		 * If it turns out to be an issue anyways, we can do per
		 * resource (replication group) or per device (minor) retry
		 * workqueues instead.
		 */

		/* We are not just doing generic_make_request(),
		 * as we want to keep the start_time information. */
		inc_ap_bio(mdev);
		__drbd_make_request(mdev, bio, start_time);
	}
}

void drbd_restart_request(struct drbd_request *req)
{
	unsigned long flags;
	spin_lock_irqsave(&retry.lock, flags);
	list_move_tail(&req->tl_requests, &retry.writes);
	spin_unlock_irqrestore(&retry.lock, flags);

	/* Drop the extra reference that would otherwise
	 * have been dropped by complete_master_bio.
	 * do_retry() needs to grab a new one. */
	dec_ap_bio(req->w.mdev);

	queue_work(retry.wq, &retry.worker);
}

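/*
 * Lifecycle sketch of the retry path above: a postponed write request is
 * handed to drbd_restart_request(), which moves it onto retry.writes and
 * kicks retry.wq; do_retry() then frees the old request object and
 * re-submits its master bio via __drbd_make_request(), preserving the
 * original start_time.
 */
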
static void drbd_cleanup(void)
{
	unsigned int i;
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn, *tmp;

	unregister_reboot_notifier(&drbd_notifier);

	/* first remove proc,
	 * drbdsetup uses its presence to detect
	 * whether DRBD is loaded.
	 * If we would get stuck in proc removal,
	 * but have netlink already deregistered,
	 * some drbdsetup commands may wait forever
	 * for an answer.
	 */
	if (drbd_proc)
		remove_proc_entry("drbd", NULL);

	if (retry.wq)
		destroy_workqueue(retry.wq);

	drbd_genl_unregister();

	idr_for_each_entry(&minors, mdev, i) {
		idr_remove(&minors, mdev_to_minor(mdev));
		idr_remove(&mdev->tconn->volumes, mdev->vnr);
		del_gendisk(mdev->vdisk);
		/* synchronize_rcu(); No other threads running at this point */
		kref_put(&mdev->kref, &drbd_minor_destroy);
	}

	/* not _rcu, since there is no other updater anymore. Genl already unregistered */
	list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
		list_del(&tconn->all_tconn); /* not _rcu: no proc, no other threads */
		/* synchronize_rcu(); */
		kref_put(&tconn->kref, &conn_destroy);
	}

	drbd_destroy_mempools();
	unregister_blkdev(DRBD_MAJOR, "drbd");

	idr_destroy(&minors);

	printk(KERN_INFO "drbd: module cleanup done.\n");
}

/**
 * drbd_congested() - Callback for pdflush
 * @congested_data:	User data
 * @bdi_bits:		Bits pdflush is currently interested in
 *
 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
 */
static int drbd_congested(void *congested_data, int bdi_bits)
{
	struct drbd_conf *mdev = congested_data;
	struct request_queue *q;
	char reason = '-';
	int r = 0;

	if (!may_inc_ap_bio(mdev)) {
		/* DRBD has frozen IO */
		r = bdi_bits;
		reason = 'd';
		goto out;
	}

	if (get_ldev(mdev)) {
		q = bdev_get_queue(mdev->ldev->backing_bdev);
		r = bdi_congested(&q->backing_dev_info, bdi_bits);
		put_ldev(mdev);
		if (r)
			reason = 'b';
	}

	if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->tconn->flags)) {
		r |= (1 << BDI_async_congested);
		reason = reason == 'b' ? 'a' : 'n';
	}

out:
	mdev->congestion_reason = reason;
	return r;
}

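/*
 * Note: the single-character congestion_reason set above encodes why the
 * device reported congestion: 'd' means DRBD itself has frozen IO, 'b'
 * the local backing device is congested, 'n' the network is congested,
 * 'a' both backing device and network, and '-' not congested at all.
 */
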
static void drbd_init_workqueue(struct drbd_work_queue *wq)
{
	spin_lock_init(&wq->q_lock);
	INIT_LIST_HEAD(&wq->q);
	init_waitqueue_head(&wq->q_wait);
}

struct drbd_tconn *conn_get_by_name(const char *name)
{
	struct drbd_tconn *tconn;

	if (!name || !name[0])
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
		if (!strcmp(tconn->name, name)) {
			kref_get(&tconn->kref);
			goto found;
		}
	}
	tconn = NULL;
found:
	rcu_read_unlock();
	return tconn;
}

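/*
 * Illustrative use (not part of the driver): the lookup above takes a
 * reference on success, which the caller must drop again:
 *
 *	struct drbd_tconn *tconn = conn_get_by_name("r0");
 *	if (tconn) {
 *		... use tconn ...
 *		kref_put(&tconn->kref, &conn_destroy);
 *	}
 */
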
struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
				     void *peer_addr, int peer_addr_len)
{
	struct drbd_tconn *tconn;

	rcu_read_lock();
	list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
		if (tconn->my_addr_len == my_addr_len &&
		    tconn->peer_addr_len == peer_addr_len &&
		    !memcmp(&tconn->my_addr, my_addr, my_addr_len) &&
		    !memcmp(&tconn->peer_addr, peer_addr, peer_addr_len)) {
			kref_get(&tconn->kref);
			goto found;
		}
	}
	tconn = NULL;
found:
	rcu_read_unlock();
	return tconn;
}

static int drbd_alloc_socket(struct drbd_socket *socket)
{
	socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->rbuf)
		return -ENOMEM;
	socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->sbuf)
		return -ENOMEM;
	return 0;
}

static void drbd_free_socket(struct drbd_socket *socket)
{
	free_page((unsigned long) socket->sbuf);
	free_page((unsigned long) socket->rbuf);
}

void conn_free_crypto(struct drbd_tconn *tconn)
{
	drbd_free_sock(tconn);

	crypto_free_hash(tconn->csums_tfm);
	crypto_free_hash(tconn->verify_tfm);
	crypto_free_hash(tconn->cram_hmac_tfm);
	crypto_free_hash(tconn->integrity_tfm);
	crypto_free_hash(tconn->peer_integrity_tfm);
	kfree(tconn->int_dig_in);
	kfree(tconn->int_dig_vv);

	tconn->csums_tfm = NULL;
	tconn->verify_tfm = NULL;
	tconn->cram_hmac_tfm = NULL;
	tconn->integrity_tfm = NULL;
	tconn->peer_integrity_tfm = NULL;
	tconn->int_dig_in = NULL;
	tconn->int_dig_vv = NULL;
}

int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts)
{
	cpumask_var_t new_cpu_mask;
	int err;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
		return -ENOMEM;
		/*
		retcode = ERR_NOMEM;
		drbd_msg_put_info("unable to allocate cpumask");
		*/

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
		/* FIXME: Get rid of constant 32 here */
		err = bitmap_parse(res_opts->cpu_mask, 32,
				   cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			conn_warn(tconn, "bitmap_parse() failed with %d\n", err);
			/* retcode = ERR_CPU_MASK_PARSE; */
			goto fail;
		}
	}
	tconn->res_opts = *res_opts;
	if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
		cpumask_copy(tconn->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(tconn);
		tconn->receiver.reset_cpu_mask = 1;
		tconn->asender.reset_cpu_mask = 1;
		tconn->worker.reset_cpu_mask = 1;
	}
	err = 0;

fail:
	free_cpumask_var(new_cpu_mask);
	return err;
}

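/*
 * Illustrative example (not part of the driver): res_opts->cpu_mask is a
 * hex string as understood by bitmap_parse(), so a resource configured
 * with a cpu mask of "3" pins the receiver, asender and worker threads of
 * this connection to CPUs 0 and 1.
 */
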
/* caller must be under genl_lock() */
struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts)
{
	struct drbd_tconn *tconn;

	tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL);
	if (!tconn)
		return NULL;

	tconn->name = kstrdup(name, GFP_KERNEL);
	if (!tconn->name)
		goto fail;

	if (drbd_alloc_socket(&tconn->data))
		goto fail;
	if (drbd_alloc_socket(&tconn->meta))
		goto fail;

	if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL))
		goto fail;

	if (set_resource_options(tconn, res_opts))
		goto fail;

	tconn->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
	if (!tconn->current_epoch)
		goto fail;

	INIT_LIST_HEAD(&tconn->transfer_log);

	INIT_LIST_HEAD(&tconn->current_epoch->list);
	tconn->epochs = 1;
	spin_lock_init(&tconn->epoch_lock);
	tconn->write_ordering = WO_bdev_flush;

	tconn->send.seen_any_write_yet = false;
	tconn->send.current_epoch_nr = 0;
	tconn->send.current_epoch_writes = 0;

	tconn->cstate = C_STANDALONE;
	mutex_init(&tconn->cstate_mutex);
	spin_lock_init(&tconn->req_lock);
	mutex_init(&tconn->conf_update);
	init_waitqueue_head(&tconn->ping_wait);
	idr_init(&tconn->volumes);

	drbd_init_workqueue(&tconn->sender_work);
	mutex_init(&tconn->data.mutex);
	mutex_init(&tconn->meta.mutex);

	drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
	drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker");
	drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender");

	kref_init(&tconn->kref);
	list_add_tail_rcu(&tconn->all_tconn, &drbd_tconns);

	return tconn;

fail:
	kfree(tconn->current_epoch);
	free_cpumask_var(tconn->cpu_mask);
	drbd_free_socket(&tconn->meta);
	drbd_free_socket(&tconn->data);
	kfree(tconn->name);
	kfree(tconn);

	return NULL;
}

void conn_destroy(struct kref *kref)
{
	struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);

	if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
		conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
	kfree(tconn->current_epoch);

	idr_destroy(&tconn->volumes);

	free_cpumask_var(tconn->cpu_mask);
	drbd_free_socket(&tconn->meta);
	drbd_free_socket(&tconn->data);
	kfree(tconn->name);
	kfree(tconn->int_dig_in);
	kfree(tconn->int_dig_vv);
	kfree(tconn);
}

enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
{
	struct drbd_conf *mdev;
	struct gendisk *disk;
	struct request_queue *q;
	int vnr_got = vnr;
	int minor_got = minor;
	enum drbd_ret_code err = ERR_NOMEM;

	mdev = minor_to_mdev(minor);
	if (mdev)
		return ERR_MINOR_EXISTS;

	/* GFP_KERNEL, we are outside of all write-out paths */
	mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
	if (!mdev)
		return ERR_NOMEM;

	kref_get(&tconn->kref);
	mdev->tconn = tconn;

	mdev->minor = minor;
	mdev->vnr = vnr;

	drbd_init_set_defaults(mdev);

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		goto out_no_q;
	mdev->rq_queue = q;
	q->queuedata   = mdev;

	disk = alloc_disk(1);
	if (!disk)
		goto out_no_disk;
	mdev->vdisk = disk;

	set_disk_ro(disk, true);

	disk->queue = q;
	disk->major = DRBD_MAJOR;
	disk->first_minor = minor;
	disk->fops = &drbd_ops;
	sprintf(disk->disk_name, "drbd%d", minor);
	disk->private_data = mdev;

	mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
	/* we have no partitions. we contain only ourselves. */
	mdev->this_bdev->bd_contains = mdev->this_bdev;

	q->backing_dev_info.congested_fn = drbd_congested;
	q->backing_dev_info.congested_data = mdev;

	blk_queue_make_request(q, drbd_make_request);
	/* Setting the max_hw_sectors to an odd value of 8kibyte here
	   This triggers a max_bio_size message upon first attach or connect */
	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
	blk_queue_merge_bvec(q, drbd_merge_bvec);
	q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */
		/* plugging on a queue, that actually has no requests! */

	mdev->md_io_page = alloc_page(GFP_KERNEL);
	if (!mdev->md_io_page)
		goto out_no_io_page;

	if (drbd_bm_init(mdev))
		goto out_no_bitmap;
	mdev->read_requests = RB_ROOT;
	mdev->write_requests = RB_ROOT;

	if (!idr_pre_get(&minors, GFP_KERNEL))
		goto out_no_minor_idr;
	if (idr_get_new_above(&minors, mdev, minor, &minor_got))
		goto out_no_minor_idr;
	if (minor_got != minor) {
		err = ERR_MINOR_EXISTS;
		drbd_msg_put_info("requested minor exists already");
		goto out_idr_remove_minor;
	}

	if (!idr_pre_get(&tconn->volumes, GFP_KERNEL))
		goto out_idr_remove_minor;
	if (idr_get_new_above(&tconn->volumes, mdev, vnr, &vnr_got))
		goto out_idr_remove_minor;
	if (vnr_got != vnr) {
		err = ERR_INVALID_REQUEST;
		drbd_msg_put_info("requested volume exists already");
		goto out_idr_remove_vol;
	}
	add_disk(disk);
	kref_init(&mdev->kref); /* one ref for both idrs and the add_disk */

	/* inherit the connection state */
	mdev->state.conn = tconn->cstate;
	if (mdev->state.conn == C_WF_REPORT_PARAMS)
		drbd_connected(mdev);

	return NO_ERROR;

out_idr_remove_vol:
	idr_remove(&tconn->volumes, vnr_got);
out_idr_remove_minor:
	idr_remove(&minors, minor_got);
	synchronize_rcu();
out_no_minor_idr:
	drbd_bm_cleanup(mdev);
out_no_bitmap:
	__free_page(mdev->md_io_page);
out_no_io_page:
	put_disk(disk);
out_no_disk:
	blk_cleanup_queue(q);
out_no_q:
	kfree(mdev);
	kref_put(&tconn->kref, &conn_destroy);
	return err;
}

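/*
 * Note on the unwind ladder above: each out_* label undoes exactly the
 * allocations that succeeded before the failure, in reverse order of
 * setup, so every error path releases precisely what it acquired.
 */
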
int __init drbd_init(void)
{
	int err;

	if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
		printk(KERN_ERR
		       "drbd: invalid minor_count (%d)\n", minor_count);
#ifdef MODULE
		return -EINVAL;
#else
		minor_count = DRBD_MINOR_COUNT_DEF;
#endif
	}

	err = register_blkdev(DRBD_MAJOR, "drbd");
	if (err) {
		printk(KERN_ERR
		       "drbd: unable to register block device major %d\n",
		       DRBD_MAJOR);
		return err;
	}

	err = drbd_genl_register();
	if (err) {
		printk(KERN_ERR "drbd: unable to register generic netlink family\n");
		goto fail;
	}

	register_reboot_notifier(&drbd_notifier);

	/*
	 * allocate all necessary structs
	 */
	err = -ENOMEM;

	init_waitqueue_head(&drbd_pp_wait);

	drbd_proc = NULL; /* play safe for drbd_cleanup */
	idr_init(&minors);

	err = drbd_create_mempools();
	if (err)
		goto fail;

	drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO, NULL, &drbd_proc_fops, NULL);
	if (!drbd_proc) {
		printk(KERN_ERR "drbd: unable to register proc file\n");
		goto fail;
	}

	rwlock_init(&global_state_lock);
	INIT_LIST_HEAD(&drbd_tconns);

	retry.wq = create_singlethread_workqueue("drbd-reissue");
	if (!retry.wq) {
		printk(KERN_ERR "drbd: unable to create retry workqueue\n");
		goto fail;
	}
	INIT_WORK(&retry.worker, do_retry);
	spin_lock_init(&retry.lock);
	INIT_LIST_HEAD(&retry.writes);

	printk(KERN_INFO "drbd: initialized. "
	       "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
	       API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
	printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
	printk(KERN_INFO "drbd: registered as block device major %d\n",
	       DRBD_MAJOR);

	return 0; /* Success! */

fail:
	drbd_cleanup();
	if (err == -ENOMEM)
		/* currently always the case */
		printk(KERN_ERR "drbd: ran out of memory\n");
	else
		printk(KERN_ERR "drbd: initialization failure\n");
	return err;
}

void drbd_free_bc(struct drbd_backing_dev *ldev)
{
	if (ldev == NULL)
		return;

	blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

	kfree(ldev);
}

void drbd_free_sock(struct drbd_tconn *tconn)
{
	if (tconn->data.socket) {
		mutex_lock(&tconn->data.mutex);
		kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR);
		sock_release(tconn->data.socket);
		tconn->data.socket = NULL;
		mutex_unlock(&tconn->data.mutex);
	}
	if (tconn->meta.socket) {
		mutex_lock(&tconn->meta.mutex);
		kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR);
		sock_release(tconn->meta.socket);
		tconn->meta.socket = NULL;
		mutex_unlock(&tconn->meta.mutex);
	}
}

/* meta data management */

struct meta_data_on_disk {
	u64 la_size;           /* last agreed size. */
	u64 uuid[UI_SIZE];     /* UUIDs. */
	u64 device_uuid;
	u64 reserved_u64_1;
	u32 flags;             /* MDF */
	u32 magic;
	u32 md_size_sect;
	u32 al_offset;         /* offset to this block */
	u32 al_nr_extents;     /* important for restoring the AL */
	      /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
	u32 bm_offset;         /* offset to the bitmap, from here */
	u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
	u32 la_peer_max_bio_size;   /* last peer max_bio_size */
	u32 reserved_u32[3];

} __packed;

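/*
 * Note: all multi-byte fields of struct meta_data_on_disk are stored in
 * big-endian byte order, independent of the host architecture.  Writers
 * convert with cpu_to_be32()/cpu_to_be64() (see drbd_md_sync() below),
 * readers with be32_to_cpu()/be64_to_cpu() (see drbd_md_read()).
 */
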
/**
 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
 * @mdev:	DRBD device.
 */
void drbd_md_sync(struct drbd_conf *mdev)
{
	struct meta_data_on_disk *buffer;
	sector_t sector;
	int i;

	del_timer(&mdev->md_sync_timer);
	/* timer may be rearmed by drbd_md_mark_dirty() now. */
	if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
		return;

	/* We use here D_FAILED and not D_ATTACHING because we try to write
	 * metadata even if we detach due to a disk failure! */
	if (!get_ldev_if_state(mdev, D_FAILED))
		return;

	buffer = drbd_md_get_buffer(mdev);
	if (!buffer)
		goto out;

	memset(buffer, 0, 512);

	buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
	buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
	buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);

	buffer->md_size_sect  = cpu_to_be32(mdev->ldev->md.md_size_sect);
	buffer->al_offset     = cpu_to_be32(mdev->ldev->md.al_offset);
	buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
	buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
	buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);

	buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
	buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);

	D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
	sector = mdev->ldev->md.md_offset;

	if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
		/* this was a try anyways ... */
		dev_err(DEV, "meta data update failed!\n");
		drbd_chk_io_error(mdev, 1, true);
	}

	/* Update mdev->ldev->md.la_size_sect,
	 * since we updated it on metadata. */
	mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);

	drbd_md_put_buffer(mdev);
out:
	put_ldev(mdev);
}

/**
 * drbd_md_read() - Reads in the meta data super block
 * @mdev:	DRBD device.
 * @bdev:	Device from which the meta data should be read in.
 *
 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
 * something goes wrong.
 */
int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
	struct meta_data_on_disk *buffer;
	u32 magic, flags;
	int i, rv = NO_ERROR;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		return ERR_IO_MD_DISK;

	buffer = drbd_md_get_buffer(mdev);
	if (!buffer)
		goto out;

	if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
		/* NOTE: can't do normal error processing here as this is
		   called BEFORE disk is attached */
		dev_err(DEV, "Error while reading metadata.\n");
		rv = ERR_IO_MD_DISK;
		goto err;
	}

	magic = be32_to_cpu(buffer->magic);
	flags = be32_to_cpu(buffer->flags);
	if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
	    (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
			/* btw: that's Activity Log clean, not "all" clean. */
		dev_err(DEV, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
		rv = ERR_MD_UNCLEAN;
		goto err;
	}
	if (magic != DRBD_MD_MAGIC_08) {
		if (magic == DRBD_MD_MAGIC_07)
			dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
		else
			dev_err(DEV, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
		dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
		    be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
		dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
		    be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
		dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
		    be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
		rv = ERR_MD_INVALID;
		goto err;
	}

	if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
		dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
		    be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
		rv = ERR_MD_INVALID;
		goto err;
	}

	bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
	bdev->md.flags = be32_to_cpu(buffer->flags);
	bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);

	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED) {
		unsigned int peer;
		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
		peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
		mdev->peer_max_bio_size = peer;
	}
	spin_unlock_irq(&mdev->tconn->req_lock);

 err:
	drbd_md_put_buffer(mdev);
 out:
	put_ldev(mdev);

	return rv;
}

/**
 * drbd_md_mark_dirty() - Mark meta data super block as dirty
 * @mdev:	DRBD device.
 *
 * Call this function if you change anything that should be written to
 * the meta-data super block. This function sets MD_DIRTY, and starts a
 * timer that ensures that within five seconds you have to call drbd_md_sync().
 */
#ifdef DEBUG
void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
{
	if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
		mod_timer(&mdev->md_sync_timer, jiffies + HZ);
		mdev->last_md_mark_dirty.line = line;
		mdev->last_md_mark_dirty.func = func;
	}
}
#else
void drbd_md_mark_dirty(struct drbd_conf *mdev)
{
	if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
		mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
}
#endif

static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
{
	int i;

	for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
		mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
}

void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
	if (idx == UI_CURRENT) {
		if (mdev->state.role == R_PRIMARY)
			val |= 1;
		else
			val &= ~((u64)1);

		drbd_set_ed_uuid(mdev, val);
	}

	mdev->ldev->md.uuid[idx] = val;
	drbd_md_mark_dirty(mdev);
}

void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
	if (mdev->ldev->md.uuid[idx]) {
		drbd_uuid_move_history(mdev);
		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
	}
	_drbd_uuid_set(mdev, idx, val);
}

/**
 * drbd_uuid_new_current() - Creates a new current UUID
 * @mdev:	DRBD device.
 *
 * Creates a new current UUID, and rotates the old current UUID into
 * the bitmap slot. Causes an incremental resync upon next connect.
 */
void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
{
	u64 val;
	unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];

	if (bm_uuid)
		dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);

	mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];

	get_random_bytes(&val, sizeof(u64));
	_drbd_uuid_set(mdev, UI_CURRENT, val);
	drbd_print_uuids(mdev, "new current UUID");
	/* get it to stable storage _now_ */
	drbd_md_sync(mdev);
}

void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
{
	if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
		return;

	if (val == 0) {
		drbd_uuid_move_history(mdev);
		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
		mdev->ldev->md.uuid[UI_BITMAP] = 0;
	} else {
		unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
		if (bm_uuid)
			dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);

		mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
	}
	drbd_md_mark_dirty(mdev);
}

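/*
 * Summary of the UUID slots manipulated above: UI_CURRENT identifies the
 * current data generation, UI_BITMAP the generation the on-disk bitmap is
 * based on (i.e. where the peer diverged), and the
 * UI_HISTORY_START..UI_HISTORY_END slots keep older generations;
 * drbd_uuid_move_history() shifts those down by one slot to make room.
 */
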
/**
 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev:	DRBD device.
 *
 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_set_n_write(struct drbd_conf *mdev)
{
	int rv = -EIO;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		drbd_md_set_flag(mdev, MDF_FULL_SYNC);
		drbd_md_sync(mdev);
		drbd_bm_set_all(mdev);

		rv = drbd_bm_write(mdev);

		if (!rv) {
			drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
			drbd_md_sync(mdev);
		}

		put_ldev(mdev);
	}

	return rv;
}

/**
 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev:	DRBD device.
 *
 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
{
	int rv = -EIO;

	drbd_resume_al(mdev);
	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		drbd_bm_clear_all(mdev);
		rv = drbd_bm_write(mdev);
		put_ldev(mdev);
	}

	return rv;
}

static int w_bitmap_io(struct drbd_work *w, int unused)
{
	struct bm_io_work *work = container_of(w, struct bm_io_work, w);
	struct drbd_conf *mdev = w->mdev;
	int rv = -EIO;

	D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);

	if (get_ldev(mdev)) {
		drbd_bm_lock(mdev, work->why, work->flags);
		rv = work->io_fn(mdev);
		drbd_bm_unlock(mdev);
		put_ldev(mdev);
	}

	clear_bit_unlock(BITMAP_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);

	if (work->done)
		work->done(mdev, rv);

	clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
	work->why = NULL;
	work->flags = 0;

	return 0;
}

void drbd_ldev_destroy(struct drbd_conf *mdev)
{
	lc_destroy(mdev->resync);
	mdev->resync = NULL;
	lc_destroy(mdev->act_log);
	mdev->act_log = NULL;
	__no_warn(local,
		drbd_free_bc(mdev->ldev);
		mdev->ldev = NULL;);

	clear_bit(GO_DISKLESS, &mdev->flags);
}

static int w_go_diskless(struct drbd_work *w, int unused)
{
	struct drbd_conf *mdev = w->mdev;

	D_ASSERT(mdev->state.disk == D_FAILED);
	/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
	 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
	 * the protected members anymore, though, so once put_ldev reaches zero
	 * again, it will be safe to free them. */
	drbd_force_state(mdev, NS(disk, D_DISKLESS));
	return 0;
}

void drbd_go_diskless(struct drbd_conf *mdev)
{
	D_ASSERT(mdev->state.disk == D_FAILED);
	if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
		drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless);
}

/**
 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
 * @mdev:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @done:	callback to be called after the bitmap IO was performed
 * @why:	Descriptive text of the reason for doing the IO
 *
 * While IO on the bitmap happens we freeze application IO, thus ensuring
 * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
 * called from worker context. It MUST NOT be used while a previous such
 * work is still pending!
 */
void drbd_queue_bitmap_io(struct drbd_conf *mdev,
			  int (*io_fn)(struct drbd_conf *),
			  void (*done)(struct drbd_conf *, int),
			  char *why, enum bm_flag flags)
{
	D_ASSERT(current == mdev->tconn->worker.task);

	D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
	D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
	D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
	if (mdev->bm_io_work.why)
		dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
			why, mdev->bm_io_work.why);

	mdev->bm_io_work.io_fn = io_fn;
	mdev->bm_io_work.done = done;
	mdev->bm_io_work.why = why;
	mdev->bm_io_work.flags = flags;

	spin_lock_irq(&mdev->tconn->req_lock);
	set_bit(BITMAP_IO, &mdev->flags);
	if (atomic_read(&mdev->ap_bio_cnt) == 0) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
			drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
	}
	spin_unlock_irq(&mdev->tconn->req_lock);
}

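/*
 * Illustrative example (not part of the driver): from worker context, a
 * full "set all bits and write out" pass could be queued as
 *
 *	drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &done_cb,
 *			     "set_n_write", BM_LOCKED_MASK);
 *
 * where done_cb is a hypothetical void (*)(struct drbd_conf *, int)
 * completion callback; passing NULL for @done is also handled by
 * w_bitmap_io() above.
 */
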
/**
 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
 * @mdev:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @why:	Descriptive text of the reason for doing the IO
 *
 * Freezes application IO while the actual IO operation runs. This
 * function MAY NOT be called from worker context.
 */
int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
		   char *why, enum bm_flag flags)
{
	int rv;

	D_ASSERT(current != mdev->tconn->worker.task);

	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
		drbd_suspend_io(mdev);

	drbd_bm_lock(mdev, why, flags);
	rv = io_fn(mdev);
	drbd_bm_unlock(mdev);

	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
		drbd_resume_io(mdev);

	return rv;
}

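/*
 * Illustrative example (not part of the driver): from non-worker context,
 * synchronously writing out the whole bitmap could look like
 *
 *	rv = drbd_bitmap_io(mdev, &drbd_bm_write, "example write-out",
 *			    BM_LOCKED_MASK);
 *
 * Application IO is suspended around the call unless @flags already
 * contain the BM_LOCKED_SET_ALLOWED bits.
 */
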
void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
	if ((mdev->ldev->md.flags & flag) != flag) {
		drbd_md_mark_dirty(mdev);
		mdev->ldev->md.flags |= flag;
	}
}

void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
	if ((mdev->ldev->md.flags & flag) != 0) {
		drbd_md_mark_dirty(mdev);
		mdev->ldev->md.flags &= ~flag;
	}
}

int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
{
	return (bdev->md.flags & flag) != 0;
}

static void md_sync_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;

	drbd_queue_work_front(&mdev->tconn->sender_work, &mdev->md_sync_work);
}

static int w_md_sync(struct drbd_work *w, int unused)
{
	struct drbd_conf *mdev = w->mdev;

	dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
#ifdef DEBUG
	dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
		 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
#endif
	drbd_md_sync(mdev);
	return 0;
}

const char *cmdname(enum drbd_packet cmd)
{
	/* THINK may need to become several global tables
	 * when we want to support more than
	 * one PRO_VERSION */
	static const char *cmdnames[] = {
		[P_DATA]	        = "Data",
		[P_DATA_REPLY]	        = "DataReply",
		[P_RS_DATA_REPLY]	= "RSDataReply",
		[P_BARRIER]	        = "Barrier",
		[P_BITMAP]	        = "ReportBitMap",
		[P_BECOME_SYNC_TARGET]  = "BecomeSyncTarget",
		[P_BECOME_SYNC_SOURCE]  = "BecomeSyncSource",
		[P_UNPLUG_REMOTE]	= "UnplugRemote",
		[P_DATA_REQUEST]	= "DataRequest",
		[P_RS_DATA_REQUEST]     = "RSDataRequest",
		[P_SYNC_PARAM]	        = "SyncParam",
		[P_SYNC_PARAM89]	= "SyncParam89",
		[P_PROTOCOL]            = "ReportProtocol",
		[P_UUIDS]	        = "ReportUUIDs",
		[P_SIZES]	        = "ReportSizes",
		[P_STATE]	        = "ReportState",
		[P_SYNC_UUID]           = "ReportSyncUUID",
		[P_AUTH_CHALLENGE]      = "AuthChallenge",
		[P_AUTH_RESPONSE]	= "AuthResponse",
		[P_PING]	        = "Ping",
		[P_PING_ACK]	        = "PingAck",
		[P_RECV_ACK]	        = "RecvAck",
		[P_WRITE_ACK]	        = "WriteAck",
		[P_RS_WRITE_ACK]	= "RSWriteAck",
		[P_DISCARD_WRITE]       = "DiscardWrite",
		[P_NEG_ACK]	        = "NegAck",
		[P_NEG_DREPLY]	        = "NegDReply",
		[P_NEG_RS_DREPLY]	= "NegRSDReply",
		[P_BARRIER_ACK]	        = "BarrierAck",
		[P_STATE_CHG_REQ]       = "StateChgRequest",
		[P_STATE_CHG_REPLY]     = "StateChgReply",
		[P_OV_REQUEST]          = "OVRequest",
		[P_OV_REPLY]            = "OVReply",
		[P_OV_RESULT]           = "OVResult",
		[P_CSUM_RS_REQUEST]     = "CsumRSRequest",
		[P_RS_IS_IN_SYNC]	= "CsumRSIsInSync",
		[P_COMPRESSED_BITMAP]   = "CBitmap",
		[P_DELAY_PROBE]         = "DelayProbe",
		[P_OUT_OF_SYNC]	        = "OutOfSync",
		[P_RETRY_WRITE]	        = "RetryWrite",
		[P_RS_CANCEL]	        = "RSCancel",
		[P_CONN_ST_CHG_REQ]	= "conn_st_chg_req",
		[P_CONN_ST_CHG_REPLY]	= "conn_st_chg_reply",
		[P_PROTOCOL_UPDATE]	= "protocol_update",

		/* enum drbd_packet, but not commands - obsoleted flags:
		 *	P_MAY_IGNORE
		 *	P_MAX_OPT_CMD
		 */
	};

	/* too big for the array: 0xfffX */
	if (cmd == P_INITIAL_META)
		return "InitialMeta";
	if (cmd == P_INITIAL_DATA)
		return "InitialData";
	if (cmd == P_CONNECTION_FEATURES)
		return "ConnectionFeatures";
	if (cmd >= ARRAY_SIZE(cmdnames))
		return "Unknown";
	return cmdnames[cmd];
}

/**
 * drbd_wait_misc  -  wait for a request to make progress
 * @mdev:	device associated with the request
 * @i:		the struct drbd_interval embedded in struct drbd_request or
 *		struct drbd_peer_request
 */
int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
{
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	long timeout;

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -ETIMEDOUT;
	}
	timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
	rcu_read_unlock();

	/* Indicate to wake up mdev->misc_wait on progress.  */
	i->waiting = true;
	prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
	spin_unlock_irq(&mdev->tconn->req_lock);
	timeout = schedule_timeout(timeout);
	finish_wait(&mdev->misc_wait, &wait);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (!timeout || mdev->state.conn < C_CONNECTED)
		return -ETIMEDOUT;
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}

#ifdef CONFIG_DRBD_FAULT_INJECTION
/* Fault insertion support including random number generator shamelessly
 * stolen from kernel/rcutorture.c */
struct fault_random_state {
	unsigned long state;
	unsigned long count;
};

#define FAULT_RANDOM_MULT 39916801  /* prime */
#define FAULT_RANDOM_ADD 479001701 /* prime */
#define FAULT_RANDOM_REFRESH 10000

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static unsigned long
_drbd_fault_random(struct fault_random_state *rsp)
{
	unsigned long refresh;

	if (!rsp->count--) {
		get_random_bytes(&refresh, sizeof(refresh));
		rsp->state += refresh;
		rsp->count = FAULT_RANDOM_REFRESH;
	}
	rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
	return swahw32(rsp->state);
}

static char *
_drbd_fault_str(unsigned int type) {
	static char *_faults[] = {
		[DRBD_FAULT_MD_WR] = "Meta-data write",
		[DRBD_FAULT_MD_RD] = "Meta-data read",
		[DRBD_FAULT_RS_WR] = "Resync write",
		[DRBD_FAULT_RS_RD] = "Resync read",
		[DRBD_FAULT_DT_WR] = "Data write",
		[DRBD_FAULT_DT_RD] = "Data read",
		[DRBD_FAULT_DT_RA] = "Data read ahead",
		[DRBD_FAULT_BM_ALLOC] = "BM allocation",
		[DRBD_FAULT_AL_EE] = "EE allocation",
		[DRBD_FAULT_RECEIVE] = "receive data corruption",
	};

	return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
}

unsigned int
_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
{
	static struct fault_random_state rrs = {0, 0};

	unsigned int ret = (
		(fault_devs == 0 ||
			((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
		(((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));

	if (ret) {
		fault_count++;

		if (__ratelimit(&drbd_ratelimit_state))
			dev_warn(DEV, "***Simulating %s failure\n",
				 _drbd_fault_str(type));
	}

	return ret;
}
#endif

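/*
 * Illustrative example (not part of the driver): with
 * CONFIG_DRBD_FAULT_INJECTION enabled, roughly 10% of data writes on
 * minor 0 can be made to fail via the module parameters declared at the
 * top of this file:
 *
 *	modprobe drbd enable_faults=16 fault_rate=10 fault_devs=1
 *
 * enable_faults is a bitmask of fault types (assuming DRBD_FAULT_DT_WR
 * has the value 4 from the enum in drbd.h, 1 << DRBD_FAULT_DT_WR == 16),
 * fault_rate is the failure probability in percent, and fault_devs selects
 * minors by bitmask (0 means all devices, see _drbd_insert_fault() above).
 */
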
const char *drbd_buildtag(void)
{
	/* DRBD built from external sources has here a reference to the
	   git hash of the source code. */

	static char buildtag[38] = "\0uilt-in";

	if (buildtag[0] == 0) {
#ifdef CONFIG_MODULES
		if (THIS_MODULE != NULL)
			sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
		else
#endif
			buildtag[0] = 'b';
	}

	return buildtag;
}

module_init(drbd_init)
module_exit(drbd_cleanup)

EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
EXPORT_SYMBOL(drbd_set_st_err_str);