/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
static DEFINE_MUTEX(drbd_main_mutex);
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
static int w_md_sync(struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_work *w, int unused);
MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(proc_details, int, 0644);
#ifdef CONFIG_DRBD_FAULT_INJECTION
static int fault_count;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif
/* module parameter, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
int disable_sendpage;
int proc_details;       /* Detail level in proc drbd*/

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct list_head drbd_tconns;  /* list of struct drbd_tconn */

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* peer requests */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
mempool_t *drbd_md_io_page_pool;
struct bio_set *drbd_md_io_bio_set;
/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a single linked list, the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t   drbd_pp_lock;
wait_queue_head_t drbd_pp_wait;

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
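/*
 * Illustrative sketch (not part of the driver): drbd_pp_pool is the singly
 * linked page pool described above, with each page's private field acting
 * as the "next" pointer (see drbd_destroy_mempools() below, which walks the
 * list the same way).  Popping one pre-allocated page under drbd_pp_lock
 * would look roughly like this; the helper name is hypothetical:
 *
 *	static struct page *drbd_pp_pop_example(void)
 *	{
 *		struct page *page;
 *
 *		spin_lock(&drbd_pp_lock);
 *		page = drbd_pp_pool;
 *		if (page)
 *			drbd_pp_pool = (struct page *)page_private(page);
 *		spin_unlock(&drbd_pp_lock);
 *		return page;
 *	}
 */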
static const struct block_device_operations drbd_ops = {
	.owner   = THIS_MODULE,
	.release = drbd_release,

static void bio_destructor_drbd(struct bio *bio)
	bio_free(bio, drbd_md_io_bio_set);

struct bio *bio_alloc_drbd(gfp_t gfp_mask)
	if (!drbd_md_io_bio_set)
		return bio_alloc(gfp_mask, 1);

	bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
	bio->bi_destructor = bio_destructor_drbd;
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function sparse works.
 */
int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (atomic_dec_and_test(&mdev->local_cnt))
		wake_up(&mdev->misc_wait);
/**
 * DOC: The transfer log
 *
 * The transfer log is a single linked list of &struct drbd_tl_epoch objects.
 * mdev->tconn->newest_tle points to the head, mdev->tconn->oldest_tle points
 * to the tail of the list. There is always at least one &struct drbd_tl_epoch
 * object.
 *
 * Each &struct drbd_tl_epoch has a circular double linked list of requests.
 */
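/*
 * Illustrative sketch (not part of the driver): given the layout described
 * above, walking every epoch from oldest to newest, and every request ring
 * attached to an epoch, would look roughly like this (handle() is a
 * placeholder):
 *
 *	struct drbd_tl_epoch *b;
 *	struct drbd_request *req;
 *
 *	for (b = tconn->oldest_tle; b; b = b->next)
 *		list_for_each_entry(req, &b->requests, tl_requests)
 *			handle(req);
 *
 * The "next" pointer and the "requests"/"tl_requests" members are the same
 * ones used by _tl_add_barrier() and tl_release() below.
 */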
static int tl_init(struct drbd_tconn *tconn)
	struct drbd_tl_epoch *b;

	/* during device minor initialization, we may well use GFP_KERNEL */
	b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
	INIT_LIST_HEAD(&b->requests);
	INIT_LIST_HEAD(&b->w.list);
	b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
	tconn->oldest_tle = b;
	tconn->newest_tle = b;
	INIT_LIST_HEAD(&tconn->out_of_sequence_requests);
	INIT_LIST_HEAD(&tconn->barrier_acked_requests);
static void tl_cleanup(struct drbd_tconn *tconn)
	if (tconn->oldest_tle != tconn->newest_tle)
		conn_err(tconn, "ASSERT FAILED: oldest_tle == newest_tle\n");
	if (!list_empty(&tconn->out_of_sequence_requests))
		conn_err(tconn, "ASSERT FAILED: list_empty(out_of_sequence_requests)\n");
	kfree(tconn->oldest_tle);
	tconn->oldest_tle = NULL;
	kfree(tconn->unused_spare_tle);
	tconn->unused_spare_tle = NULL;
/**
 * _tl_add_barrier() - Adds a barrier to the transfer log
 * @tconn:	DRBD connection.
 * @new:	Barrier to be added before the current head of the TL.
 *
 * The caller must hold the req_lock.
 */
void _tl_add_barrier(struct drbd_tconn *tconn, struct drbd_tl_epoch *new)
	struct drbd_tl_epoch *newest_before;

	INIT_LIST_HEAD(&new->requests);
	INIT_LIST_HEAD(&new->w.list);
	new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
	newest_before = tconn->newest_tle;
	new->br_number = newest_before->br_number+1;
	if (tconn->newest_tle != new) {
		tconn->newest_tle->next = new;
		tconn->newest_tle = new;
/**
 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
 * @tconn:	DRBD connection.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * &struct drbd_tl_epoch objects this function will cause a termination
 * of the connection.
 */
void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
		unsigned int set_size)
	struct drbd_conf *mdev;
	struct drbd_tl_epoch *b, *nob; /* next old barrier */
	struct list_head *le, *tle;
	struct drbd_request *r;

	spin_lock_irq(&tconn->req_lock);
	b = tconn->oldest_tle;

	/* first some paranoia code */
		conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
	if (b->br_number != barrier_nr) {
		conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
			 barrier_nr, b->br_number);
	if (b->n_writes != set_size) {
		conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			 barrier_nr, set_size, b->n_writes);

	/* Clean up list of requests processed during current epoch */
	list_for_each_safe(le, tle, &b->requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		_req_mod(r, BARRIER_ACKED);

	/* There could be requests on the list waiting for completion
	   of the write to the local disk. To avoid corruptions of
	   slab's data structures we have to remove the list's head.

	   Also there could have been a barrier ack out of sequence, overtaking
	   the write acks - which would be a bug and violating write ordering.
	   To not deadlock in case we lose connection while such requests are
	   still pending, we need some way to find them for the
	   _req_mod(CONNECTION_LOST_WHILE_PENDING).

	   These have been list_move'd to the out_of_sequence_requests list in
	   _req_mod(, BARRIER_ACKED) above.
	 */
	list_splice_init(&b->requests, &tconn->barrier_acked_requests);

	if (test_and_clear_bit(CREATE_BARRIER, &tconn->flags)) {
		_tl_add_barrier(tconn, b);
		tconn->oldest_tle = nob;
		/* if nob == NULL b was the only barrier, and becomes the new
		   barrier. Therefore tconn->oldest_tle points already to b */
		D_ASSERT(nob != NULL);
		tconn->oldest_tle = nob;

	spin_unlock_irq(&tconn->req_lock);
	dec_ap_pending(mdev);

	spin_unlock_irq(&tconn->req_lock);
	conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @tconn:	DRBD connection.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
	struct drbd_tl_epoch *b, *tmp, **pn;
	struct list_head *le, *tle, carry_reads;
	struct drbd_request *req;
	int rv, n_writes, n_reads;

	b = tconn->oldest_tle;
	pn = &tconn->oldest_tle;
		INIT_LIST_HEAD(&carry_reads);
		list_for_each_safe(le, tle, &b->requests) {
			req = list_entry(le, struct drbd_request, tl_requests);
			rv = _req_mod(req, what);

		if (what == RESEND) {
			b->n_writes = n_writes;
			if (b->w.cb == NULL) {
				b->w.cb = w_send_barrier;
				inc_ap_pending(b->w.mdev);
				set_bit(CREATE_BARRIER, &tconn->flags);
			drbd_queue_work(&tconn->data.work, &b->w);

			list_add(&carry_reads, &b->requests);
			/* there could still be requests on that ring list,
			 * in case local io is still pending */
			list_del(&b->requests);

			/* dec_ap_pending corresponding to queue_barrier.
			 * the newest barrier may not have been queued yet,
			 * in which case w.cb is still NULL. */
				dec_ap_pending(b->w.mdev);

			if (b == tconn->newest_tle) {
				/* recycle, but reinit! */
					conn_err(tconn, "ASSERT FAILED tmp == NULL");
				INIT_LIST_HEAD(&b->requests);
				list_splice(&carry_reads, &b->requests);
				INIT_LIST_HEAD(&b->w.list);
				b->br_number = net_random();
			list_splice(&carry_reads, &b->requests);

	/* Actions operating on the disk state, also want to work on
	   requests that got barrier acked. */
	case FAIL_FROZEN_DISK_IO:
	case RESTART_FROZEN_DISK_IO:
		list_for_each_safe(le, tle, &tconn->barrier_acked_requests) {
			req = list_entry(le, struct drbd_request, tl_requests);
	case CONNECTION_LOST_WHILE_PENDING:
		conn_err(tconn, "what = %d in _tl_restart()\n", what);
/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @tconn:	DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_tconn *tconn)
	struct list_head *le, *tle;
	struct drbd_request *r;

	spin_lock_irq(&tconn->req_lock);
	_tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);

	/* we expect this list to be empty. */
	if (!list_empty(&tconn->out_of_sequence_requests))
		conn_err(tconn, "ASSERT FAILED list_empty(&out_of_sequence_requests)\n");

	/* but just in case, clean it up anyways! */
	list_for_each_safe(le, tle, &tconn->out_of_sequence_requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		/* It would be nice to complete outside of spinlock.
		 * But this is easier for now. */
		_req_mod(r, CONNECTION_LOST_WHILE_PENDING);

	/* ensure bit indicating barrier is required is clear */
	clear_bit(CREATE_BARRIER, &tconn->flags);

	spin_unlock_irq(&tconn->req_lock);
void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
	spin_lock_irq(&tconn->req_lock);
	_tl_restart(tconn, what);
	spin_unlock_irq(&tconn->req_lock);
/**
 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
 * @mdev:	DRBD device.
 */
void tl_abort_disk_io(struct drbd_conf *mdev)
	struct drbd_tconn *tconn = mdev->tconn;
	struct drbd_tl_epoch *b;
	struct list_head *le, *tle;
	struct drbd_request *req;

	spin_lock_irq(&tconn->req_lock);
	b = tconn->oldest_tle;
		list_for_each_safe(le, tle, &b->requests) {
			req = list_entry(le, struct drbd_request, tl_requests);
			if (!(req->rq_state & RQ_LOCAL_PENDING))
			if (req->w.mdev == mdev)
				_req_mod(req, ABORT_DISK_IO);

	list_for_each_safe(le, tle, &tconn->barrier_acked_requests) {
		req = list_entry(le, struct drbd_request, tl_requests);
		if (!(req->rq_state & RQ_LOCAL_PENDING))
		if (req->w.mdev == mdev)
			_req_mod(req, ABORT_DISK_IO);

	spin_unlock_irq(&tconn->req_lock);
static int drbd_thread_setup(void *arg)
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_tconn *tconn = thi->tconn;

	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
		 thi->name[0], thi->tconn->name);
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "EXITING", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "RESTARTING" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees EXITING, and can remap to RESTARTING,
	 * or thread_start see NONE, and can proceed as normal.
	 */
	if (thi->t_state == RESTARTING) {
		conn_info(tconn, "Restarting %s thread\n", thi->name);
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);

	complete_all(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	conn_info(tconn, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */
	kref_put(&tconn->kref, &conn_destroy);
	module_put(THIS_MODULE);
static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *), char *name)
	spin_lock_init(&thi->t_lock);
	thi->function = func;
	strncpy(thi->name, name, ARRAY_SIZE(thi->name));
int drbd_thread_start(struct drbd_thread *thi)
	struct drbd_tconn *tconn = thi->tconn;
	struct task_struct *nt;

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
		conn_info(tconn, "Starting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);

		kref_get(&thi->tconn->kref);

		init_completion(&thi->stop);
		thi->reset_cpu_mask = 1;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd_%c_%s", thi->name[0], thi->tconn->name);

			conn_err(tconn, "Couldn't start thread\n");
			kref_put(&tconn->kref, &conn_destroy);
			module_put(THIS_MODULE);

		spin_lock_irqsave(&thi->t_lock, flags);
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);

		thi->t_state = RESTARTING;
		conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);

		spin_unlock_irqrestore(&thi->t_lock, flags);
void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == NONE) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
			drbd_thread_start(thi);

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);

		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);

	spin_unlock_irqrestore(&thi->t_lock, flags);

		wait_for_completion(&thi->stop);
static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
	struct drbd_thread *thi =
		task == tconn->receiver.task ? &tconn->receiver :
		task == tconn->asender.task  ? &tconn->asender :
		task == tconn->worker.task   ? &tconn->worker : NULL;
char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
	struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
	return thi ? thi->name : task->comm;
int conn_lowest_minor(struct drbd_tconn *tconn)
	struct drbd_conf *mdev;

	mdev = idr_get_next(&tconn->volumes, &vnr);
	m = mdev ? mdev_to_minor(mdev) : -1;
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 * @tconn:	DRBD connection.
 *
 * Forces all threads of a device onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
	if (cpumask_weight(tconn->cpu_mask))

	ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
	for_each_online_cpu(cpu) {
			cpumask_set_cpu(cpu, tconn->cpu_mask);
	/* should not be reached */
	cpumask_setall(tconn->cpu_mask);
/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi:	drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
	struct task_struct *p = current;

	if (!thi->reset_cpu_mask)
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
/**
 * drbd_header_size  -  size of a packet header
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures. (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_tconn *tconn)
	if (tconn->agreed_pro_version >= 100) {
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
		return sizeof(struct p_header100);

		BUILD_BUG_ON(sizeof(struct p_header80) !=
			     sizeof(struct p_header95));
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
		return sizeof(struct p_header80);
static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
	h->magic   = cpu_to_be32(DRBD_MAGIC);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size);
	return sizeof(struct p_header80);

static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be32(size);
	return sizeof(struct p_header95);

static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
	h->magic   = cpu_to_be32(DRBD_MAGIC_100);
	h->volume  = cpu_to_be16(vnr);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be32(size);
	return sizeof(struct p_header100);

static unsigned int prepare_header(struct drbd_tconn *tconn, int vnr,
				   void *buffer, enum drbd_packet cmd, int size)
	if (tconn->agreed_pro_version >= 100)
		return prepare_header100(buffer, cmd, size, vnr);
	else if (tconn->agreed_pro_version >= 95 &&
		 size > DRBD_MAX_SIZE_H80_PACKET)
		return prepare_header95(buffer, cmd, size);
		return prepare_header80(buffer, cmd, size);
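/*
 * Illustrative note (not part of the driver): with the helpers above, the
 * on-the-wire header format is chosen purely from the agreed protocol
 * version and the payload size:
 *
 *	agreed_pro_version >= 100                          -> struct p_header100
 *	agreed_pro_version 95..99, size > H80 packet limit -> struct p_header95
 *	otherwise                                          -> struct p_header80
 *
 * ("H80 packet limit" stands for DRBD_MAX_SIZE_H80_PACKET; its exact value
 * is defined elsewhere and is not assumed here.)
 */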
static void *__conn_prepare_command(struct drbd_tconn *tconn,
				    struct drbd_socket *sock)
	return sock->sbuf + drbd_header_size(tconn);

void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
	mutex_lock(&sock->mutex);
	p = __conn_prepare_command(tconn, sock);
		mutex_unlock(&sock->mutex);

void *drbd_prepare_command(struct drbd_conf *mdev, struct drbd_socket *sock)
	return conn_prepare_command(mdev->tconn, sock);
static int __send_command(struct drbd_tconn *tconn, int vnr,
			  struct drbd_socket *sock, enum drbd_packet cmd,
			  unsigned int header_size, void *data,
	/*
	 * Called with @data == NULL and the size of the data blocks in @size
	 * for commands that send data blocks.  For those commands, omit the
	 * MSG_MORE flag: this will increase the likelihood that data blocks
	 * which are page aligned on the sender will end up page aligned on
	 * the receiving side.
	 */
	msg_flags = data ? MSG_MORE : 0;

	header_size += prepare_header(tconn, vnr, sock->sbuf, cmd,
	err = drbd_send_all(tconn, sock->socket, sock->sbuf, header_size,
		err = drbd_send_all(tconn, sock->socket, data, size, 0);
static int __conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
			       enum drbd_packet cmd, unsigned int header_size,
			       void *data, unsigned int size)
	return __send_command(tconn, 0, sock, cmd, header_size, data, size);

int conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
	err = __conn_send_command(tconn, sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);

int drbd_send_command(struct drbd_conf *mdev, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
	err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, header_size,
	mutex_unlock(&sock->mutex);
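/*
 * Illustrative sketch (not part of the driver): the prepare/send helpers
 * above are used as a pair.  *_prepare_command() takes the socket mutex and
 * returns a pointer just past the packet header inside sock->sbuf; the
 * caller fills in the payload and hands it to the matching *_send_command(),
 * which also drops the mutex.  Roughly:
 *
 *	struct p_block_desc *p;
 *
 *	p = drbd_prepare_command(mdev, &mdev->tconn->data);
 *	if (!p)
 *		return -EIO;
 *	p->sector  = cpu_to_be64(sector);
 *	p->blksize = cpu_to_be32(size);
 *	return drbd_send_command(mdev, &mdev->tconn->data, P_OUT_OF_SYNC,
 *				 sizeof(*p), NULL, 0);
 *
 * (Error handling is abbreviated; the pattern mirrors drbd_send_out_of_sync()
 * further down in this file.)
 */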
int drbd_send_ping(struct drbd_tconn *tconn)
	struct drbd_socket *sock;

	if (!conn_prepare_command(tconn, sock))
	return conn_send_command(tconn, sock, P_PING, 0, NULL, 0);

int drbd_send_ping_ack(struct drbd_tconn *tconn)
	struct drbd_socket *sock;

	if (!conn_prepare_command(tconn, sock))
	return conn_send_command(tconn, sock, P_PING_ACK, 0, NULL, 0);
int drbd_send_sync_param(struct drbd_conf *mdev)
	struct drbd_socket *sock;
	struct p_rs_param_95 *p;
	const int apv = mdev->tconn->agreed_pro_version;
	enum drbd_packet cmd;
	struct disk_conf *dc;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	nc = rcu_dereference(mdev->tconn->net_conf);

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(nc->verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (get_ldev(mdev)) {
		dc = rcu_dereference(mdev->ldev->disk_conf);
		p->resync_rate = cpu_to_be32(dc->resync_rate);
		p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
		p->c_delay_target = cpu_to_be32(dc->c_delay_target);
		p->c_fill_target = cpu_to_be32(dc->c_fill_target);
		p->c_max_rate = cpu_to_be32(dc->c_max_rate);

		p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);

		strcpy(p->verify_alg, nc->verify_alg);
		strcpy(p->csums_alg, nc->csums_alg);

	return drbd_send_command(mdev, sock, cmd, size, NULL, 0);
int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd)
	struct drbd_socket *sock;
	struct p_protocol *p;

	p = __conn_prepare_command(tconn, sock);
	nc = rcu_dereference(tconn->net_conf);

	if (nc->tentative && tconn->agreed_pro_version < 92) {
		mutex_unlock(&sock->mutex);
		conn_err(tconn, "--dry-run is not supported by peer");

	if (tconn->agreed_pro_version >= 87)
		size += strlen(nc->integrity_alg) + 1;

	p->protocol      = cpu_to_be32(nc->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(nc->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(nc->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(nc->after_sb_2p);
	p->two_primaries = cpu_to_be32(nc->two_primaries);
	if (nc->discard_my_data)
		cf |= CF_DISCARD_MY_DATA;
	p->conn_flags    = cpu_to_be32(cf);

	if (tconn->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, nc->integrity_alg);

	return __conn_send_command(tconn, sock, cmd, size, NULL, 0);

int drbd_send_protocol(struct drbd_tconn *tconn)
	mutex_lock(&tconn->data.mutex);
	err = __drbd_send_protocol(tconn, P_PROTOCOL);
	mutex_unlock(&tconn->data.mutex);
int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
	struct drbd_socket *sock;

	if (!get_ldev_if_state(mdev, D_NEGOTIATING))

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p->uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;

	mdev->comm_bm_set = drbd_bm_total_weight(mdev);
	p->uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
	uuid_flags |= rcu_dereference(mdev->tconn->net_conf)->discard_my_data ? 1 : 0;
	uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
	uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	return drbd_send_command(mdev, sock, P_UUIDS, sizeof(*p), NULL, 0);

int drbd_send_uuids(struct drbd_conf *mdev)
	return _drbd_send_uuids(mdev, 0);

int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
	return _drbd_send_uuids(mdev, 8);
void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		u64 *uuid = mdev->ldev->md.uuid;
		dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
			 (unsigned long long)uuid[UI_CURRENT],
			 (unsigned long long)uuid[UI_BITMAP],
			 (unsigned long long)uuid[UI_HISTORY_START],
			 (unsigned long long)uuid[UI_HISTORY_END]);

		dev_info(DEV, "%s effective data uuid: %016llX\n",
			 (unsigned long long)mdev->ed_uuid);
void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
	struct drbd_socket *sock;
	struct p_rs_uuid *p;

	D_ASSERT(mdev->state.disk == D_UP_TO_DATE);

	uuid = mdev->ldev->md.uuid[UI_BITMAP];
	if (uuid && uuid != UUID_JUST_CREATED)
		uuid = uuid + UUID_NEW_BM_OFFSET;
		get_random_bytes(&uuid, sizeof(u64));
	drbd_uuid_set(mdev, UI_BITMAP, uuid);
	drbd_print_uuids(mdev, "updated sync UUID");

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
		p->uuid = cpu_to_be64(uuid);
		drbd_send_command(mdev, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
	struct drbd_socket *sock;
	sector_t d_size, u_size;
	int q_order_type, max_bio_size;

	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		D_ASSERT(mdev->ldev->backing_bdev);
		d_size = drbd_get_max_capacity(mdev->ldev);
		u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
		q_order_type = drbd_queue_order_type(mdev);
		max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);

		q_order_type = QUEUE_ORDERED_NONE;
		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);

	if (mdev->tconn->agreed_pro_version <= 94)
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
	else if (mdev->tconn->agreed_pro_version < 100)
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE_P95);

	p->d_size = cpu_to_be64(d_size);
	p->u_size = cpu_to_be64(u_size);
	p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
	p->max_bio_size = cpu_to_be32(max_bio_size);
	p->queue_order_type = cpu_to_be16(q_order_type);
	p->dds_flags = cpu_to_be16(flags);
	return drbd_send_command(mdev, sock, P_SIZES, sizeof(*p), NULL, 0);
/**
 * drbd_send_current_state() - Sends the drbd state to the peer
 * @mdev:	DRBD device.
 */
int drbd_send_current_state(struct drbd_conf *mdev)
	struct drbd_socket *sock;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	p->state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
	return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
/**
 * drbd_send_state() - After a state change, sends the new state to the peer
 * @mdev:	DRBD device.
 * @state:	the state to send, not necessarily the current state.
 *
 * Each state change queues an "after_state_ch" work, which will eventually
 * send the resulting new state to the peer. If more state changes happen
 * between queuing and processing of the after_state_ch work, we still
 * want to send each intermediary state in the order it occurred.
 */
int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
	struct drbd_socket *sock;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	p->state = cpu_to_be32(state.i); /* Within the send mutex */
	return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val)
	struct drbd_socket *sock;
	struct p_req_state *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return drbd_send_command(mdev, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
	enum drbd_packet cmd;
	struct drbd_socket *sock;
	struct p_req_state *p;

	cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
	sock = &tconn->data;
	p = conn_prepare_command(tconn, sock);
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
	struct drbd_socket *sock;
	struct p_req_state_reply *p;

	sock = &mdev->tconn->meta;
	p = drbd_prepare_command(mdev, sock);
		p->retcode = cpu_to_be32(retcode);
		drbd_send_command(mdev, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode)
	struct drbd_socket *sock;
	struct p_req_state_reply *p;
	enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;

	sock = &tconn->meta;
	p = conn_prepare_command(tconn, sock);
		p->retcode = cpu_to_be32(retcode);
		conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
	BUG_ON(code & ~0xf);
	p->encoding = (p->encoding & ~0xf) | code;

static void dcbp_set_start(struct p_compressed_bm *p, int set)
	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);

static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
	p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
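/*
 * Illustrative note (not part of the driver): taken together, the three
 * setters above pack the p_compressed_bm "encoding" byte as
 *
 *	bit  7    : value of the first run           (dcbp_set_start)
 *	bits 6..4 : number of trailing pad bits      (dcbp_set_pad_bits)
 *	bits 3..0 : encoding code, e.g. RLE_VLI_Bits (dcbp_set_code)
 *
 * so an RLE/VLI-encoded chunk whose first run is "set" and that needs three
 * pad bits would end up with encoding == 0x80 | (3 << 4) | RLE_VLI_Bits.
 */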
int fill_bitmap_rle_bits(struct drbd_conf *mdev,
			 struct p_compressed_bm *p,
			 struct bm_xfer_ctx *c)
	struct bitstream bs;
	unsigned long plain_bits;

	/* may we use this feature? */
	use_rle = rcu_dereference(mdev->tconn->net_conf)->use_rle;
	if (!use_rle || mdev->tconn->agreed_pro_version < 90)

	if (c->bit_offset >= c->bm_bits)
		return 0; /* nothing to do. */

	/* use at most thus many bytes */
	bitstream_init(&bs, p->code, size, 0);
	memset(p->code, 0, size);
	/* plain bits covered in this code string */

	/* p->encoding & 0x80 stores whether the first run length is set.
	 * bit offset is implicit.
	 * start with toggle == 2 to be able to tell the first iteration */

	/* see how much plain bits we can stuff into one packet
	 * using RLE and VLI. */
		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
				    : _drbd_bm_find_next(mdev, c->bit_offset);
		rl = tmp - c->bit_offset;

		if (toggle == 2) { /* first iteration */
			/* the first checked bit was set,
			 * store start value, */
				dcbp_set_start(p, 1);
			/* but skip encoding of zero run length */
				dcbp_set_start(p, 0);

		/* paranoia: catch zero runlength.
		 * can only happen if bitmap is modified while we scan it. */
			dev_err(DEV, "unexpected zero runlength while encoding bitmap "
				"t:%u bo:%lu\n", toggle, c->bit_offset);

		bits = vli_encode_bits(&bs, rl);
		if (bits == -ENOBUFS) /* buffer full */
			dev_err(DEV, "error while encoding bitmap: %d\n", bits);

		c->bit_offset = tmp;
	} while (c->bit_offset < c->bm_bits);

	len = bs.cur.b - p->code + !!bs.cur.bit;

	if (plain_bits < (len << 3)) {
		/* incompressible with this method.
		 * we need to rewind both word and bit position. */
		c->bit_offset -= plain_bits;
		bm_xfer_ctx_bit_to_word_offset(c);
		c->bit_offset = c->word_offset * BITS_PER_LONG;

		/* RLE + VLI was able to compress it just fine.
		 * update c->word_offset. */
		bm_xfer_ctx_bit_to_word_offset(c);

	/* store pad_bits */
	dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
/**
 * send_bitmap_rle_or_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
	struct drbd_socket *sock = &mdev->tconn->data;
	unsigned int header_size = drbd_header_size(mdev->tconn);
	struct p_compressed_bm *p = sock->sbuf + header_size;

	len = fill_bitmap_rle_bits(mdev, p,
			DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);

		dcbp_set_code(p, RLE_VLI_Bits);
		err = __send_command(mdev->tconn, mdev->vnr, sock,
				     P_COMPRESSED_BITMAP, sizeof(*p) + len,
		c->bytes[0] += header_size + sizeof(*p) + len;

		if (c->bit_offset >= c->bm_bits)

		/* was not compressible.
		 * send a buffer full of plain text bits instead. */
		unsigned int data_size;
		unsigned long num_words;
		unsigned long *p = sock->sbuf + header_size;

		data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
		num_words = min_t(size_t, data_size / sizeof(*p),
				  c->bm_words - c->word_offset);
		len = num_words * sizeof(*p);
			drbd_bm_get_lel(mdev, c->word_offset, num_words, p);
		err = __send_command(mdev->tconn, mdev->vnr, sock, P_BITMAP, len, NULL, 0);
		c->word_offset += num_words;
		c->bit_offset = c->word_offset * BITS_PER_LONG;

		c->bytes[1] += header_size + len;

		if (c->bit_offset > c->bm_bits)
			c->bit_offset = c->bm_bits;

		INFO_bm_xfer_stats(mdev, "send", c);
/* See the comment at receive_bitmap() */
static int _drbd_send_bitmap(struct drbd_conf *mdev)
	struct bm_xfer_ctx c;

	if (!expect(mdev->bitmap))

	if (get_ldev(mdev)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
			dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
			drbd_bm_set_all(mdev);
			if (drbd_bm_write(mdev)) {
				/* write_bm did fail! Leave full sync flag set in Meta P_DATA
				 * but otherwise process as per normal - need to tell other
				 * side that a full resync is required! */
				dev_err(DEV, "Failed to write bitmap to disk!\n");
				drbd_md_clear_flag(mdev, MDF_FULL_SYNC);

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),

		err = send_bitmap_rle_or_plain(mdev, &c);
int drbd_send_bitmap(struct drbd_conf *mdev)
	struct drbd_socket *sock = &mdev->tconn->data;

	mutex_lock(&sock->mutex);
		err = !_drbd_send_bitmap(mdev);
	mutex_unlock(&sock->mutex);
void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr, u32 set_size)
	struct drbd_socket *sock;
	struct p_barrier_ack *p;

	if (tconn->cstate < C_WF_REPORT_PARAMS)

	sock = &tconn->meta;
	p = conn_prepare_command(tconn, sock);
	p->barrier = barrier_nr;
	p->set_size = cpu_to_be32(set_size);
	conn_send_command(tconn, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
/**
 * _drbd_send_ack() - Sends an ack packet
 * @mdev:	DRBD device.
 * @cmd:	Packet command code.
 * @sector:	sector, needs to be in big endian byte order
 * @blksize:	size in byte, needs to be in big endian byte order
 * @block_id:	Id, big endian byte order
 */
static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
			  u64 sector, u32 blksize, u64 block_id)
	struct drbd_socket *sock;
	struct p_block_ack *p;

	if (mdev->state.conn < C_CONNECTED)

	sock = &mdev->tconn->meta;
	p = drbd_prepare_command(mdev, sock);
	p->block_id = block_id;
	p->blksize = blksize;
	p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
	return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
/* dp->sector and dp->block_id already/still in network byte order,
 * data_size is payload size according to dp->head,
 * and may need to be corrected for digest size. */
void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
		      struct p_data *dp, int data_size)
	if (mdev->tconn->peer_integrity_tfm)
		data_size -= crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
	_drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
		      struct p_block_req *rp)
	_drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
/**
 * drbd_send_ack() - Sends an ack packet
 * @mdev:	DRBD device
 * @cmd:	packet command code
 * @peer_req:	peer request
 */
int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
		  struct drbd_peer_request *peer_req)
	return _drbd_send_ack(mdev, cmd,
			      cpu_to_be64(peer_req->i.sector),
			      cpu_to_be32(peer_req->i.size),
			      peer_req->block_id);
/* This function misuses the block_id field to signal if the blocks
 * are in sync or not. */
int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
		     sector_t sector, int blksize, u64 block_id)
	return _drbd_send_ack(mdev, cmd,
			      cpu_to_be64(sector),
			      cpu_to_be32(blksize),
			      cpu_to_be64(block_id));
*mdev
, int cmd
,
1556 sector_t sector
, int size
, u64 block_id
)
1558 struct drbd_socket
*sock
;
1559 struct p_block_req
*p
;
1561 sock
= &mdev
->tconn
->data
;
1562 p
= drbd_prepare_command(mdev
, sock
);
1565 p
->sector
= cpu_to_be64(sector
);
1566 p
->block_id
= block_id
;
1567 p
->blksize
= cpu_to_be32(size
);
1568 return drbd_send_command(mdev
, sock
, cmd
, sizeof(*p
), NULL
, 0);
int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
			    void *digest, int digest_size, enum drbd_packet cmd)
	struct drbd_socket *sock;
	struct p_block_req *p;

	/* FIXME: Put the digest into the preallocated socket buffer. */

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(mdev, sock, cmd, sizeof(*p),
				 digest, digest_size);
int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
	struct drbd_socket *sock;
	struct p_block_req *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(mdev, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
/* called on sndtimeo
 * returns false if we should retry,
 * true if we think connection is dead
 */
static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock)
	/* long elapsed = (long)(jiffies - mdev->last_received); */

	drop_it =   tconn->meta.socket == sock
		|| !tconn->asender.task
		|| get_t_state(&tconn->asender) != RUNNING
		|| tconn->cstate < C_WF_REPORT_PARAMS;

	drop_it = !--tconn->ko_count;
		conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
			 current->comm, current->pid, tconn->ko_count);
		request_ping(tconn);

	return drop_it; /* && (mdev->state == R_PRIMARY) */;
static void drbd_update_congested(struct drbd_tconn *tconn)
	struct sock *sk = tconn->data.socket->sk;
	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
		set_bit(NET_CONGESTED, &tconn->flags);
/* The idea of sendpage seems to be to put some kind of reference
 * to the page into the skb, and to hand it over to the NIC. In
 * this process get_page() gets called.
 *
 * As soon as the page was really sent over the network put_page()
 * gets called by some part of the network layer. [ NIC driver? ]
 *
 * [ get_page() / put_page() increment/decrement the count. If count
 *   reaches 0 the page will be freed. ]
 *
 * This works nicely with pages from FSs.
 * But this means that in protocol A we might signal IO completion too early!
 *
 * In order not to corrupt data during a resync we must make sure
 * that we do not reuse our own buffer pages (EEs) too early, therefore
 * we have the net_ee list.
 *
 * XFS seems to have problems, still, it submits pages with page_count == 0!
 * As a workaround, we disable sendpage on pages
 * with page_count == 0 or PageSlab.
 */
static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
			      int offset, size_t size, unsigned msg_flags)
	struct socket *socket;

	socket = mdev->tconn->data.socket;
	addr = kmap(page) + offset;
	err = drbd_send_all(mdev->tconn, socket, addr, size, msg_flags);
		mdev->send_cnt += size >> 9;
static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
			   int offset, size_t size, unsigned msg_flags)
	struct socket *socket = mdev->tconn->data.socket;
	mm_segment_t oldfs = get_fs();

	/* e.g. XFS meta- & log-data is in slab pages, which have a
	 * page_count of 0 and/or have PageSlab() set.
	 * we cannot use send_page for those, as that does get_page();
	 * put_page(); and would cause either a VM_BUG directly, or
	 * __page_cache_release a page that would actually still be referenced
	 * by someone, leading to some obscure delayed Oops somewhere else. */
	if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
		return _drbd_no_send_page(mdev, page, offset, size, msg_flags);

	msg_flags |= MSG_NOSIGNAL;
	drbd_update_congested(mdev->tconn);
		sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
		if (sent == -EAGAIN) {
			if (we_should_drop_the_connection(mdev->tconn, socket))
			dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
				 __func__, (int)size, len, sent);
	} while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
	clear_bit(NET_CONGESTED, &mdev->tconn->flags);

		mdev->send_cnt += size >> 9;
static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
	struct bio_vec *bvec;

	/* hint all but last page with MSG_MORE */
	bio_for_each_segment(bvec, bio, i) {
		err = _drbd_no_send_page(mdev, bvec->bv_page,
					 bvec->bv_offset, bvec->bv_len,
					 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
	struct bio_vec *bvec;

	/* hint all but last page with MSG_MORE */
	bio_for_each_segment(bvec, bio, i) {
		err = _drbd_send_page(mdev, bvec->bv_page,
				      bvec->bv_offset, bvec->bv_len,
				      i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
static int _drbd_send_zc_ee(struct drbd_conf *mdev,
			    struct drbd_peer_request *peer_req)
	struct page *page = peer_req->pages;
	unsigned len = peer_req->i.size;

	/* hint all but last page with MSG_MORE */
	page_chain_for_each(page) {
		unsigned l = min_t(unsigned, len, PAGE_SIZE);
		err = _drbd_send_page(mdev, page, 0, l,
				      page_chain_next(page) ? MSG_MORE : 0);
static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
	if (mdev->tconn->agreed_pro_version >= 95)
		return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
			(bi_rw & REQ_FUA ? DP_FUA : 0) |
			(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
			(bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
	return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
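/*
 * Illustrative note (not part of the driver): for peers speaking protocol 95
 * or newer, bio_flags_to_wire() simply mirrors the request flags onto the
 * wire, so a REQ_FLUSH|REQ_FUA write is sent with
 * dp_flags == DP_FLUSH | DP_FUA, while older peers only ever learn about
 * REQ_SYNC via DP_RW_SYNC.
 */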
/* Used to send write requests
 * R_PRIMARY -> Peer	(P_DATA)
 */
int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
	struct drbd_socket *sock;
	unsigned int dp_flags = 0;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;

	p->sector = cpu_to_be64(req->i.sector);
	p->block_id = (unsigned long)req;
	p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
	dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
	if (mdev->state.conn >= C_SYNC_SOURCE &&
	    mdev->state.conn <= C_PAUSED_SYNC_T)
		dp_flags |= DP_MAY_SET_IN_SYNC;
	if (mdev->tconn->agreed_pro_version >= 100) {
		if (req->rq_state & RQ_EXP_RECEIVE_ACK)
			dp_flags |= DP_SEND_RECEIVE_ACK;
		if (req->rq_state & RQ_EXP_WRITE_ACK)
			dp_flags |= DP_SEND_WRITE_ACK;
	p->dp_flags = cpu_to_be32(dp_flags);
		drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, p + 1);
	err = __send_command(mdev->tconn, mdev->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);

	/* For protocol A, we have to memcpy the payload into
	 * socket buffers, as we may complete right away
	 * as soon as we handed it over to tcp, at which point the data
	 * pages may become invalid.
	 *
	 * For data-integrity enabled, we copy it as well, so we can be
	 * sure that even if the bio pages may still be modified, it
	 * won't change the data on the wire, thus if the digest checks
	 * out ok after sending on this side, but does not fit on the
	 * receiving side, we sure have detected corruption elsewhere.
	 */
	if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || dgs)
		err = _drbd_send_bio(mdev, req->master_bio);
		err = _drbd_send_zc_bio(mdev, req->master_bio);

	/* double check digest, sometimes buffers have been modified in flight. */
	if (dgs > 0 && dgs <= 64) {
		/* 64 byte, 512 bit, is the largest digest size
		 * currently supported in kernel crypto. */
		unsigned char digest[64];
		drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, digest);
		if (memcmp(p + 1, digest, dgs)) {
				"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
				(unsigned long long)req->i.sector, req->i.size);
	} /* else if (dgs > 64) {
	     ... Be noisy about digest too large ...
	} */

	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */
/* answer packet, used to send data back for read requests:
 *  Peer          -> (diskless) R_PRIMARY  (P_DATA_REPLY)
 *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
 */
int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
		    struct drbd_peer_request *peer_req)
	struct drbd_socket *sock;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;

	p->sector = cpu_to_be64(peer_req->i.sector);
	p->block_id = peer_req->block_id;
	p->seq_num = 0;  /* unused */
		drbd_csum_ee(mdev, mdev->tconn->integrity_tfm, peer_req, p + 1);
	err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
		err = _drbd_send_zc_ee(mdev, peer_req);
	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */
int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
	struct drbd_socket *sock;
	struct p_block_desc *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	p->sector = cpu_to_be64(req->i.sector);
	p->blksize = cpu_to_be32(req->i.size);
	return drbd_send_command(mdev, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
/*
  drbd_send distinguishes two cases:

  Packets sent via the data socket "sock"
  and packets sent via the meta data socket "msock"

		    sock                      msock
  -----------------+-------------------------+------------------------------
  timeout           conf.timeout / 2          conf.timeout / 2
  timeout action    send a ping via msock     Abort communication
					      and close all sockets
*/

/*
 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
 */
*tconn
, struct socket
*sock
,
1925 void *buf
, size_t size
, unsigned msg_flags
)
1934 /* THINK if (signal_pending) return ... ? */
1939 msg
.msg_name
= NULL
;
1940 msg
.msg_namelen
= 0;
1941 msg
.msg_control
= NULL
;
1942 msg
.msg_controllen
= 0;
1943 msg
.msg_flags
= msg_flags
| MSG_NOSIGNAL
;
1945 if (sock
== tconn
->data
.socket
) {
1947 tconn
->ko_count
= rcu_dereference(tconn
->net_conf
)->ko_count
;
1949 drbd_update_congested(tconn
);
1953 * tcp_sendmsg does _not_ use its size parameter at all ?
1955 * -EAGAIN on timeout, -EINTR on signal.
1958 * do we need to block DRBD_SIG if sock == &meta.socket ??
1959 * otherwise wake_asender() might interrupt some send_*Ack !
1961 rv
= kernel_sendmsg(sock
, &msg
, &iov
, 1, size
);
1962 if (rv
== -EAGAIN
) {
1963 if (we_should_drop_the_connection(tconn
, sock
))
1969 flush_signals(current
);
1977 } while (sent
< size
);
1979 if (sock
== tconn
->data
.socket
)
1980 clear_bit(NET_CONGESTED
, &tconn
->flags
);
1983 if (rv
!= -EAGAIN
) {
1984 conn_err(tconn
, "%s_sendmsg returned %d\n",
1985 sock
== tconn
->meta
.socket
? "msock" : "sock",
1987 conn_request_state(tconn
, NS(conn
, C_BROKEN_PIPE
), CS_HARD
);
1989 conn_request_state(tconn
, NS(conn
, C_TIMEOUT
), CS_HARD
);
/**
 * drbd_send_all  -  Send an entire buffer
 *
 * Returns 0 upon success and a negative error value otherwise.
 */
int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
		  size_t size, unsigned msg_flags)
	err = drbd_send(tconn, sock, buffer, size, msg_flags);
static int drbd_open(struct block_device *bdev, fmode_t mode)
	struct drbd_conf *mdev = bdev->bd_disk->private_data;
	unsigned long flags;

	mutex_lock(&drbd_main_mutex);
	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	/* to have a stable mdev->state.role
	 * and no race with updating open_cnt */

	if (mdev->state.role != R_PRIMARY) {
		if (mode & FMODE_WRITE)
		else if (!allow_oos)

	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
	mutex_unlock(&drbd_main_mutex);
static int drbd_release(struct gendisk *gd, fmode_t mode)
	struct drbd_conf *mdev = gd->private_data;
	mutex_lock(&drbd_main_mutex);
	mutex_unlock(&drbd_main_mutex);
static void drbd_set_defaults(struct drbd_conf *mdev)
	/* Beware! The actual layout differs
	 * between big endian and little endian */
	mdev->state = (union drbd_dev_state) {
		{ .role = R_SECONDARY,
		  .conn = C_STANDALONE,
void drbd_init_set_defaults(struct drbd_conf *mdev)
{
	/* the memset(,0,) did most of this.
	 * note: only assignments, no allocation in here */

	drbd_set_defaults(mdev);

	atomic_set(&mdev->ap_bio_cnt, 0);
	atomic_set(&mdev->ap_pending_cnt, 0);
	atomic_set(&mdev->rs_pending_cnt, 0);
	atomic_set(&mdev->unacked_cnt, 0);
	atomic_set(&mdev->local_cnt, 0);
	atomic_set(&mdev->pp_in_use_by_net, 0);
	atomic_set(&mdev->rs_sect_in, 0);
	atomic_set(&mdev->rs_sect_ev, 0);
	atomic_set(&mdev->ap_in_flight, 0);
	atomic_set(&mdev->md_io_in_use, 0);

	mutex_init(&mdev->own_state_mutex);
	mdev->state_mutex = &mdev->own_state_mutex;

	spin_lock_init(&mdev->al_lock);
	spin_lock_init(&mdev->peer_seq_lock);

	INIT_LIST_HEAD(&mdev->active_ee);
	INIT_LIST_HEAD(&mdev->sync_ee);
	INIT_LIST_HEAD(&mdev->done_ee);
	INIT_LIST_HEAD(&mdev->read_ee);
	INIT_LIST_HEAD(&mdev->net_ee);
	INIT_LIST_HEAD(&mdev->resync_reads);
	INIT_LIST_HEAD(&mdev->resync_work.list);
	INIT_LIST_HEAD(&mdev->unplug_work.list);
	INIT_LIST_HEAD(&mdev->go_diskless.list);
	INIT_LIST_HEAD(&mdev->md_sync_work.list);
	INIT_LIST_HEAD(&mdev->start_resync_work.list);
	INIT_LIST_HEAD(&mdev->bm_io_work.w.list);

	mdev->resync_work.cb  = w_resync_timer;
	mdev->unplug_work.cb  = w_send_write_hint;
	mdev->go_diskless.cb  = w_go_diskless;
	mdev->md_sync_work.cb = w_md_sync;
	mdev->bm_io_work.w.cb = w_bitmap_io;
	mdev->start_resync_work.cb = w_start_resync;

	mdev->resync_work.mdev  = mdev;
	mdev->unplug_work.mdev  = mdev;
	mdev->go_diskless.mdev  = mdev;
	mdev->md_sync_work.mdev = mdev;
	mdev->bm_io_work.w.mdev = mdev;
	mdev->start_resync_work.mdev = mdev;

	init_timer(&mdev->resync_timer);
	init_timer(&mdev->md_sync_timer);
	init_timer(&mdev->start_resync_timer);
	init_timer(&mdev->request_timer);
	mdev->resync_timer.function = resync_timer_fn;
	mdev->resync_timer.data = (unsigned long) mdev;
	mdev->md_sync_timer.function = md_sync_timer_fn;
	mdev->md_sync_timer.data = (unsigned long) mdev;
	mdev->start_resync_timer.function = start_resync_timer_fn;
	mdev->start_resync_timer.data = (unsigned long) mdev;
	mdev->request_timer.function = request_timer_fn;
	mdev->request_timer.data = (unsigned long) mdev;

	init_waitqueue_head(&mdev->misc_wait);
	init_waitqueue_head(&mdev->state_wait);
	init_waitqueue_head(&mdev->ee_wait);
	init_waitqueue_head(&mdev->al_wait);
	init_waitqueue_head(&mdev->seq_wait);

	mdev->resync_wenr = LC_FREE;
	mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
	mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
}
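/*
 * The drbd_work items above are pre-wired once here: each gets its callback
 * (.cb) and a back pointer to the owning device (.mdev), so later code can
 * simply hand &mdev->md_sync_work or &mdev->go_diskless to drbd_queue_work()
 * on the connection's work queue without any per-use setup.  The timer
 * callbacks typically just queue one of these prepared items from interrupt
 * context (see md_sync_timer_fn() further down).
 */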
void drbd_mdev_cleanup(struct drbd_conf *mdev)
{
	int i;
	if (mdev->tconn->receiver.t_state != NONE)
		dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
				mdev->tconn->receiver.t_state);

	mdev->al_writ_cnt  =
	mdev->bm_writ_cnt  =
	mdev->read_cnt     =
	mdev->recv_cnt     =
	mdev->send_cnt     =
	mdev->writ_cnt     =
	mdev->p_size       =
	mdev->rs_start     =
	mdev->rs_total     =
	mdev->rs_failed    = 0;
	mdev->rs_last_events = 0;
	mdev->rs_last_sect_ev = 0;
	for (i = 0; i < DRBD_SYNC_MARKS; i++) {
		mdev->rs_mark_left[i] = 0;
		mdev->rs_mark_time[i] = 0;
	}
	D_ASSERT(mdev->tconn->net_conf == NULL);

	drbd_set_my_capacity(mdev, 0);
	if (mdev->bitmap) {
		/* maybe never allocated. */
		drbd_bm_resize(mdev, 0, 1);
		drbd_bm_cleanup(mdev);
	}

	drbd_free_bc(mdev->ldev);
	mdev->ldev = NULL;

	clear_bit(AL_SUSPENDED, &mdev->flags);

	D_ASSERT(list_empty(&mdev->active_ee));
	D_ASSERT(list_empty(&mdev->sync_ee));
	D_ASSERT(list_empty(&mdev->done_ee));
	D_ASSERT(list_empty(&mdev->read_ee));
	D_ASSERT(list_empty(&mdev->net_ee));
	D_ASSERT(list_empty(&mdev->resync_reads));
	D_ASSERT(list_empty(&mdev->tconn->data.work.q));
	D_ASSERT(list_empty(&mdev->tconn->meta.work.q));
	D_ASSERT(list_empty(&mdev->resync_work.list));
	D_ASSERT(list_empty(&mdev->unplug_work.list));
	D_ASSERT(list_empty(&mdev->go_diskless.list));

	drbd_set_defaults(mdev);
}
static void drbd_destroy_mempools(void)
{
	struct page *page;

	while (drbd_pp_pool) {
		page = drbd_pp_pool;
		drbd_pp_pool = (struct page *)page_private(page);
		__free_page(page);
		drbd_pp_vacant--;
	}

	/* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */

	if (drbd_md_io_bio_set)
		bioset_free(drbd_md_io_bio_set);
	if (drbd_md_io_page_pool)
		mempool_destroy(drbd_md_io_page_pool);
	if (drbd_ee_mempool)
		mempool_destroy(drbd_ee_mempool);
	if (drbd_request_mempool)
		mempool_destroy(drbd_request_mempool);
	if (drbd_ee_cache)
		kmem_cache_destroy(drbd_ee_cache);
	if (drbd_request_cache)
		kmem_cache_destroy(drbd_request_cache);
	if (drbd_bm_ext_cache)
		kmem_cache_destroy(drbd_bm_ext_cache);
	if (drbd_al_ext_cache)
		kmem_cache_destroy(drbd_al_ext_cache);

	drbd_md_io_bio_set   = NULL;
	drbd_md_io_page_pool = NULL;
	drbd_ee_mempool      = NULL;
	drbd_request_mempool = NULL;
	drbd_ee_cache        = NULL;
	drbd_request_cache   = NULL;
	drbd_bm_ext_cache    = NULL;
	drbd_al_ext_cache    = NULL;
}
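/*
 * drbd_pp_pool above is not a mempool but a simple singly linked list of
 * free pages: each page's page_private field points at the next free page.
 * Tearing it down therefore just walks the chain and frees one page at a
 * time, while drbd_create_mempools() below builds the chain the same way
 * with set_page_private().
 */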
static int drbd_create_mempools(void)
{
	struct page *page;
	const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
	int i;

	/* prepare our caches and mempools */
	drbd_request_mempool = NULL;
	drbd_ee_cache        = NULL;
	drbd_request_cache   = NULL;
	drbd_bm_ext_cache    = NULL;
	drbd_al_ext_cache    = NULL;
	drbd_pp_pool         = NULL;
	drbd_md_io_page_pool = NULL;
	drbd_md_io_bio_set   = NULL;

	/* caches */
	drbd_request_cache = kmem_cache_create(
		"drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
	if (drbd_request_cache == NULL)
		goto Enomem;

	drbd_ee_cache = kmem_cache_create(
		"drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
	if (drbd_ee_cache == NULL)
		goto Enomem;

	drbd_bm_ext_cache = kmem_cache_create(
		"drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
	if (drbd_bm_ext_cache == NULL)
		goto Enomem;

	drbd_al_ext_cache = kmem_cache_create(
		"drbd_al", sizeof(struct lc_element), 0, 0, NULL);
	if (drbd_al_ext_cache == NULL)
		goto Enomem;

	/* mempools */
	drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
	if (drbd_md_io_bio_set == NULL)
		goto Enomem;

	drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
	if (drbd_md_io_page_pool == NULL)
		goto Enomem;

	drbd_request_mempool = mempool_create(number,
		mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
	if (drbd_request_mempool == NULL)
		goto Enomem;

	drbd_ee_mempool = mempool_create(number,
		mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
	if (drbd_ee_mempool == NULL)
		goto Enomem;

	/* drbd's page pool */
	spin_lock_init(&drbd_pp_lock);

	for (i = 0; i < number; i++) {
		page = alloc_page(GFP_HIGHUSER);
		if (!page)
			goto Enomem;
		set_page_private(page, (unsigned long)drbd_pp_pool);
		drbd_pp_pool = page;
	}
	drbd_pp_vacant = number;

	return 0;

Enomem:
	drbd_destroy_mempools(); /* in case we allocated some */
	return -ENOMEM;
}
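/*
 * Sizing note: "number" above reserves enough pages for one maximally sized
 * bio in flight per configured minor.  Assuming the usual 4 KiB PAGE_SIZE
 * and a 1 MiB DRBD_MAX_BIO_SIZE, that works out to 256 pre-allocated pages
 * (1 MiB of page pool) for every unit of minor_count.
 */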
static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
			   void *unused)
{
	/* just so we have it.  you never know what interesting things we
	 * might want to do here some day...
	 */

	return NOTIFY_DONE;
}

static struct notifier_block drbd_notifier = {
	.notifier_call = drbd_notify_sys,
};
static void drbd_release_all_peer_reqs(struct drbd_conf *mdev)
{
	int rr;

	rr = drbd_free_peer_reqs(mdev, &mdev->active_ee);
	if (rr)
		dev_err(DEV, "%d EEs in active list found!\n", rr);

	rr = drbd_free_peer_reqs(mdev, &mdev->sync_ee);
	if (rr)
		dev_err(DEV, "%d EEs in sync list found!\n", rr);

	rr = drbd_free_peer_reqs(mdev, &mdev->read_ee);
	if (rr)
		dev_err(DEV, "%d EEs in read list found!\n", rr);

	rr = drbd_free_peer_reqs(mdev, &mdev->done_ee);
	if (rr)
		dev_err(DEV, "%d EEs in done list found!\n", rr);

	rr = drbd_free_peer_reqs(mdev, &mdev->net_ee);
	if (rr)
		dev_err(DEV, "%d EEs in net list found!\n", rr);
}
/* caution. no locking. */
void drbd_minor_destroy(struct kref *kref)
{
	struct drbd_conf *mdev = container_of(kref, struct drbd_conf, kref);
	struct drbd_tconn *tconn = mdev->tconn;

	del_timer_sync(&mdev->request_timer);

	/* paranoia asserts */
	D_ASSERT(mdev->open_cnt == 0);
	D_ASSERT(list_empty(&mdev->tconn->data.work.q));
	/* end paranoia asserts */

	/* cleanup stuff that may have been allocated during
	 * device (re-)configuration or state changes */

	if (mdev->this_bdev)
		bdput(mdev->this_bdev);

	drbd_free_bc(mdev->ldev);
	mdev->ldev = NULL;

	drbd_release_all_peer_reqs(mdev);

	lc_destroy(mdev->act_log);
	lc_destroy(mdev->resync);

	kfree(mdev->p_uuid);
	/* mdev->p_uuid = NULL; */

	if (mdev->bitmap) /* should no longer be there. */
		drbd_bm_cleanup(mdev);
	__free_page(mdev->md_io_page);
	put_disk(mdev->vdisk);
	blk_cleanup_queue(mdev->rq_queue);
	kfree(mdev->rs_plan_s);
	kfree(mdev);

	kref_put(&tconn->kref, &conn_destroy);
}
/* One global retry thread, if we need to push back some bio and have it
 * reinserted through our make request function.
 */
static struct retry_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	spinlock_t lock;
	struct list_head writes;
} retry;
static void do_retry(struct work_struct *ws)
{
	struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
	LIST_HEAD(writes);
	struct drbd_request *req, *tmp;

	spin_lock_irq(&retry->lock);
	list_splice_init(&retry->writes, &writes);
	spin_unlock_irq(&retry->lock);

	list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
		struct drbd_conf *mdev = req->w.mdev;
		struct bio *bio = req->master_bio;
		unsigned long start_time = req->start_time;

		/* We have exclusive access to this request object.
		 * If it had not been RQ_POSTPONED, the code path which queued
		 * it here would have completed and freed it already.
		 */
		mempool_free(req, drbd_request_mempool);

		/* A single suspended or otherwise blocking device may stall
		 * all others as well.  Fortunately, this code path is to
		 * recover from a situation that "should not happen":
		 * concurrent writes in multi-primary setup.
		 * In a "normal" lifecycle, this workqueue is supposed to be
		 * destroyed without ever doing anything.
		 * If it turns out to be an issue anyways, we can do per
		 * resource (replication group) or per device (minor) retry
		 * workqueues instead.
		 */

		/* We are not just doing generic_make_request(),
		 * as we want to keep the start_time information. */
		do {
			inc_ap_bio(mdev);
		} while (__drbd_make_request(mdev, bio, start_time));
	}
}
void drbd_restart_write(struct drbd_request *req)
{
	unsigned long flags;
	spin_lock_irqsave(&retry.lock, flags);
	list_move_tail(&req->tl_requests, &retry.writes);
	spin_unlock_irqrestore(&retry.lock, flags);

	/* Drop the extra reference that would otherwise
	 * have been dropped by complete_master_bio.
	 * do_retry() needs to grab a new one. */
	dec_ap_bio(req->w.mdev);

	queue_work(retry.wq, &retry.worker);
}
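/*
 * Hand-off between drbd_restart_write() and do_retry(): the request is
 * parked on retry.writes under retry.lock, the ap_bio reference that the
 * normal completion path would have dropped is dropped here instead, and
 * do_retry() takes a fresh reference before resubmitting the master bio
 * through __drbd_make_request() with its original start_time preserved.
 */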
static void drbd_cleanup(void)
{
	int i;
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn, *tmp;

	unregister_reboot_notifier(&drbd_notifier);

	/* first remove proc,
	 * drbdsetup uses its presence to detect
	 * whether DRBD is loaded.
	 * If we would get stuck in proc removal,
	 * but have netlink already deregistered,
	 * some drbdsetup commands may wait forever
	 * for an answer.
	 */
	if (drbd_proc)
		remove_proc_entry("drbd", NULL);

	if (retry.wq)
		destroy_workqueue(retry.wq);

	drbd_genl_unregister();

	idr_for_each_entry(&minors, mdev, i) {
		idr_remove(&minors, mdev_to_minor(mdev));
		idr_remove(&mdev->tconn->volumes, mdev->vnr);
		del_gendisk(mdev->vdisk);
		/* synchronize_rcu(); No other threads running at this point */
		kref_put(&mdev->kref, &drbd_minor_destroy);
	}

	/* not _rcu since, no other updater anymore. Genl already unregistered */
	list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
		list_del(&tconn->all_tconn); /* not _rcu no proc, not other threads */
		/* synchronize_rcu(); */
		kref_put(&tconn->kref, &conn_destroy);
	}

	drbd_destroy_mempools();
	unregister_blkdev(DRBD_MAJOR, "drbd");

	idr_destroy(&minors);

	printk(KERN_INFO "drbd: module cleanup done.\n");
}
/**
 * drbd_congested() - Callback for pdflush
 * @congested_data:	User data
 * @bdi_bits:		Bits pdflush is currently interested in
 *
 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
 */
static int drbd_congested(void *congested_data, int bdi_bits)
{
	struct drbd_conf *mdev = congested_data;
	struct request_queue *q;
	char reason = '-';
	int r = 0;

	if (!may_inc_ap_bio(mdev)) {
		/* DRBD has frozen IO */
		r = bdi_bits;
		reason = 'd';
		goto out;
	}

	if (get_ldev(mdev)) {
		q = bdev_get_queue(mdev->ldev->backing_bdev);
		r = bdi_congested(&q->backing_dev_info, bdi_bits);
		put_ldev(mdev);
		if (r)
			reason = 'b';
	}

	if (bdi_bits & (1 << BDI_async_congested) &&
	    test_bit(NET_CONGESTED, &mdev->tconn->flags)) {
		r |= (1 << BDI_async_congested);
		reason = reason == 'b' ? 'a' : 'n';
	}

out:
	mdev->congestion_reason = reason;
	return r;
}
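/*
 * The single-character congestion_reason set above is a debugging hint:
 * 'd' when DRBD itself has frozen I/O (the may_inc_ap_bio() branch), 'b'
 * when the backing device's BDI reports congestion, 'n' when only the
 * replication link is congested (NET_CONGESTED), 'a' when both the lower
 * device and the link are, '-' when nothing is.  It is presumably surfaced
 * through the /proc/drbd status output rather than used for decisions here.
 */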
static void drbd_init_workqueue(struct drbd_work_queue *wq)
{
	sema_init(&wq->s, 0);
	spin_lock_init(&wq->q_lock);
	INIT_LIST_HEAD(&wq->q);
}
struct drbd_tconn *conn_get_by_name(const char *name)
{
	struct drbd_tconn *tconn;

	if (!name || !name[0])
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
		if (!strcmp(tconn->name, name)) {
			kref_get(&tconn->kref);
			goto found;
		}
	}
	tconn = NULL;
found:
	rcu_read_unlock();
	return tconn;
}
struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
				     void *peer_addr, int peer_addr_len)
{
	struct drbd_tconn *tconn;

	rcu_read_lock();
	list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
		if (tconn->my_addr_len == my_addr_len &&
		    tconn->peer_addr_len == peer_addr_len &&
		    !memcmp(&tconn->my_addr, my_addr, my_addr_len) &&
		    !memcmp(&tconn->peer_addr, peer_addr, peer_addr_len)) {
			kref_get(&tconn->kref);
			goto found;
		}
	}
	tconn = NULL;
found:
	rcu_read_unlock();
	return tconn;
}
static int drbd_alloc_socket(struct drbd_socket *socket)
{
	socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->rbuf)
		return -ENOMEM;
	socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->sbuf)
		return -ENOMEM;
	return 0;
}

static void drbd_free_socket(struct drbd_socket *socket)
{
	free_page((unsigned long) socket->sbuf);
	free_page((unsigned long) socket->rbuf);
}
void conn_free_crypto(struct drbd_tconn *tconn)
{
	drbd_free_sock(tconn);

	crypto_free_hash(tconn->csums_tfm);
	crypto_free_hash(tconn->verify_tfm);
	crypto_free_hash(tconn->cram_hmac_tfm);
	crypto_free_hash(tconn->integrity_tfm);
	crypto_free_hash(tconn->peer_integrity_tfm);
	kfree(tconn->int_dig_in);
	kfree(tconn->int_dig_vv);

	tconn->csums_tfm = NULL;
	tconn->verify_tfm = NULL;
	tconn->cram_hmac_tfm = NULL;
	tconn->integrity_tfm = NULL;
	tconn->peer_integrity_tfm = NULL;
	tconn->int_dig_in = NULL;
	tconn->int_dig_vv = NULL;
}
int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts)
{
	cpumask_var_t new_cpu_mask;
	int err;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
		return -ENOMEM;
		/*
		retcode = ERR_NOMEM;
		drbd_msg_put_info("unable to allocate cpumask");
		*/

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
		/* FIXME: Get rid of constant 32 here */
		err = __bitmap_parse(res_opts->cpu_mask, 32, 0,
				     cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
			/* retcode = ERR_CPU_MASK_PARSE; */
			goto fail;
		}
	}
	tconn->res_opts = *res_opts;
	if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
		cpumask_copy(tconn->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(tconn);
		tconn->receiver.reset_cpu_mask = 1;
		tconn->asender.reset_cpu_mask = 1;
		tconn->worker.reset_cpu_mask = 1;
	}
	err = 0;

fail:
	free_cpumask_var(new_cpu_mask);
	return err;
}
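/*
 * The cpu_mask in res_opts is a hex bitmap string as understood by
 * __bitmap_parse(); for example (illustrative only), a mask of "3" pins the
 * receiver, asender and worker threads of this resource to CPUs 0 and 1.
 * The reset_cpu_mask flags merely tell those threads to re-apply the new
 * mask from their own context the next time they run.
 */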
/* caller must be under genl_lock() */
struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts)
{
	struct drbd_tconn *tconn;

	tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL);
	if (!tconn)
		return NULL;

	tconn->name = kstrdup(name, GFP_KERNEL);
	if (!tconn->name)
		goto fail;

	if (drbd_alloc_socket(&tconn->data))
		goto fail;
	if (drbd_alloc_socket(&tconn->meta))
		goto fail;

	if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL))
		goto fail;

	if (set_resource_options(tconn, res_opts))
		goto fail;

	if (!tl_init(tconn))
		goto fail;

	tconn->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
	if (!tconn->current_epoch)
		goto fail;
	INIT_LIST_HEAD(&tconn->current_epoch->list);

	spin_lock_init(&tconn->epoch_lock);
	tconn->write_ordering = WO_bdev_flush;

	tconn->cstate = C_STANDALONE;
	mutex_init(&tconn->cstate_mutex);
	spin_lock_init(&tconn->req_lock);
	mutex_init(&tconn->conf_update);
	init_waitqueue_head(&tconn->ping_wait);
	idr_init(&tconn->volumes);

	drbd_init_workqueue(&tconn->data.work);
	mutex_init(&tconn->data.mutex);

	drbd_init_workqueue(&tconn->meta.work);
	mutex_init(&tconn->meta.mutex);

	drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
	drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker");
	drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender");

	kref_init(&tconn->kref);
	list_add_tail_rcu(&tconn->all_tconn, &drbd_tconns);

	return tconn;

fail:
	kfree(tconn->current_epoch);
	tl_cleanup(tconn);
	free_cpumask_var(tconn->cpu_mask);
	drbd_free_socket(&tconn->meta);
	drbd_free_socket(&tconn->data);
	kfree(tconn->name);
	kfree(tconn);

	return NULL;
}
void conn_destroy(struct kref *kref)
{
	struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);

	if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
		conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
	kfree(tconn->current_epoch);

	idr_destroy(&tconn->volumes);

	free_cpumask_var(tconn->cpu_mask);
	drbd_free_socket(&tconn->meta);
	drbd_free_socket(&tconn->data);
	kfree(tconn->name);
	kfree(tconn->int_dig_in);
	kfree(tconn->int_dig_vv);
	kfree(tconn);
}
enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
{
	struct drbd_conf *mdev;
	struct gendisk *disk;
	struct request_queue *q;
	int vnr_got = vnr;
	int minor_got = minor;
	enum drbd_ret_code err = ERR_NOMEM;

	mdev = minor_to_mdev(minor);
	if (mdev)
		return ERR_MINOR_EXISTS;

	/* GFP_KERNEL, we are outside of all write-out paths */
	mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
	if (!mdev)
		return ERR_NOMEM;

	kref_get(&tconn->kref);
	mdev->tconn = tconn;

	mdev->minor = minor;
	mdev->vnr = vnr;

	drbd_init_set_defaults(mdev);

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		goto out_no_q;
	mdev->rq_queue = q;
	q->queuedata   = mdev;

	disk = alloc_disk(1);
	if (!disk)
		goto out_no_disk;
	mdev->vdisk = disk;

	set_disk_ro(disk, true);

	disk->queue = q;
	disk->major = DRBD_MAJOR;
	disk->first_minor = minor;
	disk->fops = &drbd_ops;
	sprintf(disk->disk_name, "drbd%d", minor);
	disk->private_data = mdev;

	mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
	/* we have no partitions. we contain only ourselves. */
	mdev->this_bdev->bd_contains = mdev->this_bdev;

	q->backing_dev_info.congested_fn = drbd_congested;
	q->backing_dev_info.congested_data = mdev;

	blk_queue_make_request(q, drbd_make_request);
	/* Setting the max_hw_sectors to an odd value of 8kibyte here
	   This triggers a max_bio_size message upon first attach or connect */
	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
	blk_queue_merge_bvec(q, drbd_merge_bvec);
	q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */
		/* plugging on a queue, that actually has no requests! */

	mdev->md_io_page = alloc_page(GFP_KERNEL);
	if (!mdev->md_io_page)
		goto out_no_io_page;

	if (drbd_bm_init(mdev))
		goto out_no_bitmap;
	mdev->read_requests = RB_ROOT;
	mdev->write_requests = RB_ROOT;

	if (!idr_pre_get(&minors, GFP_KERNEL))
		goto out_no_minor_idr;
	if (idr_get_new_above(&minors, mdev, minor, &minor_got))
		goto out_no_minor_idr;
	if (minor_got != minor) {
		err = ERR_MINOR_EXISTS;
		drbd_msg_put_info("requested minor exists already");
		goto out_idr_remove_minor;
	}

	if (!idr_pre_get(&tconn->volumes, GFP_KERNEL))
		goto out_idr_remove_minor;
	if (idr_get_new_above(&tconn->volumes, mdev, vnr, &vnr_got))
		goto out_idr_remove_minor;
	if (vnr_got != vnr) {
		err = ERR_INVALID_REQUEST;
		drbd_msg_put_info("requested volume exists already");
		goto out_idr_remove_vol;
	}
	add_disk(disk);
	kref_init(&mdev->kref); /* one ref for both idrs and the add_disk */

	/* inherit the connection state */
	mdev->state.conn = tconn->cstate;
	if (mdev->state.conn == C_WF_REPORT_PARAMS)
		drbd_connected(mdev);

	return NO_ERROR;

out_idr_remove_vol:
	idr_remove(&tconn->volumes, vnr_got);
out_idr_remove_minor:
	idr_remove(&minors, minor_got);
	synchronize_rcu();
out_no_minor_idr:
	drbd_bm_cleanup(mdev);
out_no_bitmap:
	__free_page(mdev->md_io_page);
out_no_io_page:
	put_disk(disk);
out_no_disk:
	blk_cleanup_queue(q);
out_no_q:
	kfree(mdev);
	kref_put(&tconn->kref, &conn_destroy);
	return err;
}
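/*
 * A new minor is registered twice: in the global "minors" idr under its
 * device minor number and in the connection's "volumes" idr under its
 * volume number (vnr).  Both registrations plus add_disk() are covered by
 * the single reference taken with kref_init() above, which is why
 * drbd_cleanup() removes both idr entries and the gendisk before dropping
 * that reference with kref_put(&mdev->kref, &drbd_minor_destroy).
 */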
int __init drbd_init(void)
{
	int err;

	if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
		printk(KERN_ERR
		       "drbd: invalid minor_count (%d)\n", minor_count);
#ifdef MODULE
		return -EINVAL;
#else
		minor_count = DRBD_MINOR_COUNT_DEF;
#endif
	}

	err = register_blkdev(DRBD_MAJOR, "drbd");
	if (err) {
		printk(KERN_ERR
		       "drbd: unable to register block device major %d\n",
		       DRBD_MAJOR);
		return err;
	}

	err = drbd_genl_register();
	if (err) {
		printk(KERN_ERR "drbd: unable to register generic netlink family\n");
		goto fail;
	}

	register_reboot_notifier(&drbd_notifier);

	/*
	 * allocate all necessary structs
	 */
	err = -ENOMEM;

	init_waitqueue_head(&drbd_pp_wait);

	drbd_proc = NULL; /* play safe for drbd_cleanup */
	idr_init(&minors);

	err = drbd_create_mempools();
	if (err)
		goto fail;

	drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
	if (!drbd_proc)	{
		printk(KERN_ERR "drbd: unable to register proc file\n");
		goto fail;
	}

	rwlock_init(&global_state_lock);
	INIT_LIST_HEAD(&drbd_tconns);

	retry.wq = create_singlethread_workqueue("drbd-reissue");
	if (!retry.wq) {
		printk(KERN_ERR "drbd: unable to create retry workqueue\n");
		goto fail;
	}
	INIT_WORK(&retry.worker, do_retry);
	spin_lock_init(&retry.lock);
	INIT_LIST_HEAD(&retry.writes);

	printk(KERN_INFO "drbd: initialized. "
	       "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
	       API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
	printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
	printk(KERN_INFO "drbd: registered as block device major %d\n",
		DRBD_MAJOR);

	return 0; /* Success! */

fail:
	drbd_cleanup();
	if (err == -ENOMEM)
		/* currently always the case */
		printk(KERN_ERR "drbd: ran out of memory\n");
	else
		printk(KERN_ERR "drbd: initialization failure\n");

	return err;
}
void drbd_free_bc(struct drbd_backing_dev *ldev)
{
	if (ldev == NULL)
		return;

	blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

	kfree(ldev);
}
void drbd_free_sock(struct drbd_tconn *tconn)
{
	if (tconn->data.socket) {
		mutex_lock(&tconn->data.mutex);
		kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR);
		sock_release(tconn->data.socket);
		tconn->data.socket = NULL;
		mutex_unlock(&tconn->data.mutex);
	}
	if (tconn->meta.socket) {
		mutex_lock(&tconn->meta.mutex);
		kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR);
		sock_release(tconn->meta.socket);
		tconn->meta.socket = NULL;
		mutex_unlock(&tconn->meta.mutex);
	}
}
/* meta data management */

struct meta_data_on_disk {
	u64 la_size;           /* last agreed size. */
	u64 uuid[UI_SIZE];     /* UUIDs. */
	u64 device_uuid;
	u64 reserved_u64_1;
	u32 flags;             /* MDF */
	u32 magic;
	u32 md_size_sect;
	u32 al_offset;         /* offset to this block */
	u32 al_nr_extents;     /* important for restoring the AL */
	      /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
	u32 bm_offset;         /* offset to the bitmap, from here */
	u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
	u32 la_peer_max_bio_size;   /* last peer max_bio_size */
	u32 reserved_u32[3];

} __packed;
/**
 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
 * @mdev:	DRBD device.
 */
void drbd_md_sync(struct drbd_conf *mdev)
{
	struct meta_data_on_disk *buffer;
	sector_t sector;
	int i;

	del_timer(&mdev->md_sync_timer);
	/* timer may be rearmed by drbd_md_mark_dirty() now. */
	if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
		return;

	/* We use here D_FAILED and not D_ATTACHING because we try to write
	 * metadata even if we detach due to a disk failure! */
	if (!get_ldev_if_state(mdev, D_FAILED))
		return;

	buffer = drbd_md_get_buffer(mdev);
	if (!buffer)
		goto out;

	memset(buffer, 0, 512);

	buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
	buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
	buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);

	buffer->md_size_sect  = cpu_to_be32(mdev->ldev->md.md_size_sect);
	buffer->al_offset     = cpu_to_be32(mdev->ldev->md.al_offset);
	buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
	buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
	buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);

	buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
	buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);

	D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
	sector = mdev->ldev->md.md_offset;

	if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
		/* this was a try anyways ... */
		dev_err(DEV, "meta data update failed!\n");
		drbd_chk_io_error(mdev, 1, true);
	}

	/* Update mdev->ldev->md.la_size_sect,
	 * since we updated it on metadata. */
	mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);

	drbd_md_put_buffer(mdev);
out:
	put_ldev(mdev);
}
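/*
 * All multi-byte fields of struct meta_data_on_disk are converted with
 * cpu_to_be*() before the 512 byte block is written, so the on-disk meta
 * data format is big endian regardless of host byte order.  Note that the
 * magic written here is DRBD_MD_MAGIC_84_UNCLEAN; it is apparently expected
 * to be rewritten once the activity log has been applied cleanly (compare
 * the "drbdadm apply-al" hint in drbd_md_read() below).
 */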
/**
 * drbd_md_read() - Reads in the meta data super block
 * @mdev:	DRBD device.
 * @bdev:	Device from which the meta data should be read in.
 *
 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
 * something goes wrong.
 */
int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
	struct meta_data_on_disk *buffer;
	u32 magic, flags;
	int i, rv = NO_ERROR;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		return ERR_IO_MD_DISK;

	buffer = drbd_md_get_buffer(mdev);
	if (!buffer)
		goto out;

	if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
		/* NOTE: can't do normal error processing here as this is
		   called BEFORE disk is attached */
		dev_err(DEV, "Error while reading metadata.\n");
		rv = ERR_IO_MD_DISK;
		goto err;
	}

	magic = be32_to_cpu(buffer->magic);
	flags = be32_to_cpu(buffer->flags);
	if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
	    (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
			/* btw: that's Activity Log clean, not "all" clean. */
		dev_err(DEV, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
		rv = ERR_MD_UNCLEAN;
		goto err;
	}
	if (magic != DRBD_MD_MAGIC_08) {
		if (magic == DRBD_MD_MAGIC_07)
			dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
		else
			dev_err(DEV, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
		dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
		    be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
		dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
		    be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
		dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
		    be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
		rv = ERR_MD_INVALID;
		goto err;
	}

	if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
		dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
		    be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
		rv = ERR_MD_INVALID;
		goto err;
	}

	bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
	bdev->md.flags = be32_to_cpu(buffer->flags);
	bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);

	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED) {
		unsigned int peer;
		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
		peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
		mdev->peer_max_bio_size = peer;
	}
	spin_unlock_irq(&mdev->tconn->req_lock);

 err:
	drbd_md_put_buffer(mdev);
 out:
	put_ldev(mdev);

	return rv;
}
/**
 * drbd_md_mark_dirty() - Mark meta data super block as dirty
 * @mdev:	DRBD device.
 *
 * Call this function if you change anything that should be written to
 * the meta-data super block. This function sets MD_DIRTY, and starts a
 * timer that ensures that within five seconds you have to call drbd_md_sync().
 */
#ifdef DEBUG
void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
{
	if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
		mod_timer(&mdev->md_sync_timer, jiffies + HZ);
		mdev->last_md_mark_dirty.line = line;
		mdev->last_md_mark_dirty.func = func;
	}
}
#else
void drbd_md_mark_dirty(struct drbd_conf *mdev)
{
	if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
		mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
}
#endif
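/*
 * Typical call pattern (illustrative): code that modifies mdev->ldev->md
 * marks it dirty and lets the timer/worker pair flush it within a few
 * seconds, while callers that need the change on stable storage right away
 * follow up with an explicit sync:
 *
 *	mdev->ldev->md.flags |= MDF_WAS_UP_TO_DATE;	// hypothetical change
 *	drbd_md_mark_dirty(mdev);
 *	drbd_md_sync(mdev);	// only when it must hit disk _now_
 */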
static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
{
	int i;

	for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
		mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
}
void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
	if (idx == UI_CURRENT) {
		if (mdev->state.role == R_PRIMARY)
			val |= 1;
		else
			val &= ~((u64)1);

		drbd_set_ed_uuid(mdev, val);
	}

	mdev->ldev->md.uuid[idx] = val;
	drbd_md_mark_dirty(mdev);
}
void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
	if (mdev->ldev->md.uuid[idx]) {
		drbd_uuid_move_history(mdev);
		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
	}
	_drbd_uuid_set(mdev, idx, val);
}
/**
 * drbd_uuid_new_current() - Creates a new current UUID
 * @mdev:	DRBD device.
 *
 * Creates a new current UUID, and rotates the old current UUID into
 * the bitmap slot. Causes an incremental resync upon next connect.
 */
void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
{
	u64 val;
	unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];

	if (bm_uuid)
		dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);

	mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];

	get_random_bytes(&val, sizeof(u64));
	_drbd_uuid_set(mdev, UI_CURRENT, val);
	drbd_print_uuids(mdev, "new current UUID");
	/* get it to stable storage _now_ */
	drbd_md_sync(mdev);
}
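/*
 * UUID rotation summary for the helpers above and below: generating a new
 * current UUID copies the old current UUID into the UI_BITMAP slot;
 * drbd_uuid_set() and drbd_uuid_set_bm() shift superseded values into the
 * history slots via drbd_uuid_move_history().  On the next connect the
 * peers compare these UUID sets to decide whether an incremental, bitmap
 * based resync is sufficient.
 */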
void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
{
	if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
		return;

	if (val == 0) {
		drbd_uuid_move_history(mdev);
		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
		mdev->ldev->md.uuid[UI_BITMAP] = 0;
	} else {
		unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
		if (bm_uuid)
			dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);

		mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
	}
	drbd_md_mark_dirty(mdev);
}
/**
 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev:	DRBD device.
 *
 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_set_n_write(struct drbd_conf *mdev)
{
	int rv = -EIO;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		drbd_md_set_flag(mdev, MDF_FULL_SYNC);
		drbd_md_sync(mdev);
		drbd_bm_set_all(mdev);

		rv = drbd_bm_write(mdev);

		if (!rv) {
			drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
			drbd_md_sync(mdev);
		}

		put_ldev(mdev);
	}

	return rv;
}
/**
 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev:	DRBD device.
 *
 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
{
	int rv = -EIO;

	drbd_resume_al(mdev);
	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		drbd_bm_clear_all(mdev);
		rv = drbd_bm_write(mdev);
		put_ldev(mdev);
	}

	return rv;
}
static int w_bitmap_io(struct drbd_work *w, int unused)
{
	struct bm_io_work *work = container_of(w, struct bm_io_work, w);
	struct drbd_conf *mdev = w->mdev;
	int rv = -EIO;

	D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);

	if (get_ldev(mdev)) {
		drbd_bm_lock(mdev, work->why, work->flags);
		rv = work->io_fn(mdev);
		drbd_bm_unlock(mdev);
		put_ldev(mdev);
	}

	clear_bit_unlock(BITMAP_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);

	if (work->done)
		work->done(mdev, rv);

	clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
	work->why = NULL;
	work->flags = 0;

	return 0;
}
void drbd_ldev_destroy(struct drbd_conf *mdev)
{
	lc_destroy(mdev->resync);
	mdev->resync = NULL;
	lc_destroy(mdev->act_log);
	mdev->act_log = NULL;
	__no_warn(local,
		drbd_free_bc(mdev->ldev);
		mdev->ldev = NULL;);

	clear_bit(GO_DISKLESS, &mdev->flags);
}
static int w_go_diskless(struct drbd_work *w, int unused)
{
	struct drbd_conf *mdev = w->mdev;

	D_ASSERT(mdev->state.disk == D_FAILED);
	/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
	 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
	 * the protected members anymore, though, so once put_ldev reaches zero
	 * again, it will be safe to free them. */
	drbd_force_state(mdev, NS(disk, D_DISKLESS));

	return 0;
}
void drbd_go_diskless(struct drbd_conf *mdev)
{
	D_ASSERT(mdev->state.disk == D_FAILED);
	if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
		drbd_queue_work(&mdev->tconn->data.work, &mdev->go_diskless);
}
/**
 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
 * @mdev:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @done:	callback to be called after the bitmap IO was performed
 * @why:	Descriptive text of the reason for doing the IO
 *
 * While IO on the bitmap happens we freeze application IO thus we ensure
 * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
 * called from worker context. It MUST NOT be used while a previous such
 * work is still pending!
 */
void drbd_queue_bitmap_io(struct drbd_conf *mdev,
			  int (*io_fn)(struct drbd_conf *),
			  void (*done)(struct drbd_conf *, int),
			  char *why, enum bm_flag flags)
{
	D_ASSERT(current == mdev->tconn->worker.task);

	D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
	D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
	D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
	if (mdev->bm_io_work.why)
		dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
			why, mdev->bm_io_work.why);

	mdev->bm_io_work.io_fn = io_fn;
	mdev->bm_io_work.done = done;
	mdev->bm_io_work.why = why;
	mdev->bm_io_work.flags = flags;

	spin_lock_irq(&mdev->tconn->req_lock);
	set_bit(BITMAP_IO, &mdev->flags);
	if (atomic_read(&mdev->ap_bio_cnt) == 0) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
			drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w);
	}
	spin_unlock_irq(&mdev->tconn->req_lock);
}
/**
 * drbd_bitmap_io() -  Does an IO operation on the whole bitmap
 * @mdev:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @why:	Descriptive text of the reason for doing the IO
 *
 * Freezes application IO while the actual IO operation runs. This
 * function MAY NOT be called from worker context.
 */
int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
		   char *why, enum bm_flag flags)
{
	int rv;

	D_ASSERT(current != mdev->tconn->worker.task);

	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
		drbd_suspend_io(mdev);

	drbd_bm_lock(mdev, why, flags);
	rv = io_fn(mdev);
	drbd_bm_unlock(mdev);

	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
		drbd_resume_io(mdev);

	return rv;
}
void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
	if ((mdev->ldev->md.flags & flag) != flag) {
		drbd_md_mark_dirty(mdev);
		mdev->ldev->md.flags |= flag;
	}
}

void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
	if ((mdev->ldev->md.flags & flag) != 0) {
		drbd_md_mark_dirty(mdev);
		mdev->ldev->md.flags &= ~flag;
	}
}
int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
{
	return (bdev->md.flags & flag) != 0;
}
static void md_sync_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;

	drbd_queue_work_front(&mdev->tconn->data.work, &mdev->md_sync_work);
}

static int w_md_sync(struct drbd_work *w, int unused)
{
	struct drbd_conf *mdev = w->mdev;

	dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
#ifdef DEBUG
	dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
		 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
#endif
	drbd_md_sync(mdev);
	return 0;
}
const char *cmdname(enum drbd_packet cmd)
{
	/* THINK may need to become several global tables
	 * when we want to support more than
	 * one PRO_VERSION */
	static const char *cmdnames[] = {
		[P_DATA]	        = "Data",
		[P_DATA_REPLY]	        = "DataReply",
		[P_RS_DATA_REPLY]	= "RSDataReply",
		[P_BARRIER]	        = "Barrier",
		[P_BITMAP]	        = "ReportBitMap",
		[P_BECOME_SYNC_TARGET]  = "BecomeSyncTarget",
		[P_BECOME_SYNC_SOURCE]  = "BecomeSyncSource",
		[P_UNPLUG_REMOTE]	= "UnplugRemote",
		[P_DATA_REQUEST]	= "DataRequest",
		[P_RS_DATA_REQUEST]     = "RSDataRequest",
		[P_SYNC_PARAM]	        = "SyncParam",
		[P_SYNC_PARAM89]	= "SyncParam89",
		[P_PROTOCOL]            = "ReportProtocol",
		[P_UUIDS]	        = "ReportUUIDs",
		[P_SIZES]	        = "ReportSizes",
		[P_STATE]	        = "ReportState",
		[P_SYNC_UUID]           = "ReportSyncUUID",
		[P_AUTH_CHALLENGE]      = "AuthChallenge",
		[P_AUTH_RESPONSE]	= "AuthResponse",
		[P_PING]		= "Ping",
		[P_PING_ACK]	        = "PingAck",
		[P_RECV_ACK]	        = "RecvAck",
		[P_WRITE_ACK]	        = "WriteAck",
		[P_RS_WRITE_ACK]	= "RSWriteAck",
		[P_DISCARD_WRITE]       = "DiscardWrite",
		[P_NEG_ACK]	        = "NegAck",
		[P_NEG_DREPLY]	        = "NegDReply",
		[P_NEG_RS_DREPLY]	= "NegRSDReply",
		[P_BARRIER_ACK]	        = "BarrierAck",
		[P_STATE_CHG_REQ]       = "StateChgRequest",
		[P_STATE_CHG_REPLY]     = "StateChgReply",
		[P_OV_REQUEST]          = "OVRequest",
		[P_OV_REPLY]            = "OVReply",
		[P_OV_RESULT]           = "OVResult",
		[P_CSUM_RS_REQUEST]     = "CsumRSRequest",
		[P_RS_IS_IN_SYNC]	= "CsumRSIsInSync",
		[P_COMPRESSED_BITMAP]   = "CBitmap",
		[P_DELAY_PROBE]         = "DelayProbe",
		[P_OUT_OF_SYNC]		= "OutOfSync",
		[P_RETRY_WRITE]		= "RetryWrite",
		[P_RS_CANCEL]		= "RSCancel",
		[P_CONN_ST_CHG_REQ]	= "conn_st_chg_req",
		[P_CONN_ST_CHG_REPLY]	= "conn_st_chg_reply",
		[P_PROTOCOL_UPDATE]	= "protocol_update",

		/* enum drbd_packet, but not commands - obsoleted flags:
		 *	P_MAY_IGNORE
		 *	P_MAX_OPT_CMD
		 */
	};

	/* too big for the array: 0xfffX */
	if (cmd == P_INITIAL_META)
		return "InitialMeta";
	if (cmd == P_INITIAL_DATA)
		return "InitialData";
	if (cmd == P_CONNECTION_FEATURES)
		return "ConnectionFeatures";
	if (cmd >= ARRAY_SIZE(cmdnames))
		return "Unknown";
	return cmdnames[cmd];
}
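/*
 * cmdname() is purely diagnostic: the table maps wire packet types to human
 * readable names for log messages.  Illustrative use (receiver-side
 * identifiers hypothetical):
 *
 *	conn_err(tconn, "unexpected packet %s\n", cmdname(pi.cmd));
 */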
/**
 * drbd_wait_misc  -  wait for a request to make progress
 * @mdev:	device associated with the request
 * @i:		the struct drbd_interval embedded in struct drbd_request or
 *		struct drbd_peer_request
 */
int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
{
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	long timeout;

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -ETIMEDOUT;
	}
	timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
	rcu_read_unlock();

	/* Indicate to wake up mdev->misc_wait on progress.  */
	i->waiting = true;
	prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
	spin_unlock_irq(&mdev->tconn->req_lock);
	timeout = schedule_timeout(timeout);
	finish_wait(&mdev->misc_wait, &wait);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (!timeout || mdev->state.conn < C_CONNECTED)
		return -ETIMEDOUT;
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}
#ifdef CONFIG_DRBD_FAULT_INJECTION
/* Fault insertion support including random number generator shamelessly
 * stolen from kernel/rcutorture.c */
struct fault_random_state {
	unsigned long state;
	unsigned long count;
};

#define FAULT_RANDOM_MULT 39916801  /* prime */
#define FAULT_RANDOM_ADD 479001701 /* prime */
#define FAULT_RANDOM_REFRESH 10000

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static unsigned long
_drbd_fault_random(struct fault_random_state *rsp)
{
	unsigned long refresh;

	if (!rsp->count--) {
		get_random_bytes(&refresh, sizeof(refresh));
		rsp->state += refresh;
		rsp->count = FAULT_RANDOM_REFRESH;
	}
	rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
	return swahw32(rsp->state);
}
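/*
 * The generator above is a plain linear congruential generator
 * (state = state * MULT + ADD) that only mixes in fresh entropy from
 * get_random_bytes() every FAULT_RANDOM_REFRESH (10000) calls; presumably
 * this keeps the per-I/O cost of fault injection negligible while avoiding
 * a fully deterministic sequence.  swahw32() swaps the 16 bit halfwords of
 * the state so the low bits consumed by the "% 100" below vary more.
 */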
static char *
_drbd_fault_str(unsigned int type) {
	static char *_faults[] = {
		[DRBD_FAULT_MD_WR] = "Meta-data write",
		[DRBD_FAULT_MD_RD] = "Meta-data read",
		[DRBD_FAULT_RS_WR] = "Resync write",
		[DRBD_FAULT_RS_RD] = "Resync read",
		[DRBD_FAULT_DT_WR] = "Data write",
		[DRBD_FAULT_DT_RD] = "Data read",
		[DRBD_FAULT_DT_RA] = "Data read ahead",
		[DRBD_FAULT_BM_ALLOC] = "BM allocation",
		[DRBD_FAULT_AL_EE] = "EE allocation",
		[DRBD_FAULT_RECEIVE] = "receive data corruption",
	};

	return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
}
unsigned int
_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
{
	static struct fault_random_state rrs = {0, 0};

	unsigned int ret = (
		(fault_devs == 0 ||
			((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
		(((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));

	if (ret) {
		fault_count++;

		if (__ratelimit(&drbd_ratelimit_state))
			dev_warn(DEV, "***Simulating %s failure\n",
				_drbd_fault_str(type));
	}

	return ret;
}
#endif
const char *drbd_buildtag(void)
{
	/* DRBD built from external sources has here a reference to the
	   git hash of the source code. */

	static char buildtag[38] = "\0uilt-in";

	if (buildtag[0] == 0) {
#ifdef CONFIG_MODULES
		if (THIS_MODULE != NULL)
			sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
		else
#endif
			buildtag[0] = 'b';
	}

	return buildtag;
}
module_init(drbd_init)
module_exit(drbd_cleanup)

EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
EXPORT_SYMBOL(drbd_set_st_err_str);