1 /*
2 drbd.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11 from Logicworks, Inc. for making SDP replication support possible.
12
13 drbd is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
16 any later version.
17
18 drbd is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with drbd; see the file COPYING. If not, write to
25 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26
27 */
28
29 #include <linux/module.h>
30 #include <linux/drbd.h>
31 #include <asm/uaccess.h>
32 #include <asm/types.h>
33 #include <net/sock.h>
34 #include <linux/ctype.h>
35 #include <linux/mutex.h>
36 #include <linux/fs.h>
37 #include <linux/file.h>
38 #include <linux/proc_fs.h>
39 #include <linux/init.h>
40 #include <linux/mm.h>
41 #include <linux/memcontrol.h>
42 #include <linux/mm_inline.h>
43 #include <linux/slab.h>
44 #include <linux/random.h>
45 #include <linux/reboot.h>
46 #include <linux/notifier.h>
47 #include <linux/kthread.h>
48
49 #define __KERNEL_SYSCALLS__
50 #include <linux/unistd.h>
51 #include <linux/vmalloc.h>
52
53 #include <linux/drbd_limits.h>
54 #include "drbd_int.h"
55 #include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
56
57 #include "drbd_vli.h"
58
59 struct after_state_chg_work {
60 struct drbd_work w;
61 union drbd_state os;
62 union drbd_state ns;
63 enum chg_state_flags flags;
64 struct completion *done;
65 };
66
67 static DEFINE_MUTEX(drbd_main_mutex);
68 int drbdd_init(struct drbd_thread *);
69 int drbd_worker(struct drbd_thread *);
70 int drbd_asender(struct drbd_thread *);
71
72 int drbd_init(void);
73 static int drbd_open(struct block_device *bdev, fmode_t mode);
74 static int drbd_release(struct gendisk *gd, fmode_t mode);
75 static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
76 static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
77 union drbd_state ns, enum chg_state_flags flags);
78 static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
79 static void md_sync_timer_fn(unsigned long data);
80 static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
81 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
82
83 MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
84 "Lars Ellenberg <lars@linbit.com>");
85 MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
86 MODULE_VERSION(REL_VERSION);
87 MODULE_LICENSE("GPL");
88 MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices (1-255)");
89 MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
90
91 #include <linux/moduleparam.h>
92 /* allow_open_on_secondary */
93 MODULE_PARM_DESC(allow_oos, "DONT USE!");
94 /* thanks to these macros, if compiled into the kernel (not-module),
95 * this becomes the boot parameter drbd.minor_count */
96 module_param(minor_count, uint, 0444);
97 module_param(disable_sendpage, bool, 0644);
98 module_param(allow_oos, bool, 0);
99 module_param(cn_idx, uint, 0444);
100 module_param(proc_details, int, 0644);
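/*
 * Illustrative usage only (hypothetical values, not recommendations): with
 * drbd built into the kernel, the parameters above can be set on the kernel
 * command line, e.g.
 *
 *	drbd.minor_count=16 drbd.proc_details=1
 *
 * whereas as a module the equivalent would be
 *
 *	modprobe drbd minor_count=16 proc_details=1
 */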
101
102 #ifdef CONFIG_DRBD_FAULT_INJECTION
103 int enable_faults;
104 int fault_rate;
105 static int fault_count;
106 int fault_devs;
107 /* bitmap of enabled faults */
108 module_param(enable_faults, int, 0664);
109 /* fault rate % value - applies to all enabled faults */
110 module_param(fault_rate, int, 0664);
111 /* count of faults inserted */
112 module_param(fault_count, int, 0664);
113 /* bitmap of devices to insert faults on */
114 module_param(fault_devs, int, 0644);
115 #endif
116
117 /* module parameters, defined here with their defaults */
118 unsigned int minor_count = 32;
119 int disable_sendpage;
120 int allow_oos;
121 unsigned int cn_idx = CN_IDX_DRBD;
122 int proc_details;       /* Detail level in proc drbd */
123
124 /* Module parameter for setting the user mode helper program
125 * to run. Default is /sbin/drbdadm */
126 char usermode_helper[80] = "/sbin/drbdadm";
127
128 module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
129
130 /* in 2.6.x, our device mapping and config info contains our virtual gendisks
131 * as member "struct gendisk *vdisk;"
132 */
133 struct drbd_conf **minor_table;
134
135 struct kmem_cache *drbd_request_cache;
136 struct kmem_cache *drbd_ee_cache; /* epoch entries */
137 struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
138 struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
139 mempool_t *drbd_request_mempool;
140 mempool_t *drbd_ee_mempool;
141
142 /* I do not use a standard mempool, because:
143 1) I want to hand out the pre-allocated objects first.
144 2) I want to be able to interrupt sleeping allocation with a signal.
145    Note: This is a singly linked list; the next pointer is the private
146 member of struct page.
147 */
148 struct page *drbd_pp_pool;
149 spinlock_t drbd_pp_lock;
150 int drbd_pp_vacant;
151 wait_queue_head_t drbd_pp_wait;
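/*
 * Sketch of how such a page chain is typically linked through the private
 * member (an assumption for illustration; the driver's real allocator also
 * takes drbd_pp_lock and sleeps on drbd_pp_wait):
 *
 *	static void example_pp_push(struct page *page)
 *	{
 *		set_page_private(page, (unsigned long)drbd_pp_pool);
 *		drbd_pp_pool = page;
 *		drbd_pp_vacant++;
 *	}
 *
 *	static struct page *example_pp_pop(void)
 *	{
 *		struct page *page = drbd_pp_pool;
 *		if (page) {
 *			drbd_pp_pool = (struct page *)page_private(page);
 *			drbd_pp_vacant--;
 *		}
 *		return page;
 *	}
 */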
152
153 DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
154
155 static const struct block_device_operations drbd_ops = {
156 .owner = THIS_MODULE,
157 .open = drbd_open,
158 .release = drbd_release,
159 };
160
161 #define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
162
163 #ifdef __CHECKER__
164 /* When checking with sparse, and this is an inline function, sparse will
165    give tons of false positives. When this is a real function, sparse works.
166 */
167 int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
168 {
169 int io_allowed;
170
171 atomic_inc(&mdev->local_cnt);
172 io_allowed = (mdev->state.disk >= mins);
173 if (!io_allowed) {
174 if (atomic_dec_and_test(&mdev->local_cnt))
175 wake_up(&mdev->misc_wait);
176 }
177 return io_allowed;
178 }
179
180 #endif
181
182 /**
183 * DOC: The transfer log
184 *
185  * The transfer log is a singly linked list of &struct drbd_tl_epoch objects.
186 * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
187 * of the list. There is always at least one &struct drbd_tl_epoch object.
188 *
189  * Each &struct drbd_tl_epoch has a circular, doubly linked list of requests
190 * attached.
191 */
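/*
 * Rough picture of the layout described above (illustration only):
 *
 *	mdev->oldest_tle --> epoch --> epoch --> epoch <-- mdev->newest_tle
 *	                       |         |         |
 *	                    requests  requests  requests   (circular, doubly linked)
 *
 * A minimal traversal, assuming req_lock is held, could therefore look like:
 *
 *	struct drbd_tl_epoch *b;
 *	for (b = mdev->oldest_tle; b != NULL; b = b->next)
 *		visit_epoch(b);		(visit_epoch being a hypothetical helper)
 */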
192 static int tl_init(struct drbd_conf *mdev)
193 {
194 struct drbd_tl_epoch *b;
195
196 /* during device minor initialization, we may well use GFP_KERNEL */
197 b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
198 if (!b)
199 return 0;
200 INIT_LIST_HEAD(&b->requests);
201 INIT_LIST_HEAD(&b->w.list);
202 b->next = NULL;
203 b->br_number = 4711;
204 b->n_writes = 0;
205 b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
206
207 mdev->oldest_tle = b;
208 mdev->newest_tle = b;
209 INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
210
211 mdev->tl_hash = NULL;
212 mdev->tl_hash_s = 0;
213
214 return 1;
215 }
216
217 static void tl_cleanup(struct drbd_conf *mdev)
218 {
219 D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
220 D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
221 kfree(mdev->oldest_tle);
222 mdev->oldest_tle = NULL;
223 kfree(mdev->unused_spare_tle);
224 mdev->unused_spare_tle = NULL;
225 kfree(mdev->tl_hash);
226 mdev->tl_hash = NULL;
227 mdev->tl_hash_s = 0;
228 }
229
230 /**
231 * _tl_add_barrier() - Adds a barrier to the transfer log
232 * @mdev: DRBD device.
233 * @new: Barrier to be added before the current head of the TL.
234 *
235 * The caller must hold the req_lock.
236 */
237 void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
238 {
239 struct drbd_tl_epoch *newest_before;
240
241 INIT_LIST_HEAD(&new->requests);
242 INIT_LIST_HEAD(&new->w.list);
243 new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
244 new->next = NULL;
245 new->n_writes = 0;
246
247 newest_before = mdev->newest_tle;
248 /* never send a barrier number == 0, because that is special-cased
249 * when using TCQ for our write ordering code */
250 new->br_number = (newest_before->br_number+1) ?: 1;
251 if (mdev->newest_tle != new) {
252 mdev->newest_tle->next = new;
253 mdev->newest_tle = new;
254 }
255 }
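/*
 * Hypothetical caller sketch (not taken from this file) showing the locking
 * requirement stated above:
 *
 *	struct drbd_tl_epoch *b = kmalloc(sizeof(*b), GFP_NOIO);
 *
 *	if (b) {
 *		spin_lock_irq(&mdev->req_lock);
 *		_tl_add_barrier(mdev, b);
 *		spin_unlock_irq(&mdev->req_lock);
 *	}
 */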
256
257 /**
258 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
259 * @mdev: DRBD device.
260 * @barrier_nr: Expected identifier of the DRBD write barrier packet.
261 * @set_size: Expected number of requests before that barrier.
262 *
263 * In case the passed barrier_nr or set_size does not match the oldest
264 * &struct drbd_tl_epoch objects this function will cause a termination
265 * of the connection.
266 */
267 void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
268 unsigned int set_size)
269 {
270 struct drbd_tl_epoch *b, *nob; /* next old barrier */
271 struct list_head *le, *tle;
272 struct drbd_request *r;
273
274 spin_lock_irq(&mdev->req_lock);
275
276 b = mdev->oldest_tle;
277
278 /* first some paranoia code */
279 if (b == NULL) {
280 dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
281 barrier_nr);
282 goto bail;
283 }
284 if (b->br_number != barrier_nr) {
285 dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
286 barrier_nr, b->br_number);
287 goto bail;
288 }
289 if (b->n_writes != set_size) {
290 dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
291 barrier_nr, set_size, b->n_writes);
292 goto bail;
293 }
294
295 /* Clean up list of requests processed during current epoch */
296 list_for_each_safe(le, tle, &b->requests) {
297 r = list_entry(le, struct drbd_request, tl_requests);
298 _req_mod(r, barrier_acked);
299 }
300 /* There could be requests on the list waiting for completion
301    of the write to the local disk. To avoid corruption of
302    slab's data structures we have to remove the list's head.
303
304 Also there could have been a barrier ack out of sequence, overtaking
305 the write acks - which would be a bug and violating write ordering.
306 To not deadlock in case we lose connection while such requests are
307 still pending, we need some way to find them for the
308    _req_mod(connection_lost_while_pending).
309
310 These have been list_move'd to the out_of_sequence_requests list in
311 _req_mod(, barrier_acked) above.
312 */
313 list_del_init(&b->requests);
314
315 nob = b->next;
316 if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
317 _tl_add_barrier(mdev, b);
318 if (nob)
319 mdev->oldest_tle = nob;
320 /* if nob == NULL, b was the only barrier, and becomes the new
321 barrier. Therefore mdev->oldest_tle already points to b */
322 } else {
323 D_ASSERT(nob != NULL);
324 mdev->oldest_tle = nob;
325 kfree(b);
326 }
327
328 spin_unlock_irq(&mdev->req_lock);
329 dec_ap_pending(mdev);
330
331 return;
332
333 bail:
334 spin_unlock_irq(&mdev->req_lock);
335 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
336 }
337
338 /**
339 * _tl_restart() - Walks the transfer log, and applies an action to all requests
340 * @mdev: DRBD device.
341 * @what: The action/event to perform with all request objects
342 *
343 * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
344 * restart_frozen_disk_io.
345 */
346 static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
347 {
348 struct drbd_tl_epoch *b, *tmp, **pn;
349 struct list_head *le, *tle, carry_reads;
350 struct drbd_request *req;
351 int rv, n_writes, n_reads;
352
353 b = mdev->oldest_tle;
354 pn = &mdev->oldest_tle;
355 while (b) {
356 n_writes = 0;
357 n_reads = 0;
358 INIT_LIST_HEAD(&carry_reads);
359 list_for_each_safe(le, tle, &b->requests) {
360 req = list_entry(le, struct drbd_request, tl_requests);
361 rv = _req_mod(req, what);
362
363 n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
364 n_reads += (rv & MR_READ) >> MR_READ_SHIFT;
365 }
366 tmp = b->next;
367
368 if (n_writes) {
369 if (what == resend) {
370 b->n_writes = n_writes;
371 if (b->w.cb == NULL) {
372 b->w.cb = w_send_barrier;
373 inc_ap_pending(mdev);
374 set_bit(CREATE_BARRIER, &mdev->flags);
375 }
376
377 drbd_queue_work(&mdev->data.work, &b->w);
378 }
379 pn = &b->next;
380 } else {
381 if (n_reads)
382 list_add(&carry_reads, &b->requests);
383 /* there could still be requests on that ring list,
384 * in case local io is still pending */
385 list_del(&b->requests);
386
387 /* dec_ap_pending corresponding to queue_barrier.
388 * the newest barrier may not have been queued yet,
389 * in which case w.cb is still NULL. */
390 if (b->w.cb != NULL)
391 dec_ap_pending(mdev);
392
393 if (b == mdev->newest_tle) {
394 /* recycle, but reinit! */
395 D_ASSERT(tmp == NULL);
396 INIT_LIST_HEAD(&b->requests);
397 list_splice(&carry_reads, &b->requests);
398 INIT_LIST_HEAD(&b->w.list);
399 b->w.cb = NULL;
400 b->br_number = net_random();
401 b->n_writes = 0;
402
403 *pn = b;
404 break;
405 }
406 *pn = tmp;
407 kfree(b);
408 }
409 b = tmp;
410 list_splice(&carry_reads, &b->requests);
411 }
412 }
413
414
415 /**
416 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
417 * @mdev: DRBD device.
418 *
419 * This is called after the connection to the peer was lost. The storage covered
420  * by the requests on the transfer log gets marked as out of sync. Called from the
421 * receiver thread and the worker thread.
422 */
423 void tl_clear(struct drbd_conf *mdev)
424 {
425 struct list_head *le, *tle;
426 struct drbd_request *r;
427
428 spin_lock_irq(&mdev->req_lock);
429
430 _tl_restart(mdev, connection_lost_while_pending);
431
432 /* we expect this list to be empty. */
433 D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
434
435 /* but just in case, clean it up anyway! */
436 list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
437 r = list_entry(le, struct drbd_request, tl_requests);
438 /* It would be nice to complete outside of spinlock.
439 * But this is easier for now. */
440 _req_mod(r, connection_lost_while_pending);
441 }
442
443 /* ensure bit indicating barrier is required is clear */
444 clear_bit(CREATE_BARRIER, &mdev->flags);
445
446 memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
447
448 spin_unlock_irq(&mdev->req_lock);
449 }
450
451 void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
452 {
453 spin_lock_irq(&mdev->req_lock);
454 _tl_restart(mdev, what);
455 spin_unlock_irq(&mdev->req_lock);
456 }
457
458 /**
459  * cl_wide_st_chg() - TRUE if the state change is a cluster-wide one
460 * @mdev: DRBD device.
461 * @os: old (current) state.
462 * @ns: new (wanted) state.
463 */
464 static int cl_wide_st_chg(struct drbd_conf *mdev,
465 union drbd_state os, union drbd_state ns)
466 {
467 return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
468 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
469 (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
470 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
471 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
472 (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
473 (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
474 }
475
476 int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
477 union drbd_state mask, union drbd_state val)
478 {
479 unsigned long flags;
480 union drbd_state os, ns;
481 int rv;
482
483 spin_lock_irqsave(&mdev->req_lock, flags);
484 os = mdev->state;
485 ns.i = (os.i & ~mask.i) | val.i;
486 rv = _drbd_set_state(mdev, ns, f, NULL);
487 ns = mdev->state;
488 spin_unlock_irqrestore(&mdev->req_lock, flags);
489
490 return rv;
491 }
492
493 /**
494 * drbd_force_state() - Impose a change which happens outside our control on our state
495 * @mdev: DRBD device.
496 * @mask: mask of state bits to change.
497 * @val: value of new state bits.
498 */
499 void drbd_force_state(struct drbd_conf *mdev,
500 union drbd_state mask, union drbd_state val)
501 {
502 drbd_change_state(mdev, CS_HARD, mask, val);
503 }
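/*
 * Example, as used further down in this file for protocol errors:
 *
 *	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
 *
 * The NS() helper expands to the mask/val pair that changes exactly one
 * field of the state union, here conn; CS_HARD then bypasses the usual
 * validity checks in __drbd_set_state().
 */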
504
505 static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns);
506 static int is_valid_state_transition(struct drbd_conf *,
507 union drbd_state, union drbd_state);
508 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
509 union drbd_state ns, const char **warn_sync_abort);
510 int drbd_send_state_req(struct drbd_conf *,
511 union drbd_state, union drbd_state);
512
513 static enum drbd_state_rv
514 _req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
515 union drbd_state val)
516 {
517 union drbd_state os, ns;
518 unsigned long flags;
519 int rv;
520
521 if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
522 return SS_CW_SUCCESS;
523
524 if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
525 return SS_CW_FAILED_BY_PEER;
526
527 rv = 0;
528 spin_lock_irqsave(&mdev->req_lock, flags);
529 os = mdev->state;
530 ns.i = (os.i & ~mask.i) | val.i;
531 ns = sanitize_state(mdev, os, ns, NULL);
532
533 if (!cl_wide_st_chg(mdev, os, ns))
534 rv = SS_CW_NO_NEED;
535 if (!rv) {
536 rv = is_valid_state(mdev, ns);
537 if (rv == SS_SUCCESS) {
538 rv = is_valid_state_transition(mdev, ns, os);
539 if (rv == SS_SUCCESS)
540 rv = 0; /* cont waiting, otherwise fail. */
541 }
542 }
543 spin_unlock_irqrestore(&mdev->req_lock, flags);
544
545 return rv;
546 }
547
548 /**
549  * drbd_req_state() - Perform a possibly cluster-wide state change
550 * @mdev: DRBD device.
551 * @mask: mask of state bits to change.
552 * @val: value of new state bits.
553 * @f: flags
554 *
555 * Should not be called directly, use drbd_request_state() or
556 * _drbd_request_state().
557 */
558 static int drbd_req_state(struct drbd_conf *mdev,
559 union drbd_state mask, union drbd_state val,
560 enum chg_state_flags f)
561 {
562 struct completion done;
563 unsigned long flags;
564 union drbd_state os, ns;
565 int rv;
566
567 init_completion(&done);
568
569 if (f & CS_SERIALIZE)
570 mutex_lock(&mdev->state_mutex);
571
572 spin_lock_irqsave(&mdev->req_lock, flags);
573 os = mdev->state;
574 ns.i = (os.i & ~mask.i) | val.i;
575 ns = sanitize_state(mdev, os, ns, NULL);
576
577 if (cl_wide_st_chg(mdev, os, ns)) {
578 rv = is_valid_state(mdev, ns);
579 if (rv == SS_SUCCESS)
580 rv = is_valid_state_transition(mdev, ns, os);
581 spin_unlock_irqrestore(&mdev->req_lock, flags);
582
583 if (rv < SS_SUCCESS) {
584 if (f & CS_VERBOSE)
585 print_st_err(mdev, os, ns, rv);
586 goto abort;
587 }
588
589 drbd_state_lock(mdev);
590 if (!drbd_send_state_req(mdev, mask, val)) {
591 drbd_state_unlock(mdev);
592 rv = SS_CW_FAILED_BY_PEER;
593 if (f & CS_VERBOSE)
594 print_st_err(mdev, os, ns, rv);
595 goto abort;
596 }
597
598 wait_event(mdev->state_wait,
599 (rv = _req_st_cond(mdev, mask, val)));
600
601 if (rv < SS_SUCCESS) {
602 drbd_state_unlock(mdev);
603 if (f & CS_VERBOSE)
604 print_st_err(mdev, os, ns, rv);
605 goto abort;
606 }
607 spin_lock_irqsave(&mdev->req_lock, flags);
608 os = mdev->state;
609 ns.i = (os.i & ~mask.i) | val.i;
610 rv = _drbd_set_state(mdev, ns, f, &done);
611 drbd_state_unlock(mdev);
612 } else {
613 rv = _drbd_set_state(mdev, ns, f, &done);
614 }
615
616 spin_unlock_irqrestore(&mdev->req_lock, flags);
617
618 if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
619 D_ASSERT(current != mdev->worker.task);
620 wait_for_completion(&done);
621 }
622
623 abort:
624 if (f & CS_SERIALIZE)
625 mutex_unlock(&mdev->state_mutex);
626
627 return rv;
628 }
629
630 /**
631 * _drbd_request_state() - Request a state change (with flags)
632 * @mdev: DRBD device.
633 * @mask: mask of state bits to change.
634 * @val: value of new state bits.
635 * @f: flags
636 *
637 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
638 * flag, or when logging of failed state change requests is not desired.
639 */
640 int _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
641 union drbd_state val, enum chg_state_flags f)
642 {
643 int rv;
644
645 wait_event(mdev->state_wait,
646 (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
647
648 return rv;
649 }
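/*
 * Typical use, as seen later in this file (abw_start_sync()):
 *
 *	_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
 *
 * The wait_event() above makes the call block for as long as the underlying
 * drbd_req_state() keeps returning SS_IN_TRANSIENT_STATE, and then returns
 * the final SS_* code.
 */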
650
651 static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
652 {
653 dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
654 name,
655 drbd_conn_str(ns.conn),
656 drbd_role_str(ns.role),
657 drbd_role_str(ns.peer),
658 drbd_disk_str(ns.disk),
659 drbd_disk_str(ns.pdsk),
660 is_susp(ns) ? 's' : 'r',
661 ns.aftr_isp ? 'a' : '-',
662 ns.peer_isp ? 'p' : '-',
663 ns.user_isp ? 'u' : '-'
664 );
665 }
666
667 void print_st_err(struct drbd_conf *mdev,
668 union drbd_state os, union drbd_state ns, int err)
669 {
670 if (err == SS_IN_TRANSIENT_STATE)
671 return;
672 dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
673 print_st(mdev, " state", os);
674 print_st(mdev, "wanted", ns);
675 }
676
677
678 /**
679 * is_valid_state() - Returns an SS_ error code if ns is not valid
680 * @mdev: DRBD device.
681 * @ns: State to consider.
682 */
683 static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
684 {
685 /* See drbd_state_sw_errors in drbd_strings.c */
686
687 enum drbd_fencing_p fp;
688 int rv = SS_SUCCESS;
689
690 fp = FP_DONT_CARE;
691 if (get_ldev(mdev)) {
692 fp = mdev->ldev->dc.fencing;
693 put_ldev(mdev);
694 }
695
696 if (get_net_conf(mdev)) {
697 if (!mdev->net_conf->two_primaries &&
698 ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
699 rv = SS_TWO_PRIMARIES;
700 put_net_conf(mdev);
701 }
702
703 if (rv <= 0)
704 /* already found a reason to abort */;
705 else if (ns.role == R_SECONDARY && mdev->open_cnt)
706 rv = SS_DEVICE_IN_USE;
707
708 else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
709 rv = SS_NO_UP_TO_DATE_DISK;
710
711 else if (fp >= FP_RESOURCE &&
712 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
713 rv = SS_PRIMARY_NOP;
714
715 else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
716 rv = SS_NO_UP_TO_DATE_DISK;
717
718 else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
719 rv = SS_NO_LOCAL_DISK;
720
721 else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
722 rv = SS_NO_REMOTE_DISK;
723
724 else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
725 rv = SS_NO_UP_TO_DATE_DISK;
726
727 else if ((ns.conn == C_CONNECTED ||
728 ns.conn == C_WF_BITMAP_S ||
729 ns.conn == C_SYNC_SOURCE ||
730 ns.conn == C_PAUSED_SYNC_S) &&
731 ns.disk == D_OUTDATED)
732 rv = SS_CONNECTED_OUTDATES;
733
734 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
735 (mdev->sync_conf.verify_alg[0] == 0))
736 rv = SS_NO_VERIFY_ALG;
737
738 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
739 mdev->agreed_pro_version < 88)
740 rv = SS_NOT_SUPPORTED;
741
742 return rv;
743 }
744
745 /**
746 * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
747 * @mdev: DRBD device.
748 * @ns: new state.
749 * @os: old state.
750 */
751 static int is_valid_state_transition(struct drbd_conf *mdev,
752 union drbd_state ns, union drbd_state os)
753 {
754 int rv = SS_SUCCESS;
755
756 if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
757 os.conn > C_CONNECTED)
758 rv = SS_RESYNC_RUNNING;
759
760 if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
761 rv = SS_ALREADY_STANDALONE;
762
763 if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
764 rv = SS_IS_DISKLESS;
765
766 if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
767 rv = SS_NO_NET_CONFIG;
768
769 if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
770 rv = SS_LOWER_THAN_OUTDATED;
771
772 if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
773 rv = SS_IN_TRANSIENT_STATE;
774
775 if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
776 rv = SS_IN_TRANSIENT_STATE;
777
778 if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
779 rv = SS_NEED_CONNECTION;
780
781 if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
782 ns.conn != os.conn && os.conn > C_CONNECTED)
783 rv = SS_RESYNC_RUNNING;
784
785 if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
786 os.conn < C_CONNECTED)
787 rv = SS_NEED_CONNECTION;
788
789 if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
790 && os.conn < C_WF_REPORT_PARAMS)
791 rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
792
793 return rv;
794 }
795
796 /**
797 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
798 * @mdev: DRBD device.
799 * @os: old state.
800 * @ns: new state.
801 * @warn_sync_abort:
802 *
803  * When we lose the connection, we have to set the state of the peer's disk (pdsk)
804 * to D_UNKNOWN. This rule and many more along those lines are in this function.
805 */
806 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
807 union drbd_state ns, const char **warn_sync_abort)
808 {
809 enum drbd_fencing_p fp;
810 enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
811
812 fp = FP_DONT_CARE;
813 if (get_ldev(mdev)) {
814 fp = mdev->ldev->dc.fencing;
815 put_ldev(mdev);
816 }
817
818 /* Do not let network errors affect a device whose network part is not configured */
819 if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
820 os.conn <= C_DISCONNECTING)
821 ns.conn = os.conn;
822
823 /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
824 * If you try to go into some Sync* state, that shall fail (elsewhere). */
825 if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
826 ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
827 ns.conn = os.conn;
828
829 /* we cannot fail (again) if we already detached */
830 if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
831 ns.disk = D_DISKLESS;
832
833 /* if we are only D_ATTACHING yet,
834 * we can (and should) go directly to D_DISKLESS. */
835 if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
836 ns.disk = D_DISKLESS;
837
838 /* After C_DISCONNECTING only C_STANDALONE may follow */
839 if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
840 ns.conn = os.conn;
841
842 if (ns.conn < C_CONNECTED) {
843 ns.peer_isp = 0;
844 ns.peer = R_UNKNOWN;
845 if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
846 ns.pdsk = D_UNKNOWN;
847 }
848
849 /* Clear the aftr_isp when becoming unconfigured */
850 if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
851 ns.aftr_isp = 0;
852
853 /* Abort resync if a disk fails/detaches */
854 if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
855 (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
856 if (warn_sync_abort)
857 *warn_sync_abort =
858 os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
859 "Online-verify" : "Resync";
860 ns.conn = C_CONNECTED;
861 }
862
863 /* Connection breaks down before we finished "Negotiating" */
864 if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
865 get_ldev_if_state(mdev, D_NEGOTIATING)) {
866 if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
867 ns.disk = mdev->new_state_tmp.disk;
868 ns.pdsk = mdev->new_state_tmp.pdsk;
869 } else {
870 dev_alert(DEV, "Connection lost while negotiating, no data!\n");
871 ns.disk = D_DISKLESS;
872 ns.pdsk = D_UNKNOWN;
873 }
874 put_ldev(mdev);
875 }
876
877 /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
878 if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
879 if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
880 ns.disk = D_UP_TO_DATE;
881 if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
882 ns.pdsk = D_UP_TO_DATE;
883 }
884
885 /* Implications of the connection state on the disk states */
886 disk_min = D_DISKLESS;
887 disk_max = D_UP_TO_DATE;
888 pdsk_min = D_INCONSISTENT;
889 pdsk_max = D_UNKNOWN;
890 switch ((enum drbd_conns)ns.conn) {
891 case C_WF_BITMAP_T:
892 case C_PAUSED_SYNC_T:
893 case C_STARTING_SYNC_T:
894 case C_WF_SYNC_UUID:
895 case C_BEHIND:
896 disk_min = D_INCONSISTENT;
897 disk_max = D_OUTDATED;
898 pdsk_min = D_UP_TO_DATE;
899 pdsk_max = D_UP_TO_DATE;
900 break;
901 case C_VERIFY_S:
902 case C_VERIFY_T:
903 disk_min = D_UP_TO_DATE;
904 disk_max = D_UP_TO_DATE;
905 pdsk_min = D_UP_TO_DATE;
906 pdsk_max = D_UP_TO_DATE;
907 break;
908 case C_CONNECTED:
909 disk_min = D_DISKLESS;
910 disk_max = D_UP_TO_DATE;
911 pdsk_min = D_DISKLESS;
912 pdsk_max = D_UP_TO_DATE;
913 break;
914 case C_WF_BITMAP_S:
915 case C_PAUSED_SYNC_S:
916 case C_STARTING_SYNC_S:
917 case C_AHEAD:
918 disk_min = D_UP_TO_DATE;
919 disk_max = D_UP_TO_DATE;
920 pdsk_min = D_INCONSISTENT;
921 pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary */
922 break;
923 case C_SYNC_TARGET:
924 disk_min = D_INCONSISTENT;
925 disk_max = D_INCONSISTENT;
926 pdsk_min = D_UP_TO_DATE;
927 pdsk_max = D_UP_TO_DATE;
928 break;
929 case C_SYNC_SOURCE:
930 disk_min = D_UP_TO_DATE;
931 disk_max = D_UP_TO_DATE;
932 pdsk_min = D_INCONSISTENT;
933 pdsk_max = D_INCONSISTENT;
934 break;
935 case C_STANDALONE:
936 case C_DISCONNECTING:
937 case C_UNCONNECTED:
938 case C_TIMEOUT:
939 case C_BROKEN_PIPE:
940 case C_NETWORK_FAILURE:
941 case C_PROTOCOL_ERROR:
942 case C_TEAR_DOWN:
943 case C_WF_CONNECTION:
944 case C_WF_REPORT_PARAMS:
945 case C_MASK:
946 break;
947 }
948 if (ns.disk > disk_max)
949 ns.disk = disk_max;
950
951 if (ns.disk < disk_min) {
952 dev_warn(DEV, "Implicitly set disk from %s to %s\n",
953 drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
954 ns.disk = disk_min;
955 }
956 if (ns.pdsk > pdsk_max)
957 ns.pdsk = pdsk_max;
958
959 if (ns.pdsk < pdsk_min) {
960 dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
961 drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
962 ns.pdsk = pdsk_min;
963 }
964
965 if (fp == FP_STONITH &&
966 (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
967 !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
968 ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
969
970 if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
971 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
972 !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
973 ns.susp_nod = 1; /* Suspend IO while no (accessible) data is available */
974
975 if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
976 if (ns.conn == C_SYNC_SOURCE)
977 ns.conn = C_PAUSED_SYNC_S;
978 if (ns.conn == C_SYNC_TARGET)
979 ns.conn = C_PAUSED_SYNC_T;
980 } else {
981 if (ns.conn == C_PAUSED_SYNC_S)
982 ns.conn = C_SYNC_SOURCE;
983 if (ns.conn == C_PAUSED_SYNC_T)
984 ns.conn = C_SYNC_TARGET;
985 }
986
987 return ns;
988 }
989
990 /* helper for __drbd_set_state */
991 static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
992 {
993 if (mdev->agreed_pro_version < 90)
994 mdev->ov_start_sector = 0;
995 mdev->rs_total = drbd_bm_bits(mdev);
996 mdev->ov_position = 0;
997 if (cs == C_VERIFY_T) {
998 /* starting online verify from an arbitrary position
999 * does not fit well into the existing protocol.
1000 * on C_VERIFY_T, we initialize ov_left and friends
1001 * implicitly in receive_DataRequest once the
1002 * first P_OV_REQUEST is received */
1003 mdev->ov_start_sector = ~(sector_t)0;
1004 } else {
1005 unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
1006 if (bit >= mdev->rs_total) {
1007 mdev->ov_start_sector =
1008 BM_BIT_TO_SECT(mdev->rs_total - 1);
1009 mdev->rs_total = 1;
1010 } else
1011 mdev->rs_total -= bit;
1012 mdev->ov_position = mdev->ov_start_sector;
1013 }
1014 mdev->ov_left = mdev->rs_total;
1015 }
1016
1017 static void drbd_resume_al(struct drbd_conf *mdev)
1018 {
1019 if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
1020 dev_info(DEV, "Resumed AL updates\n");
1021 }
1022
1023 /**
1024 * __drbd_set_state() - Set a new DRBD state
1025 * @mdev: DRBD device.
1026 * @ns: new state.
1027 * @flags: Flags
1028 * @done: Optional completion, that will get completed after the after_state_ch() finished
1029 *
1030 * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
1031 */
1032 int __drbd_set_state(struct drbd_conf *mdev,
1033 union drbd_state ns, enum chg_state_flags flags,
1034 struct completion *done)
1035 {
1036 union drbd_state os;
1037 int rv = SS_SUCCESS;
1038 const char *warn_sync_abort = NULL;
1039 struct after_state_chg_work *ascw;
1040
1041 os = mdev->state;
1042
1043 ns = sanitize_state(mdev, os, ns, &warn_sync_abort);
1044
1045 if (ns.i == os.i)
1046 return SS_NOTHING_TO_DO;
1047
1048 if (!(flags & CS_HARD)) {
1049 /* pre-state-change checks ; only look at ns */
1050 /* See drbd_state_sw_errors in drbd_strings.c */
1051
1052 rv = is_valid_state(mdev, ns);
1053 if (rv < SS_SUCCESS) {
1054 /* If the old state was illegal as well, then let
1055 this happen...*/
1056
1057 if (is_valid_state(mdev, os) == rv)
1058 rv = is_valid_state_transition(mdev, ns, os);
1059 } else
1060 rv = is_valid_state_transition(mdev, ns, os);
1061 }
1062
1063 if (rv < SS_SUCCESS) {
1064 if (flags & CS_VERBOSE)
1065 print_st_err(mdev, os, ns, rv);
1066 return rv;
1067 }
1068
1069 if (warn_sync_abort)
1070 dev_warn(DEV, "%s aborted.\n", warn_sync_abort);
1071
1072 {
1073 char *pbp, pb[300];
1074 pbp = pb;
1075 *pbp = 0;
1076 if (ns.role != os.role)
1077 pbp += sprintf(pbp, "role( %s -> %s ) ",
1078 drbd_role_str(os.role),
1079 drbd_role_str(ns.role));
1080 if (ns.peer != os.peer)
1081 pbp += sprintf(pbp, "peer( %s -> %s ) ",
1082 drbd_role_str(os.peer),
1083 drbd_role_str(ns.peer));
1084 if (ns.conn != os.conn)
1085 pbp += sprintf(pbp, "conn( %s -> %s ) ",
1086 drbd_conn_str(os.conn),
1087 drbd_conn_str(ns.conn));
1088 if (ns.disk != os.disk)
1089 pbp += sprintf(pbp, "disk( %s -> %s ) ",
1090 drbd_disk_str(os.disk),
1091 drbd_disk_str(ns.disk));
1092 if (ns.pdsk != os.pdsk)
1093 pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
1094 drbd_disk_str(os.pdsk),
1095 drbd_disk_str(ns.pdsk));
1096 if (is_susp(ns) != is_susp(os))
1097 pbp += sprintf(pbp, "susp( %d -> %d ) ",
1098 is_susp(os),
1099 is_susp(ns));
1100 if (ns.aftr_isp != os.aftr_isp)
1101 pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
1102 os.aftr_isp,
1103 ns.aftr_isp);
1104 if (ns.peer_isp != os.peer_isp)
1105 pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
1106 os.peer_isp,
1107 ns.peer_isp);
1108 if (ns.user_isp != os.user_isp)
1109 pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
1110 os.user_isp,
1111 ns.user_isp);
1112 dev_info(DEV, "%s\n", pb);
1113 }
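/*
 * The composed line results in log output such as (hypothetical example):
 *
 *	block drbd0: role( Secondary -> Primary ) disk( Inconsistent -> UpToDate )
 *
 * i.e. only the fields that actually changed between os and ns are printed.
 */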
1114
1115 /* solve the race between becoming unconfigured,
1116 * worker doing the cleanup, and
1117 * admin reconfiguring us:
1118 * on (re)configure, first set CONFIG_PENDING,
1119 * then wait for a potentially exiting worker,
1120 * start the worker, and schedule one no_op.
1121 * then proceed with configuration.
1122 */
1123 if (ns.disk == D_DISKLESS &&
1124 ns.conn == C_STANDALONE &&
1125 ns.role == R_SECONDARY &&
1126 !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
1127 set_bit(DEVICE_DYING, &mdev->flags);
1128
1129 /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
1130 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
1131 * drbd_ldev_destroy() won't happen before our corresponding
1132 * after_state_ch works run, where we put_ldev again. */
1133 if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
1134 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
1135 atomic_inc(&mdev->local_cnt);
1136
1137 mdev->state = ns;
1138 wake_up(&mdev->misc_wait);
1139 wake_up(&mdev->state_wait);
1140
1141 /* aborted verify run. log the last position */
1142 if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
1143 ns.conn < C_CONNECTED) {
1144 mdev->ov_start_sector =
1145 BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
1146 dev_info(DEV, "Online Verify reached sector %llu\n",
1147 (unsigned long long)mdev->ov_start_sector);
1148 }
1149
1150 if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
1151 (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
1152 dev_info(DEV, "Syncer continues.\n");
1153 mdev->rs_paused += (long)jiffies
1154 -(long)mdev->rs_mark_time[mdev->rs_last_mark];
1155 if (ns.conn == C_SYNC_TARGET)
1156 mod_timer(&mdev->resync_timer, jiffies);
1157 }
1158
1159 if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
1160 (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
1161 dev_info(DEV, "Resync suspended\n");
1162 mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
1163 }
1164
1165 if (os.conn == C_CONNECTED &&
1166 (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
1167 unsigned long now = jiffies;
1168 int i;
1169
1170 set_ov_position(mdev, ns.conn);
1171 mdev->rs_start = now;
1172 mdev->rs_last_events = 0;
1173 mdev->rs_last_sect_ev = 0;
1174 mdev->ov_last_oos_size = 0;
1175 mdev->ov_last_oos_start = 0;
1176
1177 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1178 mdev->rs_mark_left[i] = mdev->ov_left;
1179 mdev->rs_mark_time[i] = now;
1180 }
1181
1182 drbd_rs_controller_reset(mdev);
1183
1184 if (ns.conn == C_VERIFY_S) {
1185 dev_info(DEV, "Starting Online Verify from sector %llu\n",
1186 (unsigned long long)mdev->ov_position);
1187 mod_timer(&mdev->resync_timer, jiffies);
1188 }
1189 }
1190
1191 if (get_ldev(mdev)) {
1192 u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
1193 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
1194 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
1195
1196 if (test_bit(CRASHED_PRIMARY, &mdev->flags))
1197 mdf |= MDF_CRASHED_PRIMARY;
1198 if (mdev->state.role == R_PRIMARY ||
1199 (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
1200 mdf |= MDF_PRIMARY_IND;
1201 if (mdev->state.conn > C_WF_REPORT_PARAMS)
1202 mdf |= MDF_CONNECTED_IND;
1203 if (mdev->state.disk > D_INCONSISTENT)
1204 mdf |= MDF_CONSISTENT;
1205 if (mdev->state.disk > D_OUTDATED)
1206 mdf |= MDF_WAS_UP_TO_DATE;
1207 if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
1208 mdf |= MDF_PEER_OUT_DATED;
1209 if (mdf != mdev->ldev->md.flags) {
1210 mdev->ldev->md.flags = mdf;
1211 drbd_md_mark_dirty(mdev);
1212 }
1213 if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
1214 drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
1215 put_ldev(mdev);
1216 }
1217
1218 /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
1219 if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
1220 os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
1221 set_bit(CONSIDER_RESYNC, &mdev->flags);
1222
1223 /* Receiver should clean up itself */
1224 if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
1225 drbd_thread_stop_nowait(&mdev->receiver);
1226
1227 /* Now the receiver finished cleaning up itself, it should die */
1228 if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
1229 drbd_thread_stop_nowait(&mdev->receiver);
1230
1231 /* Upon network failure, we need to restart the receiver. */
1232 if (os.conn > C_TEAR_DOWN &&
1233 ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
1234 drbd_thread_restart_nowait(&mdev->receiver);
1235
1236 /* Resume AL writing if we get a connection */
1237 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1238 drbd_resume_al(mdev);
1239
1240 ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
1241 if (ascw) {
1242 ascw->os = os;
1243 ascw->ns = ns;
1244 ascw->flags = flags;
1245 ascw->w.cb = w_after_state_ch;
1246 ascw->done = done;
1247 drbd_queue_work(&mdev->data.work, &ascw->w);
1248 } else {
1249 dev_warn(DEV, "Could not kmalloc an ascw\n");
1250 }
1251
1252 return rv;
1253 }
1254
1255 static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1256 {
1257 struct after_state_chg_work *ascw =
1258 container_of(w, struct after_state_chg_work, w);
1259 after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
1260 if (ascw->flags & CS_WAIT_COMPLETE) {
1261 D_ASSERT(ascw->done != NULL);
1262 complete(ascw->done);
1263 }
1264 kfree(ascw);
1265
1266 return 1;
1267 }
1268
1269 static void abw_start_sync(struct drbd_conf *mdev, int rv)
1270 {
1271 if (rv) {
1272 dev_err(DEV, "Writing the bitmap failed, not starting resync.\n");
1273 _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
1274 return;
1275 }
1276
1277 switch (mdev->state.conn) {
1278 case C_STARTING_SYNC_T:
1279 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
1280 break;
1281 case C_STARTING_SYNC_S:
1282 drbd_start_resync(mdev, C_SYNC_SOURCE);
1283 break;
1284 }
1285 }
1286
1287 /**
1288 * after_state_ch() - Perform after state change actions that may sleep
1289 * @mdev: DRBD device.
1290 * @os: old state.
1291 * @ns: new state.
1292 * @flags: Flags
1293 */
1294 static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1295 union drbd_state ns, enum chg_state_flags flags)
1296 {
1297 enum drbd_fencing_p fp;
1298 enum drbd_req_event what = nothing;
1299 union drbd_state nsm = (union drbd_state){ .i = -1 };
1300
1301 if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
1302 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1303 if (mdev->p_uuid)
1304 mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
1305 }
1306
1307 fp = FP_DONT_CARE;
1308 if (get_ldev(mdev)) {
1309 fp = mdev->ldev->dc.fencing;
1310 put_ldev(mdev);
1311 }
1312
1313 /* Inform userspace about the change... */
1314 drbd_bcast_state(mdev, ns);
1315
1316 if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
1317 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
1318 drbd_khelper(mdev, "pri-on-incon-degr");
1319
1320 /* Here we have the actions that are performed after a
1321 state change. This function might sleep */
1322
1323 nsm.i = -1;
1324 if (ns.susp_nod) {
1325 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1326 if (ns.conn == C_CONNECTED)
1327 what = resend, nsm.susp_nod = 0;
1328 else /* ns.conn > C_CONNECTED */
1329 dev_err(DEV, "Unexpected Resync going on!\n");
1330 }
1331
1332 if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
1333 what = restart_frozen_disk_io, nsm.susp_nod = 0;
1334
1335 }
1336
1337 if (ns.susp_fen) {
1338 /* case1: The outdate peer handler is successful: */
1339 if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
1340 tl_clear(mdev);
1341 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1342 drbd_uuid_new_current(mdev);
1343 clear_bit(NEW_CUR_UUID, &mdev->flags);
1344 }
1345 spin_lock_irq(&mdev->req_lock);
1346 _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
1347 spin_unlock_irq(&mdev->req_lock);
1348 }
1349 /* case2: The connection was established again: */
1350 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1351 clear_bit(NEW_CUR_UUID, &mdev->flags);
1352 what = resend;
1353 nsm.susp_fen = 0;
1354 }
1355 }
1356
1357 if (what != nothing) {
1358 spin_lock_irq(&mdev->req_lock);
1359 _tl_restart(mdev, what);
1360 nsm.i &= mdev->state.i;
1361 _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
1362 spin_unlock_irq(&mdev->req_lock);
1363 }
1364
1365 /* Do not change the order of the if above and the two below... */
1366 if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
1367 drbd_send_uuids(mdev);
1368 drbd_send_state(mdev);
1369 }
1370 if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S)
1371 drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, "send_bitmap (WFBitMapS)");
1372
1373 /* Lost contact to peer's copy of the data */
1374 if ((os.pdsk >= D_INCONSISTENT &&
1375 os.pdsk != D_UNKNOWN &&
1376 os.pdsk != D_OUTDATED)
1377 && (ns.pdsk < D_INCONSISTENT ||
1378 ns.pdsk == D_UNKNOWN ||
1379 ns.pdsk == D_OUTDATED)) {
1380 if (get_ldev(mdev)) {
1381 if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
1382 mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
1383 if (is_susp(mdev->state)) {
1384 set_bit(NEW_CUR_UUID, &mdev->flags);
1385 } else {
1386 drbd_uuid_new_current(mdev);
1387 drbd_send_uuids(mdev);
1388 }
1389 }
1390 put_ldev(mdev);
1391 }
1392 }
1393
1394 if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
1395 if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
1396 drbd_uuid_new_current(mdev);
1397 drbd_send_uuids(mdev);
1398 }
1399
1400 /* D_DISKLESS Peer becomes secondary */
1401 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
1402 drbd_al_to_on_disk_bm(mdev);
1403 put_ldev(mdev);
1404 }
1405
1406 /* Last part of the attaching process ... */
1407 if (ns.conn >= C_CONNECTED &&
1408 os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
1409 drbd_send_sizes(mdev, 0, 0); /* to start sync... */
1410 drbd_send_uuids(mdev);
1411 drbd_send_state(mdev);
1412 }
1413
1414 /* We want to pause/continue resync, tell peer. */
1415 if (ns.conn >= C_CONNECTED &&
1416 ((os.aftr_isp != ns.aftr_isp) ||
1417 (os.user_isp != ns.user_isp)))
1418 drbd_send_state(mdev);
1419
1420 /* In case one of the isp bits got set, suspend other devices. */
1421 if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
1422 (ns.aftr_isp || ns.peer_isp || ns.user_isp))
1423 suspend_other_sg(mdev);
1424
1425 /* Make sure the peer gets informed about possible state
1426 changes (ISP bits) that happened while we were in WFReportParams. */
1427 if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
1428 drbd_send_state(mdev);
1429
1430 if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
1431 drbd_send_state(mdev);
1432
1433 /* We are in the process of starting a full sync... */
1434 if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
1435 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
1436 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync, "set_n_write from StartingSync");
1437
1438 /* We are invalidating ourselves... */
1439 if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
1440 os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
1441 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
1442
1443 /* first half of local IO error, failure to attach,
1444 * or administrative detach */
1445 if (os.disk != D_FAILED && ns.disk == D_FAILED) {
1446 enum drbd_io_error_p eh;
1447 int was_io_error;
1448 /* corresponding get_ldev was in __drbd_set_state, to serialize
1449 * our cleanup here with the transition to D_DISKLESS,
1450 * so it is safe to dereference ldev here. */
1451 eh = mdev->ldev->dc.on_io_error;
1452 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
1453
1454 /* current state still has to be D_FAILED,
1455 * there is only one way out: to D_DISKLESS,
1456 * and that may only happen after our put_ldev below. */
1457 if (mdev->state.disk != D_FAILED)
1458 dev_err(DEV,
1459 "ASSERT FAILED: disk is %s during detach\n",
1460 drbd_disk_str(mdev->state.disk));
1461
1462 if (drbd_send_state(mdev))
1463 dev_warn(DEV, "Notified peer that I am detaching my disk\n");
1464 else
1465 dev_err(DEV, "Sending state for detaching disk failed\n");
1466
1467 drbd_rs_cancel_all(mdev);
1468
1469 /* In case we want to get something to stable storage still,
1470 * this may be the last chance.
1471 * Following put_ldev may transition to D_DISKLESS. */
1472 drbd_md_sync(mdev);
1473 put_ldev(mdev);
1474
1475 if (was_io_error && eh == EP_CALL_HELPER)
1476 drbd_khelper(mdev, "local-io-error");
1477 }
1478
1479 /* second half of local IO error, failure to attach,
1480 * or administrative detach,
1481 * after local_cnt references have reached zero again */
1482 if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
1483 /* We must still be diskless,
1484 * re-attach has to be serialized with this! */
1485 if (mdev->state.disk != D_DISKLESS)
1486 dev_err(DEV,
1487 "ASSERT FAILED: disk is %s while going diskless\n",
1488 drbd_disk_str(mdev->state.disk));
1489
1490 mdev->rs_total = 0;
1491 mdev->rs_failed = 0;
1492 atomic_set(&mdev->rs_pending_cnt, 0);
1493
1494 if (drbd_send_state(mdev))
1495 dev_warn(DEV, "Notified peer that I'm now diskless.\n");
1496 else
1497 dev_err(DEV, "Sending state for being diskless failed\n");
1498 /* corresponding get_ldev in __drbd_set_state
1499 * this may finally trigger drbd_ldev_destroy. */
1500 put_ldev(mdev);
1501 }
1502
1503 /* Disks got bigger while they were detached */
1504 if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
1505 test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
1506 if (ns.conn == C_CONNECTED)
1507 resync_after_online_grow(mdev);
1508 }
1509
1510 /* A resync finished or aborted, wake paused devices... */
1511 if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
1512 (os.peer_isp && !ns.peer_isp) ||
1513 (os.user_isp && !ns.user_isp))
1514 resume_next_sg(mdev);
1515
1516 /* sync target done with resync. Explicitly notify peer, even though
1517 * it should (at least for non-empty resyncs) already know itself. */
1518 if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
1519 drbd_send_state(mdev);
1520
1521 /* free tl_hash if we got thawed and are C_STANDALONE */
1522 if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash)
1523 drbd_free_tl_hash(mdev);
1524
1525 /* Upon network connection, we need to start the receiver */
1526 if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
1527 drbd_thread_start(&mdev->receiver);
1528
1529 /* Terminate worker thread if we are unconfigured - it will be
1530 restarted as needed... */
1531 if (ns.disk == D_DISKLESS &&
1532 ns.conn == C_STANDALONE &&
1533 ns.role == R_SECONDARY) {
1534 if (os.aftr_isp != ns.aftr_isp)
1535 resume_next_sg(mdev);
1536 /* set in __drbd_set_state, unless CONFIG_PENDING was set */
1537 if (test_bit(DEVICE_DYING, &mdev->flags))
1538 drbd_thread_stop_nowait(&mdev->worker);
1539 }
1540
1541 drbd_md_sync(mdev);
1542 }
1543
1544
1545 static int drbd_thread_setup(void *arg)
1546 {
1547 struct drbd_thread *thi = (struct drbd_thread *) arg;
1548 struct drbd_conf *mdev = thi->mdev;
1549 unsigned long flags;
1550 int retval;
1551
1552 restart:
1553 retval = thi->function(thi);
1554
1555 spin_lock_irqsave(&thi->t_lock, flags);
1556
1557 /* if the receiver has been "Exiting", the last thing it did
1558 * was set the conn state to "StandAlone",
1559 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
1560 * and receiver thread will be "started".
1561 * drbd_thread_start needs to set "Restarting" in that case.
1562 * t_state check and assignment needs to be within the same spinlock,
1563 * so either thread_start sees Exiting, and can remap to Restarting,
1564 * or thread_start see None, and can proceed as normal.
1565 */
1566
1567 if (thi->t_state == Restarting) {
1568 dev_info(DEV, "Restarting %s\n", current->comm);
1569 thi->t_state = Running;
1570 spin_unlock_irqrestore(&thi->t_lock, flags);
1571 goto restart;
1572 }
1573
1574 thi->task = NULL;
1575 thi->t_state = None;
1576 smp_mb();
1577 complete(&thi->stop);
1578 spin_unlock_irqrestore(&thi->t_lock, flags);
1579
1580 dev_info(DEV, "Terminating %s\n", current->comm);
1581
1582 /* Release mod reference taken when thread was started */
1583 module_put(THIS_MODULE);
1584 return retval;
1585 }
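/*
 * Informal sketch of the thread state transitions implemented here and in
 * drbd_thread_start()/_drbd_thread_stop() below:
 *
 *	None       -> Running     drbd_thread_start()
 *	Running    -> Exiting     _drbd_thread_stop(restart = 0)
 *	Running    -> Restarting  _drbd_thread_stop(restart = 1)
 *	Exiting    -> Restarting  drbd_thread_start() while the thread exits
 *	Restarting -> Running     drbd_thread_setup(), "restart:" label above
 *	Exiting    -> None        drbd_thread_setup(), normal termination
 */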
1586
1587 static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
1588 int (*func) (struct drbd_thread *))
1589 {
1590 spin_lock_init(&thi->t_lock);
1591 thi->task = NULL;
1592 thi->t_state = None;
1593 thi->function = func;
1594 thi->mdev = mdev;
1595 }
1596
1597 int drbd_thread_start(struct drbd_thread *thi)
1598 {
1599 struct drbd_conf *mdev = thi->mdev;
1600 struct task_struct *nt;
1601 unsigned long flags;
1602
1603 const char *me =
1604 thi == &mdev->receiver ? "receiver" :
1605 thi == &mdev->asender ? "asender" :
1606 thi == &mdev->worker ? "worker" : "NONSENSE";
1607
1608 /* is used from state engine doing drbd_thread_stop_nowait,
1609 * while holding the req lock irqsave */
1610 spin_lock_irqsave(&thi->t_lock, flags);
1611
1612 switch (thi->t_state) {
1613 case None:
1614 dev_info(DEV, "Starting %s thread (from %s [%d])\n",
1615 me, current->comm, current->pid);
1616
1617 /* Get ref on module for thread - this is released when thread exits */
1618 if (!try_module_get(THIS_MODULE)) {
1619 dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
1620 spin_unlock_irqrestore(&thi->t_lock, flags);
1621 return FALSE;
1622 }
1623
1624 init_completion(&thi->stop);
1625 D_ASSERT(thi->task == NULL);
1626 thi->reset_cpu_mask = 1;
1627 thi->t_state = Running;
1628 spin_unlock_irqrestore(&thi->t_lock, flags);
1629 flush_signals(current); /* otherwise we may get -ERESTARTNOINTR */
1630
1631 nt = kthread_create(drbd_thread_setup, (void *) thi,
1632 "drbd%d_%s", mdev_to_minor(mdev), me);
1633
1634 if (IS_ERR(nt)) {
1635 dev_err(DEV, "Couldn't start thread\n");
1636
1637 module_put(THIS_MODULE);
1638 return FALSE;
1639 }
1640 spin_lock_irqsave(&thi->t_lock, flags);
1641 thi->task = nt;
1642 thi->t_state = Running;
1643 spin_unlock_irqrestore(&thi->t_lock, flags);
1644 wake_up_process(nt);
1645 break;
1646 case Exiting:
1647 thi->t_state = Restarting;
1648 dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
1649 me, current->comm, current->pid);
1650 /* fall through */
1651 case Running:
1652 case Restarting:
1653 default:
1654 spin_unlock_irqrestore(&thi->t_lock, flags);
1655 break;
1656 }
1657
1658 return TRUE;
1659 }
1660
1661
1662 void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
1663 {
1664 unsigned long flags;
1665
1666 enum drbd_thread_state ns = restart ? Restarting : Exiting;
1667
1668 /* may be called from state engine, holding the req lock irqsave */
1669 spin_lock_irqsave(&thi->t_lock, flags);
1670
1671 if (thi->t_state == None) {
1672 spin_unlock_irqrestore(&thi->t_lock, flags);
1673 if (restart)
1674 drbd_thread_start(thi);
1675 return;
1676 }
1677
1678 if (thi->t_state != ns) {
1679 if (thi->task == NULL) {
1680 spin_unlock_irqrestore(&thi->t_lock, flags);
1681 return;
1682 }
1683
1684 thi->t_state = ns;
1685 smp_mb();
1686 init_completion(&thi->stop);
1687 if (thi->task != current)
1688 force_sig(DRBD_SIGKILL, thi->task);
1689
1690 }
1691
1692 spin_unlock_irqrestore(&thi->t_lock, flags);
1693
1694 if (wait)
1695 wait_for_completion(&thi->stop);
1696 }
1697
1698 #ifdef CONFIG_SMP
1699 /**
1700 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
1701 * @mdev: DRBD device.
1702 *
1703 * Forces all threads of a device onto the same CPU. This is beneficial for
1704  * DRBD's performance. May be overridden by the user's configuration.
1705 */
1706 void drbd_calc_cpu_mask(struct drbd_conf *mdev)
1707 {
1708 int ord, cpu;
1709
1710 /* user override. */
1711 if (cpumask_weight(mdev->cpu_mask))
1712 return;
1713
1714 ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
1715 for_each_online_cpu(cpu) {
1716 if (ord-- == 0) {
1717 cpumask_set_cpu(cpu, mdev->cpu_mask);
1718 return;
1719 }
1720 }
1721 /* should not be reached */
1722 cpumask_setall(mdev->cpu_mask);
1723 }
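/*
 * Worked example (hypothetical numbers): on a box with 4 online CPUs, all
 * threads of minor 6 get ord = 6 % 4 = 2 and are therefore pinned to the
 * third online CPU returned by for_each_online_cpu().
 */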
1724
1725 /**
1726 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
1727 * @mdev: DRBD device.
1728 *
1729 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
1730 * prematurely.
1731 */
1732 void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
1733 {
1734 struct task_struct *p = current;
1735 struct drbd_thread *thi =
1736 p == mdev->asender.task ? &mdev->asender :
1737 p == mdev->receiver.task ? &mdev->receiver :
1738 p == mdev->worker.task ? &mdev->worker :
1739 NULL;
1740 ERR_IF(thi == NULL)
1741 return;
1742 if (!thi->reset_cpu_mask)
1743 return;
1744 thi->reset_cpu_mask = 0;
1745 set_cpus_allowed_ptr(p, mdev->cpu_mask);
1746 }
1747 #endif
1748
1749 /* the appropriate socket mutex must be held already */
1750 int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
1751 enum drbd_packets cmd, struct p_header80 *h,
1752 size_t size, unsigned msg_flags)
1753 {
1754 int sent, ok;
1755
1756 ERR_IF(!h) return FALSE;
1757 ERR_IF(!size) return FALSE;
1758
1759 h->magic = BE_DRBD_MAGIC;
1760 h->command = cpu_to_be16(cmd);
1761 h->length = cpu_to_be16(size-sizeof(struct p_header80));
1762
1763 sent = drbd_send(mdev, sock, h, size, msg_flags);
1764
1765 ok = (sent == size);
1766 if (!ok)
1767 dev_err(DEV, "short sent %s size=%d sent=%d\n",
1768 cmdname(cmd), (int)size, sent);
1769 return ok;
1770 }
1771
1772 /* don't pass the socket. we may only look at it
1773 * when we hold the appropriate socket mutex.
1774 */
1775 int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
1776 enum drbd_packets cmd, struct p_header80 *h, size_t size)
1777 {
1778 int ok = 0;
1779 struct socket *sock;
1780
1781 if (use_data_socket) {
1782 mutex_lock(&mdev->data.mutex);
1783 sock = mdev->data.socket;
1784 } else {
1785 mutex_lock(&mdev->meta.mutex);
1786 sock = mdev->meta.socket;
1787 }
1788
1789 /* drbd_disconnect() could have called drbd_free_sock()
1790 * while we were waiting for the mutex... */
1791 if (likely(sock != NULL))
1792 ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);
1793
1794 if (use_data_socket)
1795 mutex_unlock(&mdev->data.mutex);
1796 else
1797 mutex_unlock(&mdev->meta.mutex);
1798 return ok;
1799 }
1800
1801 int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
1802 size_t size)
1803 {
1804 struct p_header80 h;
1805 int ok;
1806
1807 h.magic = BE_DRBD_MAGIC;
1808 h.command = cpu_to_be16(cmd);
1809 h.length = cpu_to_be16(size);
1810
1811 if (!drbd_get_data_sock(mdev))
1812 return 0;
1813
1814 ok = (sizeof(h) ==
1815 drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
1816 ok = ok && (size ==
1817 drbd_send(mdev, mdev->data.socket, data, size, 0));
1818
1819 drbd_put_data_sock(mdev);
1820
1821 return ok;
1822 }
1823
1824 int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
1825 {
1826 struct p_rs_param_95 *p;
1827 struct socket *sock;
1828 int size, rv;
1829 const int apv = mdev->agreed_pro_version;
1830
1831 size = apv <= 87 ? sizeof(struct p_rs_param)
1832 : apv == 88 ? sizeof(struct p_rs_param)
1833 + strlen(mdev->sync_conf.verify_alg) + 1
1834 : apv <= 94 ? sizeof(struct p_rs_param_89)
1835 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
1836
1837 /* used from admin command context and receiver/worker context.
1838 * to avoid kmalloc, grab the socket right here,
1839 * then use the pre-allocated sbuf there */
1840 mutex_lock(&mdev->data.mutex);
1841 sock = mdev->data.socket;
1842
1843 if (likely(sock != NULL)) {
1844 enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
1845
1846 p = &mdev->data.sbuf.rs_param_95;
1847
1848 /* initialize verify_alg and csums_alg */
1849 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
1850
1851 p->rate = cpu_to_be32(sc->rate);
1852 p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
1853 p->c_delay_target = cpu_to_be32(sc->c_delay_target);
1854 p->c_fill_target = cpu_to_be32(sc->c_fill_target);
1855 p->c_max_rate = cpu_to_be32(sc->c_max_rate);
1856
1857 if (apv >= 88)
1858 strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
1859 if (apv >= 89)
1860 strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
1861
1862 rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
1863 } else
1864 rv = 0; /* not ok */
1865
1866 mutex_unlock(&mdev->data.mutex);
1867
1868 return rv;
1869 }
1870
1871 int drbd_send_protocol(struct drbd_conf *mdev)
1872 {
1873 struct p_protocol *p;
1874 int size, cf, rv;
1875
1876 size = sizeof(struct p_protocol);
1877
1878 if (mdev->agreed_pro_version >= 87)
1879 size += strlen(mdev->net_conf->integrity_alg) + 1;
1880
1881 /* we must not recurse into our own queue,
1882 * as that is blocked during handshake */
1883 p = kmalloc(size, GFP_NOIO);
1884 if (p == NULL)
1885 return 0;
1886
1887 p->protocol = cpu_to_be32(mdev->net_conf->wire_protocol);
1888 p->after_sb_0p = cpu_to_be32(mdev->net_conf->after_sb_0p);
1889 p->after_sb_1p = cpu_to_be32(mdev->net_conf->after_sb_1p);
1890 p->after_sb_2p = cpu_to_be32(mdev->net_conf->after_sb_2p);
1891 p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
1892
1893 cf = 0;
1894 if (mdev->net_conf->want_lose)
1895 cf |= CF_WANT_LOSE;
1896 if (mdev->net_conf->dry_run) {
1897 if (mdev->agreed_pro_version >= 92)
1898 cf |= CF_DRY_RUN;
1899 else {
1900 dev_err(DEV, "--dry-run is not supported by peer\n");
1901 kfree(p);
1902 return 0;
1903 }
1904 }
1905 p->conn_flags = cpu_to_be32(cf);
1906
1907 if (mdev->agreed_pro_version >= 87)
1908 strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
1909
1910 rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
1911 (struct p_header80 *)p, size);
1912 kfree(p);
1913 return rv;
1914 }
1915
1916 int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
1917 {
1918 struct p_uuids p;
1919 int i;
1920
1921 if (!get_ldev_if_state(mdev, D_NEGOTIATING))
1922 return 1;
1923
1924 for (i = UI_CURRENT; i < UI_SIZE; i++)
1925 p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
1926
1927 mdev->comm_bm_set = drbd_bm_total_weight(mdev);
1928 p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
1929 uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
1930 uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
1931 uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
1932 p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
1933
1934 put_ldev(mdev);
1935
1936 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
1937 (struct p_header80 *)&p, sizeof(p));
1938 }
1939
1940 int drbd_send_uuids(struct drbd_conf *mdev)
1941 {
1942 return _drbd_send_uuids(mdev, 0);
1943 }
1944
1945 int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
1946 {
1947 return _drbd_send_uuids(mdev, 8);
1948 }
1949
1950
1951 int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val)
1952 {
1953 struct p_rs_uuid p;
1954
1955 p.uuid = cpu_to_be64(val);
1956
1957 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
1958 (struct p_header80 *)&p, sizeof(p));
1959 }
1960
1961 int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
1962 {
1963 struct p_sizes p;
1964 sector_t d_size, u_size;
1965 int q_order_type;
1966 int ok;
1967
1968 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
1969 D_ASSERT(mdev->ldev->backing_bdev);
1970 d_size = drbd_get_max_capacity(mdev->ldev);
1971 u_size = mdev->ldev->dc.disk_size;
1972 q_order_type = drbd_queue_order_type(mdev);
1973 put_ldev(mdev);
1974 } else {
1975 d_size = 0;
1976 u_size = 0;
1977 q_order_type = QUEUE_ORDERED_NONE;
1978 }
1979
1980 p.d_size = cpu_to_be64(d_size);
1981 p.u_size = cpu_to_be64(u_size);
1982 p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
1983 p.max_bio_size = cpu_to_be32(queue_max_hw_sectors(mdev->rq_queue) << 9);
1984 p.queue_order_type = cpu_to_be16(q_order_type);
1985 p.dds_flags = cpu_to_be16(flags);
1986
1987 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
1988 (struct p_header80 *)&p, sizeof(p));
1989 return ok;
1990 }
1991
1992 /**
1993 * drbd_send_state() - Sends the drbd state to the peer
1994 * @mdev: DRBD device.
1995 */
1996 int drbd_send_state(struct drbd_conf *mdev)
1997 {
1998 struct socket *sock;
1999 struct p_state p;
2000 int ok = 0;
2001
2002 /* Grab the state lock so we won't send state if we're in the middle
2003 * of a cluster-wide state change on another thread */
2004 drbd_state_lock(mdev);
2005
2006 mutex_lock(&mdev->data.mutex);
2007
2008 p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
2009 sock = mdev->data.socket;
2010
2011 if (likely(sock != NULL)) {
2012 ok = _drbd_send_cmd(mdev, sock, P_STATE,
2013 (struct p_header80 *)&p, sizeof(p), 0);
2014 }
2015
2016 mutex_unlock(&mdev->data.mutex);
2017
2018 drbd_state_unlock(mdev);
2019 return ok;
2020 }
2021
2022 int drbd_send_state_req(struct drbd_conf *mdev,
2023 union drbd_state mask, union drbd_state val)
2024 {
2025 struct p_req_state p;
2026
2027 p.mask = cpu_to_be32(mask.i);
2028 p.val = cpu_to_be32(val.i);
2029
2030 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
2031 (struct p_header80 *)&p, sizeof(p));
2032 }
2033
2034 int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode)
2035 {
2036 struct p_req_state_reply p;
2037
2038 p.retcode = cpu_to_be32(retcode);
2039
2040 return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
2041 (struct p_header80 *)&p, sizeof(p));
2042 }
2043
2044 int fill_bitmap_rle_bits(struct drbd_conf *mdev,
2045 struct p_compressed_bm *p,
2046 struct bm_xfer_ctx *c)
2047 {
2048 struct bitstream bs;
2049 unsigned long plain_bits;
2050 unsigned long tmp;
2051 unsigned long rl;
2052 unsigned len;
2053 unsigned toggle;
2054 int bits;
2055
2056 /* may we use this feature? */
2057 if ((mdev->sync_conf.use_rle == 0) ||
2058 (mdev->agreed_pro_version < 90))
2059 return 0;
2060
2061 if (c->bit_offset >= c->bm_bits)
2062 return 0; /* nothing to do. */
2063
2064 /* use at most this many bytes */
2065 bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
2066 memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
2067 /* plain bits covered in this code string */
2068 plain_bits = 0;
2069
2070 /* p->encoding & 0x80 stores whether the first run is a run of set bits.
2071 * bit offset is implicit.
2072 * start with toggle == 2 so we can recognize the first iteration */
2073 toggle = 2;
2074
2075 /* see how many plain bits we can stuff into one packet
2076 * using RLE and VLI. */
2077 do {
2078 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
2079 : _drbd_bm_find_next(mdev, c->bit_offset);
2080 if (tmp == -1UL)
2081 tmp = c->bm_bits;
2082 rl = tmp - c->bit_offset;
2083
2084 if (toggle == 2) { /* first iteration */
2085 if (rl == 0) {
2086 /* the first checked bit was set,
2087 * store start value, */
2088 DCBP_set_start(p, 1);
2089 /* but skip encoding of zero run length */
2090 toggle = !toggle;
2091 continue;
2092 }
2093 DCBP_set_start(p, 0);
2094 }
2095
2096 /* paranoia: catch a zero run length.
2097 * can only happen if the bitmap is modified while we scan it. */
2098 if (rl == 0) {
2099 dev_err(DEV, "unexpected zero runlength while encoding bitmap "
2100 "t:%u bo:%lu\n", toggle, c->bit_offset);
2101 return -1;
2102 }
2103
2104 bits = vli_encode_bits(&bs, rl);
2105 if (bits == -ENOBUFS) /* buffer full */
2106 break;
2107 if (bits <= 0) {
2108 dev_err(DEV, "error while encoding bitmap: %d\n", bits);
2109 return 0;
2110 }
2111
2112 toggle = !toggle;
2113 plain_bits += rl;
2114 c->bit_offset = tmp;
2115 } while (c->bit_offset < c->bm_bits);
2116
2117 len = bs.cur.b - p->code + !!bs.cur.bit;
2118
2119 if (plain_bits < (len << 3)) {
2120 /* incompressible with this method.
2121 * we need to rewind both word and bit position. */
2122 c->bit_offset -= plain_bits;
2123 bm_xfer_ctx_bit_to_word_offset(c);
2124 c->bit_offset = c->word_offset * BITS_PER_LONG;
2125 return 0;
2126 }
2127
2128 /* RLE + VLI was able to compress it just fine.
2129 * update c->word_offset. */
2130 bm_xfer_ctx_bit_to_word_offset(c);
2131
2132 /* store pad_bits */
2133 DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
2134
2135 return len;
2136 }
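/*
 * Illustrative sketch only (drbd_example_extract_runs() is a hypothetical
 * helper, not part of the driver): the run-length extraction that
 * fill_bitmap_rle_bits() performs above, shown with the generic
 * find_next_bit()/find_next_zero_bit() helpers on a plain bitmap.
 * The real encoder additionally VLI-encodes every run length into the
 * packet and records whether the first run is a run of set bits.
 */
static void __maybe_unused drbd_example_extract_runs(const unsigned long *bm,
						     unsigned long nbits)
{
	unsigned long offset = 0;
	int run_of_set_bits = test_bit(0, bm);	/* bit value of the first run */

	while (offset < nbits) {
		/* the current run ends where the opposite bit value starts */
		unsigned long end = run_of_set_bits
			? find_next_zero_bit(bm, nbits, offset)
			: find_next_bit(bm, nbits, offset);
		unsigned long rl = end - offset;	/* run length, "rl" above */

		/* a real encoder would now do vli_encode_bits(&bs, rl) */
		(void)rl;

		offset = end;
		run_of_set_bits = !run_of_set_bits;
	}
}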
2137
2138 enum { OK, FAILED, DONE }
2139 send_bitmap_rle_or_plain(struct drbd_conf *mdev,
2140 struct p_header80 *h, struct bm_xfer_ctx *c)
2141 {
2142 struct p_compressed_bm *p = (void*)h;
2143 unsigned long num_words;
2144 int len;
2145 int ok;
2146
2147 len = fill_bitmap_rle_bits(mdev, p, c);
2148
2149 if (len < 0)
2150 return FAILED;
2151
2152 if (len) {
2153 DCBP_set_code(p, RLE_VLI_Bits);
2154 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
2155 sizeof(*p) + len, 0);
2156
2157 c->packets[0]++;
2158 c->bytes[0] += sizeof(*p) + len;
2159
2160 if (c->bit_offset >= c->bm_bits)
2161 len = 0; /* DONE */
2162 } else {
2163 /* was not compressible.
2164 * send a buffer full of plain text bits instead. */
2165 num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
2166 len = num_words * sizeof(long);
2167 if (len)
2168 drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
2169 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
2170 h, sizeof(struct p_header80) + len, 0);
2171 c->word_offset += num_words;
2172 c->bit_offset = c->word_offset * BITS_PER_LONG;
2173
2174 c->packets[1]++;
2175 c->bytes[1] += sizeof(struct p_header80) + len;
2176
2177 if (c->bit_offset > c->bm_bits)
2178 c->bit_offset = c->bm_bits;
2179 }
2180 ok = ok ? ((len == 0) ? DONE : OK) : FAILED;
2181
2182 if (ok == DONE)
2183 INFO_bm_xfer_stats(mdev, "send", c);
2184 return ok;
2185 }
2186
2187 /* See the comment at receive_bitmap() */
2188 int _drbd_send_bitmap(struct drbd_conf *mdev)
2189 {
2190 struct bm_xfer_ctx c;
2191 struct p_header80 *p;
2192 int ret;
2193
2194 ERR_IF(!mdev->bitmap) return FALSE;
2195
2196 /* maybe we should use some per thread scratch page,
2197 * and allocate that during initial device creation? */
2198 p = (struct p_header80 *) __get_free_page(GFP_NOIO);
2199 if (!p) {
2200 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
2201 return FALSE;
2202 }
2203
2204 if (get_ldev(mdev)) {
2205 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
2206 dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
2207 drbd_bm_set_all(mdev);
2208 if (drbd_bm_write(mdev)) {
2209 /* write_bm failed! Leave the full sync flag set in the meta data,
2210 * but otherwise process as per normal - need to tell the other
2211 * side that a full resync is required! */
2212 dev_err(DEV, "Failed to write bitmap to disk!\n");
2213 } else {
2214 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
2215 drbd_md_sync(mdev);
2216 }
2217 }
2218 put_ldev(mdev);
2219 }
2220
2221 c = (struct bm_xfer_ctx) {
2222 .bm_bits = drbd_bm_bits(mdev),
2223 .bm_words = drbd_bm_words(mdev),
2224 };
2225
2226 do {
2227 ret = send_bitmap_rle_or_plain(mdev, p, &c);
2228 } while (ret == OK);
2229
2230 free_page((unsigned long) p);
2231 return (ret == DONE);
2232 }
2233
2234 int drbd_send_bitmap(struct drbd_conf *mdev)
2235 {
2236 int err;
2237
2238 if (!drbd_get_data_sock(mdev))
2239 return -1;
2240 err = !_drbd_send_bitmap(mdev);
2241 drbd_put_data_sock(mdev);
2242 return err;
2243 }
2244
2245 int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
2246 {
2247 int ok;
2248 struct p_barrier_ack p;
2249
2250 p.barrier = barrier_nr;
2251 p.set_size = cpu_to_be32(set_size);
2252
2253 if (mdev->state.conn < C_CONNECTED)
2254 return FALSE;
2255 ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
2256 (struct p_header80 *)&p, sizeof(p));
2257 return ok;
2258 }
2259
2260 /**
2261 * _drbd_send_ack() - Sends an ack packet
2262 * @mdev: DRBD device.
2263 * @cmd: Packet command code.
2264 * @sector: sector, needs to be in big endian byte order
2265 * @blksize: size in bytes, needs to be in big endian byte order
2266 * @block_id: Id, big endian byte order
2267 */
2268 static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
2269 u64 sector,
2270 u32 blksize,
2271 u64 block_id)
2272 {
2273 int ok;
2274 struct p_block_ack p;
2275
2276 p.sector = sector;
2277 p.block_id = block_id;
2278 p.blksize = blksize;
2279 p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
2280
2281 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
2282 return FALSE;
2283 ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
2284 (struct p_header80 *)&p, sizeof(p));
2285 return ok;
2286 }
2287
2288 /* dp->sector and dp->block_id already/still in network byte order,
2289 * data_size is payload size according to dp->head,
2290 * and may need to be corrected for digest size. */
2291 int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
2292 struct p_data *dp, int data_size)
2293 {
2294 data_size -= (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
2295 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
2296 return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
2297 dp->block_id);
2298 }
2299
2300 int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
2301 struct p_block_req *rp)
2302 {
2303 return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
2304 }
2305
2306 /**
2307 * drbd_send_ack() - Sends an ack packet
2308 * @mdev: DRBD device.
2309 * @cmd: Packet command code.
2310 * @e: Epoch entry.
2311 */
2312 int drbd_send_ack(struct drbd_conf *mdev,
2313 enum drbd_packets cmd, struct drbd_epoch_entry *e)
2314 {
2315 return _drbd_send_ack(mdev, cmd,
2316 cpu_to_be64(e->sector),
2317 cpu_to_be32(e->size),
2318 e->block_id);
2319 }
2320
2321 /* This function misuses the block_id field to signal whether the blocks
2322 * are in sync or not. */
2323 int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
2324 sector_t sector, int blksize, u64 block_id)
2325 {
2326 return _drbd_send_ack(mdev, cmd,
2327 cpu_to_be64(sector),
2328 cpu_to_be32(blksize),
2329 cpu_to_be64(block_id));
2330 }
2331
2332 int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
2333 sector_t sector, int size, u64 block_id)
2334 {
2335 int ok;
2336 struct p_block_req p;
2337
2338 p.sector = cpu_to_be64(sector);
2339 p.block_id = block_id;
2340 p.blksize = cpu_to_be32(size);
2341
2342 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
2343 (struct p_header80 *)&p, sizeof(p));
2344 return ok;
2345 }
2346
2347 int drbd_send_drequest_csum(struct drbd_conf *mdev,
2348 sector_t sector, int size,
2349 void *digest, int digest_size,
2350 enum drbd_packets cmd)
2351 {
2352 int ok;
2353 struct p_block_req p;
2354
2355 p.sector = cpu_to_be64(sector);
2356 p.block_id = BE_DRBD_MAGIC + 0xbeef;
2357 p.blksize = cpu_to_be32(size);
2358
2359 p.head.magic = BE_DRBD_MAGIC;
2360 p.head.command = cpu_to_be16(cmd);
2361 p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
2362
2363 mutex_lock(&mdev->data.mutex);
2364
2365 ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
2366 ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
2367
2368 mutex_unlock(&mdev->data.mutex);
2369
2370 return ok;
2371 }
2372
2373 int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
2374 {
2375 int ok;
2376 struct p_block_req p;
2377
2378 p.sector = cpu_to_be64(sector);
2379 p.block_id = BE_DRBD_MAGIC + 0xbabe;
2380 p.blksize = cpu_to_be32(size);
2381
2382 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
2383 (struct p_header80 *)&p, sizeof(p));
2384 return ok;
2385 }
2386
2387 /* called on sndtimeo
2388 * returns FALSE if we should retry,
2389 * TRUE if we think the connection is dead
2390 */
2391 static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
2392 {
2393 int drop_it;
2394 /* long elapsed = (long)(jiffies - mdev->last_received); */
2395
2396 drop_it = mdev->meta.socket == sock
2397 || !mdev->asender.task
2398 || get_t_state(&mdev->asender) != Running
2399 || mdev->state.conn < C_CONNECTED;
2400
2401 if (drop_it)
2402 return TRUE;
2403
2404 drop_it = !--mdev->ko_count;
2405 if (!drop_it) {
2406 dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
2407 current->comm, current->pid, mdev->ko_count);
2408 request_ping(mdev);
2409 }
2410
2411 return drop_it; /* && (mdev->state == R_PRIMARY) */;
2412 }
2413
2414 /* The idea of sendpage seems to be to put some kind of reference
2415 * to the page into the skb, and to hand it over to the NIC. In
2416 * this process get_page() gets called.
2417 *
2418 * As soon as the page has really been sent over the network, put_page()
2419 * gets called by some part of the network layer. [ NIC driver? ]
2420 *
2421 * [ get_page() / put_page() increment/decrement the count. If count
2422 * reaches 0 the page will be freed. ]
2423 *
2424 * This works nicely with pages from filesystems.
2425 * But this means that in protocol A we might signal IO completion too early!
2426 *
2427 * In order not to corrupt data during a resync we must make sure
2428 * that we do not reuse our own buffer pages (EEs) too early; therefore
2429 * we have the net_ee list.
2430 *
2431 * XFS still seems to have problems: it submits pages with page_count == 0!
2432 * As a workaround, we disable sendpage on pages
2433 * with page_count == 0 or PageSlab.
2434 */
2435 static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
2436 int offset, size_t size, unsigned msg_flags)
2437 {
2438 int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
2439 kunmap(page);
2440 if (sent == size)
2441 mdev->send_cnt += size>>9;
2442 return sent == size;
2443 }
2444
2445 static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
2446 int offset, size_t size, unsigned msg_flags)
2447 {
2448 mm_segment_t oldfs = get_fs();
2449 int sent, ok;
2450 int len = size;
2451
2452 /* e.g. XFS meta- & log-data is in slab pages, which have a
2453 * page_count of 0 and/or have PageSlab() set.
2454 * we cannot use send_page for those, as that does get_page();
2455 * put_page(); and would cause either a VM_BUG directly, or
2456 * __page_cache_release a page that would actually still be referenced
2457 * by someone, leading to some obscure delayed Oops somewhere else. */
2458 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
2459 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
2460
2461 msg_flags |= MSG_NOSIGNAL;
2462 drbd_update_congested(mdev);
2463 set_fs(KERNEL_DS);
2464 do {
2465 sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
2466 offset, len,
2467 msg_flags);
2468 if (sent == -EAGAIN) {
2469 if (we_should_drop_the_connection(mdev,
2470 mdev->data.socket))
2471 break;
2472 else
2473 continue;
2474 }
2475 if (sent <= 0) {
2476 dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
2477 __func__, (int)size, len, sent);
2478 break;
2479 }
2480 len -= sent;
2481 offset += sent;
2482 } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
2483 set_fs(oldfs);
2484 clear_bit(NET_CONGESTED, &mdev->flags);
2485
2486 ok = (len == 0);
2487 if (likely(ok))
2488 mdev->send_cnt += size>>9;
2489 return ok;
2490 }
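/*
 * Illustrative sketch only (drbd_example_sendpage_ok() is a hypothetical
 * helper, not part of the driver): the decision _drbd_send_page() makes
 * above, per the comment before _drbd_no_send_page().  Pages that the
 * network layer must not take its own reference on (slab pages, pages
 * with page_count() == 0) are copied via kmap() instead of being handed
 * to ->sendpage().
 */
static bool __maybe_unused drbd_example_sendpage_ok(struct page *page)
{
	return !disable_sendpage && page_count(page) >= 1 && !PageSlab(page);
}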
2491
2492 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
2493 {
2494 struct bio_vec *bvec;
2495 int i;
2496 /* hint all but last page with MSG_MORE */
2497 __bio_for_each_segment(bvec, bio, i, 0) {
2498 if (!_drbd_no_send_page(mdev, bvec->bv_page,
2499 bvec->bv_offset, bvec->bv_len,
2500 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
2501 return 0;
2502 }
2503 return 1;
2504 }
2505
2506 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
2507 {
2508 struct bio_vec *bvec;
2509 int i;
2510 /* hint all but last page with MSG_MORE */
2511 __bio_for_each_segment(bvec, bio, i, 0) {
2512 if (!_drbd_send_page(mdev, bvec->bv_page,
2513 bvec->bv_offset, bvec->bv_len,
2514 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
2515 return 0;
2516 }
2517 return 1;
2518 }
2519
2520 static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
2521 {
2522 struct page *page = e->pages;
2523 unsigned len = e->size;
2524 /* hint all but last page with MSG_MORE */
2525 page_chain_for_each(page) {
2526 unsigned l = min_t(unsigned, len, PAGE_SIZE);
2527 if (!_drbd_send_page(mdev, page, 0, l,
2528 page_chain_next(page) ? MSG_MORE : 0))
2529 return 0;
2530 len -= l;
2531 }
2532 return 1;
2533 }
2534
2535 static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
2536 {
2537 if (mdev->agreed_pro_version >= 95)
2538 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
2539 (bi_rw & REQ_FUA ? DP_FUA : 0) |
2540 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
2541 (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
2542 else
2543 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
2544 }
2545
2546 /* Used to send write requests
2547 * R_PRIMARY -> Peer (P_DATA)
2548 */
2549 int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2550 {
2551 int ok = 1;
2552 struct p_data p;
2553 unsigned int dp_flags = 0;
2554 void *dgb;
2555 int dgs;
2556
2557 if (!drbd_get_data_sock(mdev))
2558 return 0;
2559
2560 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2561 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2562
2563 if (req->size <= DRBD_MAX_SIZE_H80_PACKET) {
2564 p.head.h80.magic = BE_DRBD_MAGIC;
2565 p.head.h80.command = cpu_to_be16(P_DATA);
2566 p.head.h80.length =
2567 cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2568 } else {
2569 p.head.h95.magic = BE_DRBD_MAGIC_BIG;
2570 p.head.h95.command = cpu_to_be16(P_DATA);
2571 p.head.h95.length =
2572 cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2573 }
2574
2575 p.sector = cpu_to_be64(req->sector);
2576 p.block_id = (unsigned long)req;
2577 p.seq_num = cpu_to_be32(req->seq_num =
2578 atomic_add_return(1, &mdev->packet_seq));
2579
2580 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
2581
2582 if (mdev->state.conn >= C_SYNC_SOURCE &&
2583 mdev->state.conn <= C_PAUSED_SYNC_T)
2584 dp_flags |= DP_MAY_SET_IN_SYNC;
2585
2586 p.dp_flags = cpu_to_be32(dp_flags);
2587 set_bit(UNPLUG_REMOTE, &mdev->flags);
2588 ok = (sizeof(p) ==
2589 drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
2590 if (ok && dgs) {
2591 dgb = mdev->int_dig_out;
2592 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
2593 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
2594 }
2595 if (ok) {
2596 /* For protocol A, we have to memcpy the payload into
2597 * socket buffers, as we may complete right away
2598 * as soon as we have handed it over to TCP, at which point the data
2599 * pages may become invalid.
2600 *
2601 * With data integrity enabled, we copy it as well, so we can be
2602 * sure that even if the bio pages are still being modified, it
2603 * won't change the data on the wire; thus, if the digest checks
2604 * out ok after sending on this side but does not match on the
2605 * receiving side, we have surely detected corruption elsewhere.
2606 */
2607 if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs)
2608 ok = _drbd_send_bio(mdev, req->master_bio);
2609 else
2610 ok = _drbd_send_zc_bio(mdev, req->master_bio);
2611
2612 /* double check digest, sometimes buffers have been modified in flight. */
2613 if (dgs > 0 && dgs <= 64) {
2614 /* 64 bytes (512 bits) is the largest digest size
2615 * currently supported by the kernel crypto API. */
2616 unsigned char digest[64];
2617 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
2618 if (memcmp(mdev->int_dig_out, digest, dgs)) {
2619 dev_warn(DEV,
2620 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
2621 (unsigned long long)req->sector, req->size);
2622 }
2623 } /* else if (dgs > 64) {
2624 ... Be noisy about digest too large ...
2625 } */
2626 }
2627
2628 drbd_put_data_sock(mdev);
2629
2630 return ok;
2631 }
2632
2633 /* answer packet, used to send data back for read requests:
2634 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
2635 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
2636 */
2637 int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
2638 struct drbd_epoch_entry *e)
2639 {
2640 int ok;
2641 struct p_data p;
2642 void *dgb;
2643 int dgs;
2644
2645 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2646 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2647
2648 if (e->size <= DRBD_MAX_SIZE_H80_PACKET) {
2649 p.head.h80.magic = BE_DRBD_MAGIC;
2650 p.head.h80.command = cpu_to_be16(cmd);
2651 p.head.h80.length =
2652 cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2653 } else {
2654 p.head.h95.magic = BE_DRBD_MAGIC_BIG;
2655 p.head.h95.command = cpu_to_be16(cmd);
2656 p.head.h95.length =
2657 cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2658 }
2659
2660 p.sector = cpu_to_be64(e->sector);
2661 p.block_id = e->block_id;
2662 /* p.seq_num = 0; No sequence numbers here.. */
2663
2664 /* Only called by our kernel thread.
2665 * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
2666 * in response to admin command or module unload.
2667 */
2668 if (!drbd_get_data_sock(mdev))
2669 return 0;
2670
2671 ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
2672 if (ok && dgs) {
2673 dgb = mdev->int_dig_out;
2674 drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
2675 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
2676 }
2677 if (ok)
2678 ok = _drbd_send_zc_ee(mdev, e);
2679
2680 drbd_put_data_sock(mdev);
2681
2682 return ok;
2683 }
2684
2685 int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
2686 {
2687 struct p_block_desc p;
2688
2689 p.sector = cpu_to_be64(req->sector);
2690 p.blksize = cpu_to_be32(req->size);
2691
2692 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p));
2693 }
2694
2695 /*
2696 drbd_send distinguishes two cases:
2697
2698 Packets sent via the data socket "sock"
2699 and packets sent via the meta data socket "msock"
2700
2701                    sock                      msock
2702 -----------------+-------------------------+------------------------------
2703 timeout           conf.timeout / 2          conf.timeout / 2
2704 timeout action    send a ping via msock     Abort communication
2705                                             and close all sockets
2706 */
2707
2708 /*
2709 * you must have locked the appropriate [m]sock mutex elsewhere!
2710 */
2711 int drbd_send(struct drbd_conf *mdev, struct socket *sock,
2712 void *buf, size_t size, unsigned msg_flags)
2713 {
2714 struct kvec iov;
2715 struct msghdr msg;
2716 int rv, sent = 0;
2717
2718 if (!sock)
2719 return -1000;
2720
2721 /* THINK if (signal_pending) return ... ? */
2722
2723 iov.iov_base = buf;
2724 iov.iov_len = size;
2725
2726 msg.msg_name = NULL;
2727 msg.msg_namelen = 0;
2728 msg.msg_control = NULL;
2729 msg.msg_controllen = 0;
2730 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
2731
2732 if (sock == mdev->data.socket) {
2733 mdev->ko_count = mdev->net_conf->ko_count;
2734 drbd_update_congested(mdev);
2735 }
2736 do {
2737 /* STRANGE
2738 * tcp_sendmsg does _not_ use its size parameter at all ?
2739 *
2740 * -EAGAIN on timeout, -EINTR on signal.
2741 */
2742 /* THINK
2743 * do we need to block DRBD_SIG if sock == &meta.socket ??
2744 * otherwise wake_asender() might interrupt some send_*Ack !
2745 */
2746 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
2747 if (rv == -EAGAIN) {
2748 if (we_should_drop_the_connection(mdev, sock))
2749 break;
2750 else
2751 continue;
2752 }
2753 D_ASSERT(rv != 0);
2754 if (rv == -EINTR) {
2755 flush_signals(current);
2756 rv = 0;
2757 }
2758 if (rv < 0)
2759 break;
2760 sent += rv;
2761 iov.iov_base += rv;
2762 iov.iov_len -= rv;
2763 } while (sent < size);
2764
2765 if (sock == mdev->data.socket)
2766 clear_bit(NET_CONGESTED, &mdev->flags);
2767
2768 if (rv <= 0) {
2769 if (rv != -EAGAIN) {
2770 dev_err(DEV, "%s_sendmsg returned %d\n",
2771 sock == mdev->meta.socket ? "msock" : "sock",
2772 rv);
2773 drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
2774 } else
2775 drbd_force_state(mdev, NS(conn, C_TIMEOUT));
2776 }
2777
2778 return sent;
2779 }
2780
2781 static int drbd_open(struct block_device *bdev, fmode_t mode)
2782 {
2783 struct drbd_conf *mdev = bdev->bd_disk->private_data;
2784 unsigned long flags;
2785 int rv = 0;
2786
2787 mutex_lock(&drbd_main_mutex);
2788 spin_lock_irqsave(&mdev->req_lock, flags);
2789 /* to have a stable mdev->state.role
2790 * and no race with updating open_cnt */
2791
2792 if (mdev->state.role != R_PRIMARY) {
2793 if (mode & FMODE_WRITE)
2794 rv = -EROFS;
2795 else if (!allow_oos)
2796 rv = -EMEDIUMTYPE;
2797 }
2798
2799 if (!rv)
2800 mdev->open_cnt++;
2801 spin_unlock_irqrestore(&mdev->req_lock, flags);
2802 mutex_unlock(&drbd_main_mutex);
2803
2804 return rv;
2805 }
2806
2807 static int drbd_release(struct gendisk *gd, fmode_t mode)
2808 {
2809 struct drbd_conf *mdev = gd->private_data;
2810 mutex_lock(&drbd_main_mutex);
2811 mdev->open_cnt--;
2812 mutex_unlock(&drbd_main_mutex);
2813 return 0;
2814 }
2815
2816 static void drbd_set_defaults(struct drbd_conf *mdev)
2817 {
2818 /* This way we get a compile error when sync_conf grows
2819 and we forget to initialize it here */
2820 mdev->sync_conf = (struct syncer_conf) {
2821 /* .rate = */ DRBD_RATE_DEF,
2822 /* .after = */ DRBD_AFTER_DEF,
2823 /* .al_extents = */ DRBD_AL_EXTENTS_DEF,
2824 /* .verify_alg = */ {}, 0,
2825 /* .cpu_mask = */ {}, 0,
2826 /* .csums_alg = */ {}, 0,
2827 /* .use_rle = */ 0,
2828 /* .on_no_data = */ DRBD_ON_NO_DATA_DEF,
2829 /* .c_plan_ahead = */ DRBD_C_PLAN_AHEAD_DEF,
2830 /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
2831 /* .c_fill_target = */ DRBD_C_FILL_TARGET_DEF,
2832 /* .c_max_rate = */ DRBD_C_MAX_RATE_DEF,
2833 /* .c_min_rate = */ DRBD_C_MIN_RATE_DEF
2834 };
2835
2836 /* Have to do it this way, because the layout differs between
2837 big endian and little endian */
2838 mdev->state = (union drbd_state) {
2839 { .role = R_SECONDARY,
2840 .peer = R_UNKNOWN,
2841 .conn = C_STANDALONE,
2842 .disk = D_DISKLESS,
2843 .pdsk = D_UNKNOWN,
2844 .susp = 0,
2845 .susp_nod = 0,
2846 .susp_fen = 0
2847 } };
2848 }
2849
2850 void drbd_init_set_defaults(struct drbd_conf *mdev)
2851 {
2852 /* the memset(,0,) did most of this.
2853 * note: only assignments, no allocation in here */
2854
2855 drbd_set_defaults(mdev);
2856
2857 atomic_set(&mdev->ap_bio_cnt, 0);
2858 atomic_set(&mdev->ap_pending_cnt, 0);
2859 atomic_set(&mdev->rs_pending_cnt, 0);
2860 atomic_set(&mdev->unacked_cnt, 0);
2861 atomic_set(&mdev->local_cnt, 0);
2862 atomic_set(&mdev->net_cnt, 0);
2863 atomic_set(&mdev->packet_seq, 0);
2864 atomic_set(&mdev->pp_in_use, 0);
2865 atomic_set(&mdev->pp_in_use_by_net, 0);
2866 atomic_set(&mdev->rs_sect_in, 0);
2867 atomic_set(&mdev->rs_sect_ev, 0);
2868 atomic_set(&mdev->ap_in_flight, 0);
2869
2870 mutex_init(&mdev->md_io_mutex);
2871 mutex_init(&mdev->data.mutex);
2872 mutex_init(&mdev->meta.mutex);
2873 sema_init(&mdev->data.work.s, 0);
2874 sema_init(&mdev->meta.work.s, 0);
2875 mutex_init(&mdev->state_mutex);
2876
2877 spin_lock_init(&mdev->data.work.q_lock);
2878 spin_lock_init(&mdev->meta.work.q_lock);
2879
2880 spin_lock_init(&mdev->al_lock);
2881 spin_lock_init(&mdev->req_lock);
2882 spin_lock_init(&mdev->peer_seq_lock);
2883 spin_lock_init(&mdev->epoch_lock);
2884
2885 INIT_LIST_HEAD(&mdev->active_ee);
2886 INIT_LIST_HEAD(&mdev->sync_ee);
2887 INIT_LIST_HEAD(&mdev->done_ee);
2888 INIT_LIST_HEAD(&mdev->read_ee);
2889 INIT_LIST_HEAD(&mdev->net_ee);
2890 INIT_LIST_HEAD(&mdev->resync_reads);
2891 INIT_LIST_HEAD(&mdev->data.work.q);
2892 INIT_LIST_HEAD(&mdev->meta.work.q);
2893 INIT_LIST_HEAD(&mdev->resync_work.list);
2894 INIT_LIST_HEAD(&mdev->unplug_work.list);
2895 INIT_LIST_HEAD(&mdev->go_diskless.list);
2896 INIT_LIST_HEAD(&mdev->md_sync_work.list);
2897 INIT_LIST_HEAD(&mdev->start_resync_work.list);
2898 INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
2899
2900 mdev->resync_work.cb = w_resync_inactive;
2901 mdev->unplug_work.cb = w_send_write_hint;
2902 mdev->go_diskless.cb = w_go_diskless;
2903 mdev->md_sync_work.cb = w_md_sync;
2904 mdev->bm_io_work.w.cb = w_bitmap_io;
2905 init_timer(&mdev->resync_timer);
2906 init_timer(&mdev->md_sync_timer);
2907 mdev->resync_timer.function = resync_timer_fn;
2908 mdev->resync_timer.data = (unsigned long) mdev;
2909 mdev->md_sync_timer.function = md_sync_timer_fn;
2910 mdev->md_sync_timer.data = (unsigned long) mdev;
2911
2912 init_waitqueue_head(&mdev->misc_wait);
2913 init_waitqueue_head(&mdev->state_wait);
2914 init_waitqueue_head(&mdev->net_cnt_wait);
2915 init_waitqueue_head(&mdev->ee_wait);
2916 init_waitqueue_head(&mdev->al_wait);
2917 init_waitqueue_head(&mdev->seq_wait);
2918
2919 drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
2920 drbd_thread_init(mdev, &mdev->worker, drbd_worker);
2921 drbd_thread_init(mdev, &mdev->asender, drbd_asender);
2922
2923 mdev->agreed_pro_version = PRO_VERSION_MAX;
2924 mdev->write_ordering = WO_bdev_flush;
2925 mdev->resync_wenr = LC_FREE;
2926 }
2927
2928 void drbd_mdev_cleanup(struct drbd_conf *mdev)
2929 {
2930 int i;
2931 if (mdev->receiver.t_state != None)
2932 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
2933 mdev->receiver.t_state);
2934
2935 /* no need to lock it, I'm the only thread alive */
2936 if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
2937 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
2938 mdev->al_writ_cnt =
2939 mdev->bm_writ_cnt =
2940 mdev->read_cnt =
2941 mdev->recv_cnt =
2942 mdev->send_cnt =
2943 mdev->writ_cnt =
2944 mdev->p_size =
2945 mdev->rs_start =
2946 mdev->rs_total =
2947 mdev->rs_failed = 0;
2948 mdev->rs_last_events = 0;
2949 mdev->rs_last_sect_ev = 0;
2950 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2951 mdev->rs_mark_left[i] = 0;
2952 mdev->rs_mark_time[i] = 0;
2953 }
2954 D_ASSERT(mdev->net_conf == NULL);
2955
2956 drbd_set_my_capacity(mdev, 0);
2957 if (mdev->bitmap) {
2958 /* maybe never allocated. */
2959 drbd_bm_resize(mdev, 0, 1);
2960 drbd_bm_cleanup(mdev);
2961 }
2962
2963 drbd_free_resources(mdev);
2964 clear_bit(AL_SUSPENDED, &mdev->flags);
2965
2966 /*
2967 * currently we call drbd_init_ee() only on module load, so
2968 * we may call drbd_release_ee() only on module unload!
2969 */
2970 D_ASSERT(list_empty(&mdev->active_ee));
2971 D_ASSERT(list_empty(&mdev->sync_ee));
2972 D_ASSERT(list_empty(&mdev->done_ee));
2973 D_ASSERT(list_empty(&mdev->read_ee));
2974 D_ASSERT(list_empty(&mdev->net_ee));
2975 D_ASSERT(list_empty(&mdev->resync_reads));
2976 D_ASSERT(list_empty(&mdev->data.work.q));
2977 D_ASSERT(list_empty(&mdev->meta.work.q));
2978 D_ASSERT(list_empty(&mdev->resync_work.list));
2979 D_ASSERT(list_empty(&mdev->unplug_work.list));
2980 D_ASSERT(list_empty(&mdev->go_diskless.list));
2981 }
2982
2983
2984 static void drbd_destroy_mempools(void)
2985 {
2986 struct page *page;
2987
2988 while (drbd_pp_pool) {
2989 page = drbd_pp_pool;
2990 drbd_pp_pool = (struct page *)page_private(page);
2991 __free_page(page);
2992 drbd_pp_vacant--;
2993 }
2994
2995 /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
2996
2997 if (drbd_ee_mempool)
2998 mempool_destroy(drbd_ee_mempool);
2999 if (drbd_request_mempool)
3000 mempool_destroy(drbd_request_mempool);
3001 if (drbd_ee_cache)
3002 kmem_cache_destroy(drbd_ee_cache);
3003 if (drbd_request_cache)
3004 kmem_cache_destroy(drbd_request_cache);
3005 if (drbd_bm_ext_cache)
3006 kmem_cache_destroy(drbd_bm_ext_cache);
3007 if (drbd_al_ext_cache)
3008 kmem_cache_destroy(drbd_al_ext_cache);
3009
3010 drbd_ee_mempool = NULL;
3011 drbd_request_mempool = NULL;
3012 drbd_ee_cache = NULL;
3013 drbd_request_cache = NULL;
3014 drbd_bm_ext_cache = NULL;
3015 drbd_al_ext_cache = NULL;
3016
3017 return;
3018 }
3019
3020 static int drbd_create_mempools(void)
3021 {
3022 struct page *page;
3023 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
3024 int i;
3025
3026 /* prepare our caches and mempools */
3027 drbd_request_mempool = NULL;
3028 drbd_ee_cache = NULL;
3029 drbd_request_cache = NULL;
3030 drbd_bm_ext_cache = NULL;
3031 drbd_al_ext_cache = NULL;
3032 drbd_pp_pool = NULL;
3033
3034 /* caches */
3035 drbd_request_cache = kmem_cache_create(
3036 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
3037 if (drbd_request_cache == NULL)
3038 goto Enomem;
3039
3040 drbd_ee_cache = kmem_cache_create(
3041 "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
3042 if (drbd_ee_cache == NULL)
3043 goto Enomem;
3044
3045 drbd_bm_ext_cache = kmem_cache_create(
3046 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
3047 if (drbd_bm_ext_cache == NULL)
3048 goto Enomem;
3049
3050 drbd_al_ext_cache = kmem_cache_create(
3051 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
3052 if (drbd_al_ext_cache == NULL)
3053 goto Enomem;
3054
3055 /* mempools */
3056 drbd_request_mempool = mempool_create(number,
3057 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
3058 if (drbd_request_mempool == NULL)
3059 goto Enomem;
3060
3061 drbd_ee_mempool = mempool_create(number,
3062 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
3063 if (drbd_ee_mempool == NULL)
3064 goto Enomem;
3065
3066 /* drbd's page pool */
3067 spin_lock_init(&drbd_pp_lock);
3068
3069 for (i = 0; i < number; i++) {
3070 page = alloc_page(GFP_HIGHUSER);
3071 if (!page)
3072 goto Enomem;
3073 set_page_private(page, (unsigned long)drbd_pp_pool);
3074 drbd_pp_pool = page;
3075 }
3076 drbd_pp_vacant = number;
3077
3078 return 0;
3079
3080 Enomem:
3081 drbd_destroy_mempools(); /* in case we allocated some */
3082 return -ENOMEM;
3083 }
3084
3085 static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
3086 void *unused)
3087 {
3088 /* just so we have it. you never know what interesting things we
3089 * might want to do here some day...
3090 */
3091
3092 return NOTIFY_DONE;
3093 }
3094
3095 static struct notifier_block drbd_notifier = {
3096 .notifier_call = drbd_notify_sys,
3097 };
3098
3099 static void drbd_release_ee_lists(struct drbd_conf *mdev)
3100 {
3101 int rr;
3102
3103 rr = drbd_release_ee(mdev, &mdev->active_ee);
3104 if (rr)
3105 dev_err(DEV, "%d EEs in active list found!\n", rr);
3106
3107 rr = drbd_release_ee(mdev, &mdev->sync_ee);
3108 if (rr)
3109 dev_err(DEV, "%d EEs in sync list found!\n", rr);
3110
3111 rr = drbd_release_ee(mdev, &mdev->read_ee);
3112 if (rr)
3113 dev_err(DEV, "%d EEs in read list found!\n", rr);
3114
3115 rr = drbd_release_ee(mdev, &mdev->done_ee);
3116 if (rr)
3117 dev_err(DEV, "%d EEs in done list found!\n", rr);
3118
3119 rr = drbd_release_ee(mdev, &mdev->net_ee);
3120 if (rr)
3121 dev_err(DEV, "%d EEs in net list found!\n", rr);
3122 }
3123
3124 /* caution. no locking.
3125 * currently only used from module cleanup code. */
3126 static void drbd_delete_device(unsigned int minor)
3127 {
3128 struct drbd_conf *mdev = minor_to_mdev(minor);
3129
3130 if (!mdev)
3131 return;
3132
3133 /* paranoia asserts */
3134 if (mdev->open_cnt != 0)
3135 dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
3136 __FILE__ , __LINE__);
3137
3138 ERR_IF (!list_empty(&mdev->data.work.q)) {
3139 struct list_head *lp;
3140 list_for_each(lp, &mdev->data.work.q) {
3141 dev_err(DEV, "lp = %p\n", lp);
3142 }
3143 };
3144 /* end paranoia asserts */
3145
3146 del_gendisk(mdev->vdisk);
3147
3148 /* cleanup stuff that may have been allocated during
3149 * device (re-)configuration or state changes */
3150
3151 if (mdev->this_bdev)
3152 bdput(mdev->this_bdev);
3153
3154 drbd_free_resources(mdev);
3155
3156 drbd_release_ee_lists(mdev);
3157
3158 /* should be free'd on disconnect? */
3159 kfree(mdev->ee_hash);
3160 /*
3161 mdev->ee_hash_s = 0;
3162 mdev->ee_hash = NULL;
3163 */
3164
3165 lc_destroy(mdev->act_log);
3166 lc_destroy(mdev->resync);
3167
3168 kfree(mdev->p_uuid);
3169 /* mdev->p_uuid = NULL; */
3170
3171 kfree(mdev->int_dig_out);
3172 kfree(mdev->int_dig_in);
3173 kfree(mdev->int_dig_vv);
3174
3175 /* cleanup the rest that has been
3176 * allocated from drbd_new_device
3177 * and actually free the mdev itself */
3178 drbd_free_mdev(mdev);
3179 }
3180
3181 static void drbd_cleanup(void)
3182 {
3183 unsigned int i;
3184
3185 unregister_reboot_notifier(&drbd_notifier);
3186
3187 /* first remove proc,
3188 * drbdsetup uses its presence to detect
3189 * whether DRBD is loaded.
3190 * If we got stuck in proc removal,
3191 * but have netlink already deregistered,
3192 * some drbdsetup commands may wait forever
3193 * for an answer.
3194 */
3195 if (drbd_proc)
3196 remove_proc_entry("drbd", NULL);
3197
3198 drbd_nl_cleanup();
3199
3200 if (minor_table) {
3201 i = minor_count;
3202 while (i--)
3203 drbd_delete_device(i);
3204 drbd_destroy_mempools();
3205 }
3206
3207 kfree(minor_table);
3208
3209 unregister_blkdev(DRBD_MAJOR, "drbd");
3210
3211 printk(KERN_INFO "drbd: module cleanup done.\n");
3212 }
3213
3214 /**
3215 * drbd_congested() - Callback for pdflush
3216 * @congested_data: User data
3217 * @bdi_bits: Bits pdflush is currently interested in
3218 *
3219 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
3220 */
3221 static int drbd_congested(void *congested_data, int bdi_bits)
3222 {
3223 struct drbd_conf *mdev = congested_data;
3224 struct request_queue *q;
3225 char reason = '-';
3226 int r = 0;
3227
3228 if (!__inc_ap_bio_cond(mdev)) {
3229 /* DRBD has frozen IO */
3230 r = bdi_bits;
3231 reason = 'd';
3232 goto out;
3233 }
3234
3235 if (get_ldev(mdev)) {
3236 q = bdev_get_queue(mdev->ldev->backing_bdev);
3237 r = bdi_congested(&q->backing_dev_info, bdi_bits);
3238 put_ldev(mdev);
3239 if (r)
3240 reason = 'b';
3241 }
3242
3243 if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
3244 r |= (1 << BDI_async_congested);
3245 reason = reason == 'b' ? 'a' : 'n';
3246 }
3247
3248 out:
3249 mdev->congestion_reason = reason;
3250 return r;
3251 }
3252
3253 struct drbd_conf *drbd_new_device(unsigned int minor)
3254 {
3255 struct drbd_conf *mdev;
3256 struct gendisk *disk;
3257 struct request_queue *q;
3258
3259 /* GFP_KERNEL, we are outside of all write-out paths */
3260 mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
3261 if (!mdev)
3262 return NULL;
3263 if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
3264 goto out_no_cpumask;
3265
3266 mdev->minor = minor;
3267
3268 drbd_init_set_defaults(mdev);
3269
3270 q = blk_alloc_queue(GFP_KERNEL);
3271 if (!q)
3272 goto out_no_q;
3273 mdev->rq_queue = q;
3274 q->queuedata = mdev;
3275
3276 disk = alloc_disk(1);
3277 if (!disk)
3278 goto out_no_disk;
3279 mdev->vdisk = disk;
3280
3281 set_disk_ro(disk, TRUE);
3282
3283 disk->queue = q;
3284 disk->major = DRBD_MAJOR;
3285 disk->first_minor = minor;
3286 disk->fops = &drbd_ops;
3287 sprintf(disk->disk_name, "drbd%d", minor);
3288 disk->private_data = mdev;
3289
3290 mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
3291 /* we have no partitions. we contain only ourselves. */
3292 mdev->this_bdev->bd_contains = mdev->this_bdev;
3293
3294 q->backing_dev_info.congested_fn = drbd_congested;
3295 q->backing_dev_info.congested_data = mdev;
3296
3297 blk_queue_make_request(q, drbd_make_request);
3298 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE >> 9);
3299 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
3300 blk_queue_merge_bvec(q, drbd_merge_bvec);
3301 q->queue_lock = &mdev->req_lock;
3302
3303 mdev->md_io_page = alloc_page(GFP_KERNEL);
3304 if (!mdev->md_io_page)
3305 goto out_no_io_page;
3306
3307 if (drbd_bm_init(mdev))
3308 goto out_no_bitmap;
3309 /* no need to lock access, we are still initializing this minor device. */
3310 if (!tl_init(mdev))
3311 goto out_no_tl;
3312
3313 mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
3314 if (!mdev->app_reads_hash)
3315 goto out_no_app_reads;
3316
3317 mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
3318 if (!mdev->current_epoch)
3319 goto out_no_epoch;
3320
3321 INIT_LIST_HEAD(&mdev->current_epoch->list);
3322 mdev->epochs = 1;
3323
3324 return mdev;
3325
3326 /* out_whatever_else:
3327 kfree(mdev->current_epoch); */
3328 out_no_epoch:
3329 kfree(mdev->app_reads_hash);
3330 out_no_app_reads:
3331 tl_cleanup(mdev);
3332 out_no_tl:
3333 drbd_bm_cleanup(mdev);
3334 out_no_bitmap:
3335 __free_page(mdev->md_io_page);
3336 out_no_io_page:
3337 put_disk(disk);
3338 out_no_disk:
3339 blk_cleanup_queue(q);
3340 out_no_q:
3341 free_cpumask_var(mdev->cpu_mask);
3342 out_no_cpumask:
3343 kfree(mdev);
3344 return NULL;
3345 }
3346
3347 /* counterpart of drbd_new_device.
3348 * last part of drbd_delete_device. */
3349 void drbd_free_mdev(struct drbd_conf *mdev)
3350 {
3351 kfree(mdev->current_epoch);
3352 kfree(mdev->app_reads_hash);
3353 tl_cleanup(mdev);
3354 if (mdev->bitmap) /* should no longer be there. */
3355 drbd_bm_cleanup(mdev);
3356 __free_page(mdev->md_io_page);
3357 put_disk(mdev->vdisk);
3358 blk_cleanup_queue(mdev->rq_queue);
3359 free_cpumask_var(mdev->cpu_mask);
3360 drbd_free_tl_hash(mdev);
3361 kfree(mdev);
3362 }
3363
3364
3365 int __init drbd_init(void)
3366 {
3367 int err;
3368
3369 if (sizeof(struct p_handshake) != 80) {
3370 printk(KERN_ERR
3371 "drbd: never change the size or layout "
3372 "of the HandShake packet.\n");
3373 return -EINVAL;
3374 }
3375
3376 if (1 > minor_count || minor_count > 255) {
3377 printk(KERN_ERR
3378 "drbd: invalid minor_count (%d)\n", minor_count);
3379 #ifdef MODULE
3380 return -EINVAL;
3381 #else
3382 minor_count = 8;
3383 #endif
3384 }
3385
3386 err = drbd_nl_init();
3387 if (err)
3388 return err;
3389
3390 err = register_blkdev(DRBD_MAJOR, "drbd");
3391 if (err) {
3392 printk(KERN_ERR
3393 "drbd: unable to register block device major %d\n",
3394 DRBD_MAJOR);
3395 return err;
3396 }
3397
3398 register_reboot_notifier(&drbd_notifier);
3399
3400 /*
3401 * allocate all necessary structs
3402 */
3403 err = -ENOMEM;
3404
3405 init_waitqueue_head(&drbd_pp_wait);
3406
3407 drbd_proc = NULL; /* play safe for drbd_cleanup */
3408 minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
3409 GFP_KERNEL);
3410 if (!minor_table)
3411 goto Enomem;
3412
3413 err = drbd_create_mempools();
3414 if (err)
3415 goto Enomem;
3416
3417 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
3418 if (!drbd_proc) {
3419 printk(KERN_ERR "drbd: unable to register proc file\n");
3420 goto Enomem;
3421 }
3422
3423 rwlock_init(&global_state_lock);
3424
3425 printk(KERN_INFO "drbd: initialized. "
3426 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3427 API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3428 printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
3429 printk(KERN_INFO "drbd: registered as block device major %d\n",
3430 DRBD_MAJOR);
3431 printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);
3432
3433 return 0; /* Success! */
3434
3435 Enomem:
3436 drbd_cleanup();
3437 if (err == -ENOMEM)
3438 /* currently always the case */
3439 printk(KERN_ERR "drbd: ran out of memory\n");
3440 else
3441 printk(KERN_ERR "drbd: initialization failure\n");
3442 return err;
3443 }
3444
3445 void drbd_free_bc(struct drbd_backing_dev *ldev)
3446 {
3447 if (ldev == NULL)
3448 return;
3449
3450 blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3451 blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3452
3453 kfree(ldev);
3454 }
3455
3456 void drbd_free_sock(struct drbd_conf *mdev)
3457 {
3458 if (mdev->data.socket) {
3459 mutex_lock(&mdev->data.mutex);
3460 kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
3461 sock_release(mdev->data.socket);
3462 mdev->data.socket = NULL;
3463 mutex_unlock(&mdev->data.mutex);
3464 }
3465 if (mdev->meta.socket) {
3466 mutex_lock(&mdev->meta.mutex);
3467 kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
3468 sock_release(mdev->meta.socket);
3469 mdev->meta.socket = NULL;
3470 mutex_unlock(&mdev->meta.mutex);
3471 }
3472 }
3473
3474
3475 void drbd_free_resources(struct drbd_conf *mdev)
3476 {
3477 crypto_free_hash(mdev->csums_tfm);
3478 mdev->csums_tfm = NULL;
3479 crypto_free_hash(mdev->verify_tfm);
3480 mdev->verify_tfm = NULL;
3481 crypto_free_hash(mdev->cram_hmac_tfm);
3482 mdev->cram_hmac_tfm = NULL;
3483 crypto_free_hash(mdev->integrity_w_tfm);
3484 mdev->integrity_w_tfm = NULL;
3485 crypto_free_hash(mdev->integrity_r_tfm);
3486 mdev->integrity_r_tfm = NULL;
3487
3488 drbd_free_sock(mdev);
3489
3490 __no_warn(local,
3491 drbd_free_bc(mdev->ldev);
3492 mdev->ldev = NULL;);
3493 }
3494
3495 /* meta data management */
3496
3497 struct meta_data_on_disk {
3498 u64 la_size; /* last agreed size. */
3499 u64 uuid[UI_SIZE]; /* UUIDs. */
3500 u64 device_uuid;
3501 u64 reserved_u64_1;
3502 u32 flags; /* MDF */
3503 u32 magic;
3504 u32 md_size_sect;
3505 u32 al_offset; /* offset to this block */
3506 u32 al_nr_extents; /* important for restoring the AL */
3507 /* `-- act_log->nr_elements <-- sync_conf.al_extents */
3508 u32 bm_offset; /* offset to the bitmap, from here */
3509 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
3510 u32 reserved_u32[4];
3511
3512 } __packed;
3513
3514 /**
3515 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3516 * @mdev: DRBD device.
3517 */
3518 void drbd_md_sync(struct drbd_conf *mdev)
3519 {
3520 struct meta_data_on_disk *buffer;
3521 sector_t sector;
3522 int i;
3523
3524 del_timer(&mdev->md_sync_timer);
3525 /* timer may be rearmed by drbd_md_mark_dirty() now. */
3526 if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
3527 return;
3528
3529 /* We use D_FAILED here, and not D_ATTACHING, because we try to write
3530 * metadata even if we detach due to a disk failure! */
3531 if (!get_ldev_if_state(mdev, D_FAILED))
3532 return;
3533
3534 mutex_lock(&mdev->md_io_mutex);
3535 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3536 memset(buffer, 0, 512);
3537
3538 buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
3539 for (i = UI_CURRENT; i < UI_SIZE; i++)
3540 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
3541 buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
3542 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
3543
3544 buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
3545 buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
3546 buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
3547 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3548 buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
3549
3550 buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
3551
3552 D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
3553 sector = mdev->ldev->md.md_offset;
3554
3555 if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
3556 /* this was a try anyway ... */
3557 dev_err(DEV, "meta data update failed!\n");
3558 drbd_chk_io_error(mdev, 1, TRUE);
3559 }
3560
3561 /* Update mdev->ldev->md.la_size_sect,
3562 * since we just updated it in the on-disk metadata. */
3563 mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
3564
3565 mutex_unlock(&mdev->md_io_mutex);
3566 put_ldev(mdev);
3567 }
3568
3569 /**
3570 * drbd_md_read() - Reads in the meta data super block
3571 * @mdev: DRBD device.
3572 * @bdev: Device from which the meta data should be read in.
3573 *
3574 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
3575 * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
3576 */
3577 int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3578 {
3579 struct meta_data_on_disk *buffer;
3580 int i, rv = NO_ERROR;
3581
3582 if (!get_ldev_if_state(mdev, D_ATTACHING))
3583 return ERR_IO_MD_DISK;
3584
3585 mutex_lock(&mdev->md_io_mutex);
3586 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3587
3588 if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
3589 /* NOTE: can't do normal error processing here, as this is
3590 called BEFORE the disk is attached */
3591 dev_err(DEV, "Error while reading metadata.\n");
3592 rv = ERR_IO_MD_DISK;
3593 goto err;
3594 }
3595
3596 if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
3597 dev_err(DEV, "Error while reading metadata, magic not found.\n");
3598 rv = ERR_MD_INVALID;
3599 goto err;
3600 }
3601 if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
3602 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
3603 be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
3604 rv = ERR_MD_INVALID;
3605 goto err;
3606 }
3607 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3608 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3609 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3610 rv = ERR_MD_INVALID;
3611 goto err;
3612 }
3613 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3614 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3615 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3616 rv = ERR_MD_INVALID;
3617 goto err;
3618 }
3619
3620 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3621 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3622 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3623 rv = ERR_MD_INVALID;
3624 goto err;
3625 }
3626
3627 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
3628 for (i = UI_CURRENT; i < UI_SIZE; i++)
3629 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3630 bdev->md.flags = be32_to_cpu(buffer->flags);
3631 mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
3632 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3633
3634 if (mdev->sync_conf.al_extents < 7)
3635 mdev->sync_conf.al_extents = 127;
3636
3637 err:
3638 mutex_unlock(&mdev->md_io_mutex);
3639 put_ldev(mdev);
3640
3641 return rv;
3642 }
3643
3644 static void debug_drbd_uuid(struct drbd_conf *mdev, enum drbd_uuid_index index)
3645 {
3646 static char *uuid_str[UI_EXTENDED_SIZE] = {
3647 [UI_CURRENT] = "CURRENT",
3648 [UI_BITMAP] = "BITMAP",
3649 [UI_HISTORY_START] = "HISTORY_START",
3650 [UI_HISTORY_END] = "HISTORY_END",
3651 [UI_SIZE] = "SIZE",
3652 [UI_FLAGS] = "FLAGS",
3653 };
3654
3655 if (index >= UI_EXTENDED_SIZE) {
3656 dev_warn(DEV, " uuid_index >= EXTENDED_SIZE\n");
3657 return;
3658 }
3659
3660 dynamic_dev_dbg(DEV, " uuid[%s] now %016llX\n",
3661 uuid_str[index],
3662 (unsigned long long)mdev->ldev->md.uuid[index]);
3663 }
3664
3665
3666 /**
3667 * drbd_md_mark_dirty() - Mark meta data super block as dirty
3668 * @mdev: DRBD device.
3669 *
3670 * Call this function if you change anything that should be written to
3671 * the meta-data super block. This function sets MD_DIRTY, and starts a
3672 * timer that ensures drbd_md_sync() gets called within five seconds.
3673 */
3674 #ifdef DEBUG
3675 void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
3676 {
3677 if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
3678 mod_timer(&mdev->md_sync_timer, jiffies + HZ);
3679 mdev->last_md_mark_dirty.line = line;
3680 mdev->last_md_mark_dirty.func = func;
3681 }
3682 }
3683 #else
3684 void drbd_md_mark_dirty(struct drbd_conf *mdev)
3685 {
3686 if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
3687 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
3688 }
3689 #endif
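
/*
 * Illustrative sketch (hypothetical helper): the usual pattern around
 * drbd_md_mark_dirty().  Any change to mdev->ldev->md is followed by a
 * mark-dirty, so that either an explicit drbd_md_sync() or the
 * md_sync_timer/worker path further below writes it out.
 */
#if 0
static void example_md_change(struct drbd_conf *mdev) __must_hold(local)
{
	mdev->ldev->md.flags |= MDF_FULL_SYNC;	/* modify the in-memory copy */
	drbd_md_mark_dirty(mdev);		/* schedule the write-out */
	drbd_md_sync(mdev);			/* or force it out right away */
}
#endif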
3690
3691 static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3692 {
3693 int i;
3694
3695 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) {
3696 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
3697 debug_drbd_uuid(mdev, i+1);
3698 }
3699 }
3700
3701 void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3702 {
3703 if (idx == UI_CURRENT) {
3704 if (mdev->state.role == R_PRIMARY)
3705 val |= 1;
3706 else
3707 val &= ~((u64)1);
3708
3709 drbd_set_ed_uuid(mdev, val);
3710 }
3711
3712 mdev->ldev->md.uuid[idx] = val;
3713 debug_drbd_uuid(mdev, idx);
3714 drbd_md_mark_dirty(mdev);
3715 }
3716
3717
3718 void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3719 {
3720 if (mdev->ldev->md.uuid[idx]) {
3721 drbd_uuid_move_history(mdev);
3722 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
3723 debug_drbd_uuid(mdev, UI_HISTORY_START);
3724 }
3725 _drbd_uuid_set(mdev, idx, val);
3726 }
3727
3728 /**
3729 * drbd_uuid_new_current() - Creates a new current UUID
3730 * @mdev: DRBD device.
3731 *
3732 * Creates a new current UUID, and rotates the old current UUID into
3733 * the bitmap slot. Causes an incremental resync upon next connect.
3734 */
3735 void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3736 {
3737 u64 val;
3738
3739 dev_info(DEV, "Creating new current UUID\n");
3740 D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0);
3741 mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
3742 debug_drbd_uuid(mdev, UI_BITMAP);
3743
3744 get_random_bytes(&val, sizeof(u64));
3745 _drbd_uuid_set(mdev, UI_CURRENT, val);
3746 /* get it to stable storage _now_ */
3747 drbd_md_sync(mdev);
3748 }
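
/*
 * Illustrative sketch (hypothetical helper): drbd_uuid_new_current() must
 * be called with a local disk reference held (__must_hold(local)), so
 * callers typically bracket it with get_ldev()/put_ldev().
 */
#if 0
static void example_new_data_generation(struct drbd_conf *mdev)
{
	if (get_ldev(mdev)) {
		drbd_uuid_new_current(mdev); /* old current UUID rotates into UI_BITMAP */
		put_ldev(mdev);
	}
}
#endif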
3749
3750 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3751 {
3752 if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3753 return;
3754
3755 if (val == 0) {
3756 drbd_uuid_move_history(mdev);
3757 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
3758 mdev->ldev->md.uuid[UI_BITMAP] = 0;
3759 debug_drbd_uuid(mdev, UI_HISTORY_START);
3760 debug_drbd_uuid(mdev, UI_BITMAP);
3761 } else {
3762 if (mdev->ldev->md.uuid[UI_BITMAP])
3763 dev_warn(DEV, "bm UUID already set\n");
3764
3765 mdev->ldev->md.uuid[UI_BITMAP] = val;
3766 mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1);
3767
3768 debug_drbd_uuid(mdev, UI_BITMAP);
3769 }
3770 drbd_md_mark_dirty(mdev);
3771 }
3772
3773 /**
3774 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3775 * @mdev: DRBD device.
3776 *
3777 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3778 */
3779 int drbd_bmio_set_n_write(struct drbd_conf *mdev)
3780 {
3781 int rv = -EIO;
3782
3783 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3784 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
3785 drbd_md_sync(mdev);
3786 drbd_bm_set_all(mdev);
3787
3788 rv = drbd_bm_write(mdev);
3789
3790 if (!rv) {
3791 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
3792 drbd_md_sync(mdev);
3793 }
3794
3795 put_ldev(mdev);
3796 }
3797
3798 return rv;
3799 }
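
/*
 * Illustrative sketch (hypothetical helper, made-up "why" string):
 * drbd_bmio_set_n_write() and drbd_bmio_clear_n_write() below are not
 * normally called directly, but handed to drbd_bitmap_io() or
 * drbd_queue_bitmap_io() as io_fn.
 */
#if 0
static int example_full_sync_now(struct drbd_conf *mdev)
{
	/* synchronous variant; must not run in worker context */
	return drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
			      "example: set all bits");
}
#endif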
3800
3801 /**
3802 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3803 * @mdev: DRBD device.
3804 *
3805 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3806 */
3807 int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
3808 {
3809 int rv = -EIO;
3810
3811 drbd_resume_al(mdev);
3812 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3813 drbd_bm_clear_all(mdev);
3814 rv = drbd_bm_write(mdev);
3815 put_ldev(mdev);
3816 }
3817
3818 return rv;
3819 }
3820
3821 static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3822 {
3823 struct bm_io_work *work = container_of(w, struct bm_io_work, w);
3824 int rv;
3825
3826 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
3827
3828 drbd_bm_lock(mdev, work->why);
3829 rv = work->io_fn(mdev);
3830 drbd_bm_unlock(mdev);
3831
3832 clear_bit(BITMAP_IO, &mdev->flags);
3833 smp_mb__after_clear_bit();
3834 wake_up(&mdev->misc_wait);
3835
3836 if (work->done)
3837 work->done(mdev, rv);
3838
3839 clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
3840 work->why = NULL;
3841
3842 return 1;
3843 }
3844
3845 void drbd_ldev_destroy(struct drbd_conf *mdev)
3846 {
3847 lc_destroy(mdev->resync);
3848 mdev->resync = NULL;
3849 lc_destroy(mdev->act_log);
3850 mdev->act_log = NULL;
3851 __no_warn(local,
3852 drbd_free_bc(mdev->ldev);
3853 mdev->ldev = NULL;);
3854
3855 if (mdev->md_io_tmpp) {
3856 __free_page(mdev->md_io_tmpp);
3857 mdev->md_io_tmpp = NULL;
3858 }
3859 clear_bit(GO_DISKLESS, &mdev->flags);
3860 }
3861
3862 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3863 {
3864 D_ASSERT(mdev->state.disk == D_FAILED);
3865 /* We cannot assert local_cnt == 0 here, as get_ldev_if_state will
3866 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
3867 * the protected members anymore, though, so once local_cnt reaches zero
3868 * again (via put_ldev), it will be safe to free them. */
3869 drbd_force_state(mdev, NS(disk, D_DISKLESS));
3870 return 1;
3871 }
3872
3873 void drbd_go_diskless(struct drbd_conf *mdev)
3874 {
3875 D_ASSERT(mdev->state.disk == D_FAILED);
3876 if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
3877 drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
3878 }
3879
3880 /**
3881 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3882 * @mdev: DRBD device.
3883 * @io_fn: IO callback to be called when bitmap IO is possible
3884 * @done: callback to be called after the bitmap IO was performed
3885 * @why: Descriptive text of the reason for doing the IO
3886 *
3887 * While IO on the bitmap is in progress, application IO is frozen; this
3888 * ensures that drbd_set_out_of_sync() cannot be called. This function MAY
3889 * ONLY be called from worker context. It MUST NOT be used while a previous
3890 * piece of bitmap IO work is still pending!
3891 */
3892 void drbd_queue_bitmap_io(struct drbd_conf *mdev,
3893 int (*io_fn)(struct drbd_conf *),
3894 void (*done)(struct drbd_conf *, int),
3895 char *why)
3896 {
3897 D_ASSERT(current == mdev->worker.task);
3898
3899 D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
3900 D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
3901 D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
3902 if (mdev->bm_io_work.why)
3903 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
3904 why, mdev->bm_io_work.why);
3905
3906 mdev->bm_io_work.io_fn = io_fn;
3907 mdev->bm_io_work.done = done;
3908 mdev->bm_io_work.why = why;
3909
3910 spin_lock_irq(&mdev->req_lock);
3911 set_bit(BITMAP_IO, &mdev->flags);
3912 if (atomic_read(&mdev->ap_bio_cnt) == 0) {
3913 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
3914 drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
3915 }
3916 spin_unlock_irq(&mdev->req_lock);
3917 }
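
/*
 * Illustrative sketch (hypothetical callback and helper names): queueing
 * bitmap IO from the worker, with the result reported through 'done'.
 */
#if 0
static void example_bm_io_done(struct drbd_conf *mdev, int rv)
{
	if (rv)
		dev_err(DEV, "example bitmap IO failed: %d\n", rv);
}

static void example_queue_full_sync(struct drbd_conf *mdev)
{
	/* must run on mdev->worker.task, with no bitmap IO work pending */
	drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write,
			     example_bm_io_done, "example: set all bits");
}
#endif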
3918
3919 /**
3920 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
3921 * @mdev: DRBD device.
3922 * @io_fn: IO callback to be called when bitmap IO is possible
3923 * @why: Descriptive text of the reason for doing the IO
3924 *
3925 * Freezes application IO while the actual bitmap IO operation runs. This
3926 * function MUST NOT be called from worker context.
3927 */
3928 int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why)
3929 {
3930 int rv;
3931
3932 D_ASSERT(current != mdev->worker.task);
3933
3934 drbd_suspend_io(mdev);
3935
3936 drbd_bm_lock(mdev, why);
3937 rv = io_fn(mdev);
3938 drbd_bm_unlock(mdev);
3939
3940 drbd_resume_io(mdev);
3941
3942 return rv;
3943 }
3944
3945 void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
3946 {
3947 if ((mdev->ldev->md.flags & flag) != flag) {
3948 drbd_md_mark_dirty(mdev);
3949 mdev->ldev->md.flags |= flag;
3950 }
3951 }
3952
3953 void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
3954 {
3955 if ((mdev->ldev->md.flags & flag) != 0) {
3956 drbd_md_mark_dirty(mdev);
3957 mdev->ldev->md.flags &= ~flag;
3958 }
3959 }
3960 int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
3961 {
3962 return (bdev->md.flags & flag) != 0;
3963 }
3964
3965 static void md_sync_timer_fn(unsigned long data)
3966 {
3967 struct drbd_conf *mdev = (struct drbd_conf *) data;
3968
3969 drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
3970 }
3971
3972 static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3973 {
3974 dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
3975 #ifdef DEBUG
3976 dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
3977 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
3978 #endif
3979 drbd_md_sync(mdev);
3980 return 1;
3981 }
3982
3983 #ifdef CONFIG_DRBD_FAULT_INJECTION
3984 /* Fault insertion support including random number generator shamelessly
3985 * stolen from kernel/rcutorture.c */
3986 struct fault_random_state {
3987 unsigned long state;
3988 unsigned long count;
3989 };
3990
3991 #define FAULT_RANDOM_MULT 39916801 /* prime */
3992 #define FAULT_RANDOM_ADD 479001701 /* prime */
3993 #define FAULT_RANDOM_REFRESH 10000
3994
3995 /*
3996 * Crude but fast random-number generator. Uses a linear congruential
3997 * generator, with occasional help from get_random_bytes().
3998 */
3999 static unsigned long
4000 _drbd_fault_random(struct fault_random_state *rsp)
4001 {
4002 long refresh;
4003
4004 if (!rsp->count--) {
4005 get_random_bytes(&refresh, sizeof(refresh));
4006 rsp->state += refresh;
4007 rsp->count = FAULT_RANDOM_REFRESH;
4008 }
4009 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
4010 return swahw32(rsp->state);
4011 }
4012
4013 static char *_drbd_fault_str(unsigned int type)
4014 {
4015 static char *_faults[] = {
4016 [DRBD_FAULT_MD_WR] = "Meta-data write",
4017 [DRBD_FAULT_MD_RD] = "Meta-data read",
4018 [DRBD_FAULT_RS_WR] = "Resync write",
4019 [DRBD_FAULT_RS_RD] = "Resync read",
4020 [DRBD_FAULT_DT_WR] = "Data write",
4021 [DRBD_FAULT_DT_RD] = "Data read",
4022 [DRBD_FAULT_DT_RA] = "Data read ahead",
4023 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
4024 [DRBD_FAULT_AL_EE] = "EE allocation",
4025 [DRBD_FAULT_RECEIVE] = "receive data corruption",
4026 };
4027
4028 return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
4029 }
4030
4031 unsigned int
4032 _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
4033 {
4034 static struct fault_random_state rrs = {0, 0};
4035
4036 unsigned int ret = (
4037 (fault_devs == 0 ||
4038 ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
4039 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
4040
4041 if (ret) {
4042 fault_count++;
4043
4044 if (__ratelimit(&drbd_ratelimit_state))
4045 dev_warn(DEV, "***Simulating %s failure\n",
4046 _drbd_fault_str(type));
4047 }
4048
4049 return ret;
4050 }
4051 #endif
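
/*
 * Illustrative sketch (hypothetical helper): a typical way the fault
 * injection hook is consulted on an IO submission path, with
 * DRBD_FAULT_MD_WR chosen arbitrarily.  In the driver this usually goes
 * through a small drbd_insert_fault() wrapper in drbd_int.h.
 */
#if 0
static int example_submit_md_write(struct drbd_conf *mdev)
{
	if (drbd_insert_fault(mdev, DRBD_FAULT_MD_WR)) {
		/* pretend the meta-data write failed without submitting it */
		return -EIO;
	}
	/* ... actually submit the bio here ... */
	return 0;
}
#endif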
4052
4053 const char *drbd_buildtag(void)
4054 {
4055 /* When DRBD is built from the external (out-of-tree) sources, this
4056 contains a reference to the git hash of that source code. */
4057
4058 static char buildtag[38] = "\0uilt-in"; /* filled in below: srcversion, or 'b' -> "built-in" */
4059
4060 if (buildtag[0] == 0) {
4061 #ifdef CONFIG_MODULES
4062 if (THIS_MODULE != NULL)
4063 sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
4064 else
4065 #endif
4066 buildtag[0] = 'b';
4067 }
4068
4069 return buildtag;
4070 }
4071
4072 module_init(drbd_init)
4073 module_exit(drbd_cleanup)
4074
4075 EXPORT_SYMBOL(drbd_conn_str);
4076 EXPORT_SYMBOL(drbd_role_str);
4077 EXPORT_SYMBOL(drbd_disk_str);
4078 EXPORT_SYMBOL(drbd_set_st_err_str);