1 /*
2 drbd.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11 from Logicworks, Inc. for making SDP replication support possible.
12
13 drbd is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
16 any later version.
17
18 drbd is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with drbd; see the file COPYING. If not, write to
25 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26
27 */
28
29 #include <linux/module.h>
30 #include <linux/drbd.h>
31 #include <asm/uaccess.h>
32 #include <asm/types.h>
33 #include <net/sock.h>
34 #include <linux/ctype.h>
35 #include <linux/mutex.h>
36 #include <linux/fs.h>
37 #include <linux/file.h>
38 #include <linux/proc_fs.h>
39 #include <linux/init.h>
40 #include <linux/mm.h>
41 #include <linux/memcontrol.h>
42 #include <linux/mm_inline.h>
43 #include <linux/slab.h>
44 #include <linux/random.h>
45 #include <linux/reboot.h>
46 #include <linux/notifier.h>
47 #include <linux/kthread.h>
48
49 #define __KERNEL_SYSCALLS__
50 #include <linux/unistd.h>
51 #include <linux/vmalloc.h>
52
53 #include <linux/drbd_limits.h>
54 #include "drbd_int.h"
55 #include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
56
57 #include "drbd_vli.h"
58
59 struct after_state_chg_work {
60 struct drbd_work w;
61 union drbd_state os;
62 union drbd_state ns;
63 enum chg_state_flags flags;
64 struct completion *done;
65 };
66
67 static DEFINE_MUTEX(drbd_main_mutex);
68 int drbdd_init(struct drbd_thread *);
69 int drbd_worker(struct drbd_thread *);
70 int drbd_asender(struct drbd_thread *);
71
72 int drbd_init(void);
73 static int drbd_open(struct block_device *bdev, fmode_t mode);
74 static int drbd_release(struct gendisk *gd, fmode_t mode);
75 static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
76 static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
77 union drbd_state ns, enum chg_state_flags flags);
78 static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
79 static void md_sync_timer_fn(unsigned long data);
80 static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
81 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
82
83 MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
84 "Lars Ellenberg <lars@linbit.com>");
85 MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
86 MODULE_VERSION(REL_VERSION);
87 MODULE_LICENSE("GPL");
88 MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices ("
89 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
90 MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
91
92 #include <linux/moduleparam.h>
93 /* allow_open_on_secondary */
94 MODULE_PARM_DESC(allow_oos, "DONT USE!");
95 /* thanks to these macros, if compiled into the kernel (not as a module),
96 * this becomes the boot parameter drbd.minor_count */
97 module_param(minor_count, uint, 0444);
98 module_param(disable_sendpage, bool, 0644);
99 module_param(allow_oos, bool, 0);
100 module_param(cn_idx, uint, 0444);
101 module_param(proc_details, int, 0644);
102
103 #ifdef CONFIG_DRBD_FAULT_INJECTION
104 int enable_faults;
105 int fault_rate;
106 static int fault_count;
107 int fault_devs;
108 /* bitmap of enabled faults */
109 module_param(enable_faults, int, 0664);
110 /* fault rate % value - applies to all enabled faults */
111 module_param(fault_rate, int, 0664);
112 /* count of faults inserted */
113 module_param(fault_count, int, 0664);
114 /* bitmap of devices to insert faults on */
115 module_param(fault_devs, int, 0644);
116 #endif
117
118 /* module parameter, defined */
119 unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
120 bool disable_sendpage;
121 bool allow_oos;
122 unsigned int cn_idx = CN_IDX_DRBD;
123 int proc_details; /* Detail level in proc drbd*/
124
125 /* Module parameter for setting the user mode helper program
126 * to run. Default is /sbin/drbdadm */
127 char usermode_helper[80] = "/sbin/drbdadm";
128
129 module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
130
131 /* in 2.6.x, our device mapping and config info contains our virtual gendisks
132 * as member "struct gendisk *vdisk;"
133 */
134 struct drbd_conf **minor_table;
135
136 struct kmem_cache *drbd_request_cache;
137 struct kmem_cache *drbd_ee_cache; /* epoch entries */
138 struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
139 struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
140 mempool_t *drbd_request_mempool;
141 mempool_t *drbd_ee_mempool;
142
143 /* I do not use a standard mempool, because:
144 1) I want to hand out the pre-allocated objects first.
145 2) I want to be able to interrupt sleeping allocation with a signal.
146 Note: This is a singly linked list, the next pointer is the private
147 member of struct page.
148 */
149 struct page *drbd_pp_pool;
150 spinlock_t drbd_pp_lock;
151 int drbd_pp_vacant;
152 wait_queue_head_t drbd_pp_wait;
153
154 DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
155
156 static const struct block_device_operations drbd_ops = {
157 .owner = THIS_MODULE,
158 .open = drbd_open,
159 .release = drbd_release,
160 };
161
162 #define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
163
164 #ifdef __CHECKER__
165 /* When checking with sparse, and this is an inline function, sparse will
166 give tons of false positives. When this is a real function, sparse works.
167 */
168 int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
169 {
170 int io_allowed;
171
172 atomic_inc(&mdev->local_cnt);
173 io_allowed = (mdev->state.disk >= mins);
174 if (!io_allowed) {
175 if (atomic_dec_and_test(&mdev->local_cnt))
176 wake_up(&mdev->misc_wait);
177 }
178 return io_allowed;
179 }
180
181 #endif
182
183 /**
184 * DOC: The transfer log
185 *
186 * The transfer log is a singly linked list of &struct drbd_tl_epoch objects.
187 * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
188 * of the list. There is always at least one &struct drbd_tl_epoch object.
189 *
190 * Each &struct drbd_tl_epoch has a circular double linked list of requests
191 * attached.
192 */
193 static int tl_init(struct drbd_conf *mdev)
194 {
195 struct drbd_tl_epoch *b;
196
197 /* during device minor initialization, we may well use GFP_KERNEL */
198 b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
199 if (!b)
200 return 0;
201 INIT_LIST_HEAD(&b->requests);
202 INIT_LIST_HEAD(&b->w.list);
203 b->next = NULL;
204 b->br_number = 4711;
205 b->n_writes = 0;
206 b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
207
208 mdev->oldest_tle = b;
209 mdev->newest_tle = b;
210 INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
211 INIT_LIST_HEAD(&mdev->barrier_acked_requests);
212
213 mdev->tl_hash = NULL;
214 mdev->tl_hash_s = 0;
215
216 return 1;
217 }
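/*
 * Illustrative sketch, not part of the original driver: how the transfer
 * log set up by tl_init() is laid out.  Epochs form a singly linked list
 * from oldest_tle to newest_tle, each carrying its own list of requests.
 * A real caller would have to hold req_lock while walking it.
 */
static void __maybe_unused tl_walk_example(struct drbd_conf *mdev)
{
	struct drbd_tl_epoch *b;
	struct drbd_request *req;

	for (b = mdev->oldest_tle; b != NULL; b = b->next)
		list_for_each_entry(req, &b->requests, tl_requests)
			dev_dbg(DEV, "req %p in epoch #%u\n", req, b->br_number);
}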
218
219 static void tl_cleanup(struct drbd_conf *mdev)
220 {
221 D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
222 D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
223 kfree(mdev->oldest_tle);
224 mdev->oldest_tle = NULL;
225 kfree(mdev->unused_spare_tle);
226 mdev->unused_spare_tle = NULL;
227 kfree(mdev->tl_hash);
228 mdev->tl_hash = NULL;
229 mdev->tl_hash_s = 0;
230 }
231
232 /**
233 * _tl_add_barrier() - Adds a barrier to the transfer log
234 * @mdev: DRBD device.
235 * @new: Barrier to be added before the current head of the TL.
236 *
237 * The caller must hold the req_lock.
238 */
239 void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
240 {
241 struct drbd_tl_epoch *newest_before;
242
243 INIT_LIST_HEAD(&new->requests);
244 INIT_LIST_HEAD(&new->w.list);
245 new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
246 new->next = NULL;
247 new->n_writes = 0;
248
249 newest_before = mdev->newest_tle;
250 /* never send a barrier number == 0, because that is special-cased
251 * when using TCQ for our write ordering code */
252 new->br_number = (newest_before->br_number+1) ?: 1;
253 if (mdev->newest_tle != new) {
254 mdev->newest_tle->next = new;
255 mdev->newest_tle = new;
256 }
257 }
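/*
 * Sketch of the "?:" idiom used above (illustrative only): "(x + 1) ?: 1"
 * evaluates to x + 1 unless that would be 0, in which case it yields 1,
 * so barrier number 0 is never handed out even after the counter wraps.
 */
static unsigned int __maybe_unused next_barrier_nr_example(unsigned int cur)
{
	return (cur + 1) ?: 1;	/* 0xffffffff -> 1, everything else -> cur + 1 */
}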
258
259 /**
260 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
261 * @mdev: DRBD device.
262 * @barrier_nr: Expected identifier of the DRBD write barrier packet.
263 * @set_size: Expected number of requests before that barrier.
264 *
265 * In case the passed barrier_nr or set_size does not match the oldest
266 * &struct drbd_tl_epoch object, this function will cause a termination
267 * of the connection.
268 */
269 void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
270 unsigned int set_size)
271 {
272 struct drbd_tl_epoch *b, *nob; /* next old barrier */
273 struct list_head *le, *tle;
274 struct drbd_request *r;
275
276 spin_lock_irq(&mdev->req_lock);
277
278 b = mdev->oldest_tle;
279
280 /* first some paranoia code */
281 if (b == NULL) {
282 dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
283 barrier_nr);
284 goto bail;
285 }
286 if (b->br_number != barrier_nr) {
287 dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
288 barrier_nr, b->br_number);
289 goto bail;
290 }
291 if (b->n_writes != set_size) {
292 dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
293 barrier_nr, set_size, b->n_writes);
294 goto bail;
295 }
296
297 /* Clean up list of requests processed during current epoch */
298 list_for_each_safe(le, tle, &b->requests) {
299 r = list_entry(le, struct drbd_request, tl_requests);
300 _req_mod(r, barrier_acked);
301 }
302 /* There could be requests on the list waiting for completion
303 of the write to the local disk. To avoid corruption of the
304 slab's data structures we have to remove the list's head.
305
306 Also there could have been a barrier ack out of sequence, overtaking
307 the write acks - which would be a bug and violating write ordering.
308 To not deadlock in case we lose connection while such requests are
309 still pending, we need some way to find them for the
310 _req_mod(connection_lost_while_pending).
311
312 These have been list_move'd to the out_of_sequence_requests list in
313 _req_mod(, barrier_acked) above.
314 */
315 list_splice_init(&b->requests, &mdev->barrier_acked_requests);
316
317 nob = b->next;
318 if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
319 _tl_add_barrier(mdev, b);
320 if (nob)
321 mdev->oldest_tle = nob;
322 /* if nob == NULL b was the only barrier, and becomes the new
323 barrier. Therefore mdev->oldest_tle already points to b */
324 } else {
325 D_ASSERT(nob != NULL);
326 mdev->oldest_tle = nob;
327 kfree(b);
328 }
329
330 spin_unlock_irq(&mdev->req_lock);
331 dec_ap_pending(mdev);
332
333 return;
334
335 bail:
336 spin_unlock_irq(&mdev->req_lock);
337 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
338 }
339
340
341 /**
342 * _tl_restart() - Walks the transfer log, and applies an action to all requests
343 * @mdev: DRBD device.
344 * @what: The action/event to perform with all request objects
345 *
346 * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
347 * restart_frozen_disk_io.
348 */
349 static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
350 {
351 struct drbd_tl_epoch *b, *tmp, **pn;
352 struct list_head *le, *tle, carry_reads;
353 struct drbd_request *req;
354 int rv, n_writes, n_reads;
355
356 b = mdev->oldest_tle;
357 pn = &mdev->oldest_tle;
358 while (b) {
359 n_writes = 0;
360 n_reads = 0;
361 INIT_LIST_HEAD(&carry_reads);
362 list_for_each_safe(le, tle, &b->requests) {
363 req = list_entry(le, struct drbd_request, tl_requests);
364 rv = _req_mod(req, what);
365
366 n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
367 n_reads += (rv & MR_READ) >> MR_READ_SHIFT;
368 }
369 tmp = b->next;
370
371 if (n_writes) {
372 if (what == resend) {
373 b->n_writes = n_writes;
374 if (b->w.cb == NULL) {
375 b->w.cb = w_send_barrier;
376 inc_ap_pending(mdev);
377 set_bit(CREATE_BARRIER, &mdev->flags);
378 }
379
380 drbd_queue_work(&mdev->data.work, &b->w);
381 }
382 pn = &b->next;
383 } else {
384 if (n_reads)
385 list_add(&carry_reads, &b->requests);
386 /* there could still be requests on that ring list,
387 * in case local io is still pending */
388 list_del(&b->requests);
389
390 /* dec_ap_pending corresponding to queue_barrier.
391 * the newest barrier may not have been queued yet,
392 * in which case w.cb is still NULL. */
393 if (b->w.cb != NULL)
394 dec_ap_pending(mdev);
395
396 if (b == mdev->newest_tle) {
397 /* recycle, but reinit! */
398 D_ASSERT(tmp == NULL);
399 INIT_LIST_HEAD(&b->requests);
400 list_splice(&carry_reads, &b->requests);
401 INIT_LIST_HEAD(&b->w.list);
402 b->w.cb = NULL;
403 b->br_number = net_random();
404 b->n_writes = 0;
405
406 *pn = b;
407 break;
408 }
409 *pn = tmp;
410 kfree(b);
411 }
412 b = tmp;
413 list_splice(&carry_reads, &b->requests);
414 }
415
416 /* Actions operating on the disk state also want to work on
417 requests that got barrier acked. */
418 switch (what) {
419 case fail_frozen_disk_io:
420 case restart_frozen_disk_io:
421 list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
422 req = list_entry(le, struct drbd_request, tl_requests);
423 _req_mod(req, what);
424 }
425
426 case connection_lost_while_pending:
427 case resend:
428 break;
429 default:
430 dev_err(DEV, "what = %d in _tl_restart()\n", what);
431 }
432 }
433
434
435 /**
436 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
437 * @mdev: DRBD device.
438 *
439 * This is called after the connection to the peer was lost. The storage covered
440 * by the requests on the transfer log gets marked as out of sync. Called from the
441 * receiver thread and the worker thread.
442 */
443 void tl_clear(struct drbd_conf *mdev)
444 {
445 struct list_head *le, *tle;
446 struct drbd_request *r;
447
448 spin_lock_irq(&mdev->req_lock);
449
450 _tl_restart(mdev, connection_lost_while_pending);
451
452 /* we expect this list to be empty. */
453 D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
454
455 /* but just in case, clean it up anyway! */
456 list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
457 r = list_entry(le, struct drbd_request, tl_requests);
458 /* It would be nice to complete outside of spinlock.
459 * But this is easier for now. */
460 _req_mod(r, connection_lost_while_pending);
461 }
462
463 /* ensure bit indicating barrier is required is clear */
464 clear_bit(CREATE_BARRIER, &mdev->flags);
465
466 memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
467
468 spin_unlock_irq(&mdev->req_lock);
469 }
470
471 void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
472 {
473 spin_lock_irq(&mdev->req_lock);
474 _tl_restart(mdev, what);
475 spin_unlock_irq(&mdev->req_lock);
476 }
477
478 /**
479 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
480 * @mdev: DRBD device.
481 */
482 void tl_abort_disk_io(struct drbd_conf *mdev)
483 {
484 struct drbd_tl_epoch *b;
485 struct list_head *le, *tle;
486 struct drbd_request *req;
487
488 spin_lock_irq(&mdev->req_lock);
489 b = mdev->oldest_tle;
490 while (b) {
491 list_for_each_safe(le, tle, &b->requests) {
492 req = list_entry(le, struct drbd_request, tl_requests);
493 if (!(req->rq_state & RQ_LOCAL_PENDING))
494 continue;
495 _req_mod(req, abort_disk_io);
496 }
497 b = b->next;
498 }
499
500 list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
501 req = list_entry(le, struct drbd_request, tl_requests);
502 if (!(req->rq_state & RQ_LOCAL_PENDING))
503 continue;
504 _req_mod(req, abort_disk_io);
505 }
506
507 spin_unlock_irq(&mdev->req_lock);
508 }
509
510 /**
511 * cl_wide_st_chg() - true if the state change is a cluster wide one
512 * @mdev: DRBD device.
513 * @os: old (current) state.
514 * @ns: new (wanted) state.
515 */
516 static int cl_wide_st_chg(struct drbd_conf *mdev,
517 union drbd_state os, union drbd_state ns)
518 {
519 return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
520 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
521 (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
522 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
523 (os.disk != D_FAILED && ns.disk == D_FAILED))) ||
524 (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
525 (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
526 }
527
528 enum drbd_state_rv
529 drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
530 union drbd_state mask, union drbd_state val)
531 {
532 unsigned long flags;
533 union drbd_state os, ns;
534 enum drbd_state_rv rv;
535
536 spin_lock_irqsave(&mdev->req_lock, flags);
537 os = mdev->state;
538 ns.i = (os.i & ~mask.i) | val.i;
539 rv = _drbd_set_state(mdev, ns, f, NULL);
540 ns = mdev->state;
541 spin_unlock_irqrestore(&mdev->req_lock, flags);
542
543 return rv;
544 }
545
546 /**
547 * drbd_force_state() - Impose a change which happens outside our control on our state
548 * @mdev: DRBD device.
549 * @mask: mask of state bits to change.
550 * @val: value of new state bits.
551 */
552 void drbd_force_state(struct drbd_conf *mdev,
553 union drbd_state mask, union drbd_state val)
554 {
555 drbd_change_state(mdev, CS_HARD, mask, val);
556 }
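/*
 * Usage sketch (mirrors the call at the end of tl_release() above): NS()
 * builds the mask/val pair for a single state field, and drbd_force_state()
 * applies the change with CS_HARD, i.e. without the usual validation.
 */
static void __maybe_unused force_protocol_error_example(struct drbd_conf *mdev)
{
	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
}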
557
558 static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
559 static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
560 union drbd_state,
561 union drbd_state);
562 enum sanitize_state_warnings {
563 NO_WARNING,
564 ABORTED_ONLINE_VERIFY,
565 ABORTED_RESYNC,
566 CONNECTION_LOST_NEGOTIATING,
567 IMPLICITLY_UPGRADED_DISK,
568 IMPLICITLY_UPGRADED_PDSK,
569 };
570 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
571 union drbd_state ns, enum sanitize_state_warnings *warn);
572 int drbd_send_state_req(struct drbd_conf *,
573 union drbd_state, union drbd_state);
574
575 static enum drbd_state_rv
576 _req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
577 union drbd_state val)
578 {
579 union drbd_state os, ns;
580 unsigned long flags;
581 enum drbd_state_rv rv;
582
583 if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
584 return SS_CW_SUCCESS;
585
586 if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
587 return SS_CW_FAILED_BY_PEER;
588
589 rv = 0;
590 spin_lock_irqsave(&mdev->req_lock, flags);
591 os = mdev->state;
592 ns.i = (os.i & ~mask.i) | val.i;
593 ns = sanitize_state(mdev, os, ns, NULL);
594
595 if (!cl_wide_st_chg(mdev, os, ns))
596 rv = SS_CW_NO_NEED;
597 if (!rv) {
598 rv = is_valid_state(mdev, ns);
599 if (rv == SS_SUCCESS) {
600 rv = is_valid_state_transition(mdev, ns, os);
601 if (rv == SS_SUCCESS)
602 rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
603 }
604 }
605 spin_unlock_irqrestore(&mdev->req_lock, flags);
606
607 return rv;
608 }
609
610 /**
611 * drbd_req_state() - Perform a possibly cluster-wide state change
612 * @mdev: DRBD device.
613 * @mask: mask of state bits to change.
614 * @val: value of new state bits.
615 * @f: flags
616 *
617 * Should not be called directly, use drbd_request_state() or
618 * _drbd_request_state().
619 */
620 static enum drbd_state_rv
621 drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
622 union drbd_state val, enum chg_state_flags f)
623 {
624 struct completion done;
625 unsigned long flags;
626 union drbd_state os, ns;
627 enum drbd_state_rv rv;
628
629 init_completion(&done);
630
631 if (f & CS_SERIALIZE)
632 mutex_lock(&mdev->state_mutex);
633
634 spin_lock_irqsave(&mdev->req_lock, flags);
635 os = mdev->state;
636 ns.i = (os.i & ~mask.i) | val.i;
637 ns = sanitize_state(mdev, os, ns, NULL);
638
639 if (cl_wide_st_chg(mdev, os, ns)) {
640 rv = is_valid_state(mdev, ns);
641 if (rv == SS_SUCCESS)
642 rv = is_valid_state_transition(mdev, ns, os);
643 spin_unlock_irqrestore(&mdev->req_lock, flags);
644
645 if (rv < SS_SUCCESS) {
646 if (f & CS_VERBOSE)
647 print_st_err(mdev, os, ns, rv);
648 goto abort;
649 }
650
651 drbd_state_lock(mdev);
652 if (!drbd_send_state_req(mdev, mask, val)) {
653 drbd_state_unlock(mdev);
654 rv = SS_CW_FAILED_BY_PEER;
655 if (f & CS_VERBOSE)
656 print_st_err(mdev, os, ns, rv);
657 goto abort;
658 }
659
660 wait_event(mdev->state_wait,
661 (rv = _req_st_cond(mdev, mask, val)));
662
663 if (rv < SS_SUCCESS) {
664 drbd_state_unlock(mdev);
665 if (f & CS_VERBOSE)
666 print_st_err(mdev, os, ns, rv);
667 goto abort;
668 }
669 spin_lock_irqsave(&mdev->req_lock, flags);
670 os = mdev->state;
671 ns.i = (os.i & ~mask.i) | val.i;
672 rv = _drbd_set_state(mdev, ns, f, &done);
673 drbd_state_unlock(mdev);
674 } else {
675 rv = _drbd_set_state(mdev, ns, f, &done);
676 }
677
678 spin_unlock_irqrestore(&mdev->req_lock, flags);
679
680 if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
681 D_ASSERT(current != mdev->worker.task);
682 wait_for_completion(&done);
683 }
684
685 abort:
686 if (f & CS_SERIALIZE)
687 mutex_unlock(&mdev->state_mutex);
688
689 return rv;
690 }
691
692 /**
693 * _drbd_request_state() - Request a state change (with flags)
694 * @mdev: DRBD device.
695 * @mask: mask of state bits to change.
696 * @val: value of new state bits.
697 * @f: flags
698 *
699 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
700 * flag, or when logging of failed state change requests is not desired.
701 */
702 enum drbd_state_rv
703 _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
704 union drbd_state val, enum chg_state_flags f)
705 {
706 enum drbd_state_rv rv;
707
708 wait_event(mdev->state_wait,
709 (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
710
711 return rv;
712 }
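/*
 * Usage sketch, modelled on abw_start_sync() further down: request a
 * validated (possibly cluster-wide) state change and check the SS_ code.
 */
static void __maybe_unused request_connected_example(struct drbd_conf *mdev)
{
	enum drbd_state_rv rv;

	rv = _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
	if (rv < SS_SUCCESS)
		dev_err(DEV, "going back to C_CONNECTED failed: %d\n", rv);
}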
713
714 static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
715 {
716 dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
717 name,
718 drbd_conn_str(ns.conn),
719 drbd_role_str(ns.role),
720 drbd_role_str(ns.peer),
721 drbd_disk_str(ns.disk),
722 drbd_disk_str(ns.pdsk),
723 is_susp(ns) ? 's' : 'r',
724 ns.aftr_isp ? 'a' : '-',
725 ns.peer_isp ? 'p' : '-',
726 ns.user_isp ? 'u' : '-'
727 );
728 }
729
730 void print_st_err(struct drbd_conf *mdev, union drbd_state os,
731 union drbd_state ns, enum drbd_state_rv err)
732 {
733 if (err == SS_IN_TRANSIENT_STATE)
734 return;
735 dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
736 print_st(mdev, " state", os);
737 print_st(mdev, "wanted", ns);
738 }
739
740
741 /**
742 * is_valid_state() - Returns an SS_ error code if ns is not valid
743 * @mdev: DRBD device.
744 * @ns: State to consider.
745 */
746 static enum drbd_state_rv
747 is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
748 {
749 /* See drbd_state_sw_errors in drbd_strings.c */
750
751 enum drbd_fencing_p fp;
752 enum drbd_state_rv rv = SS_SUCCESS;
753
754 fp = FP_DONT_CARE;
755 if (get_ldev(mdev)) {
756 fp = mdev->ldev->dc.fencing;
757 put_ldev(mdev);
758 }
759
760 if (get_net_conf(mdev)) {
761 if (!mdev->net_conf->two_primaries &&
762 ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
763 rv = SS_TWO_PRIMARIES;
764 put_net_conf(mdev);
765 }
766
767 if (rv <= 0)
768 /* already found a reason to abort */;
769 else if (ns.role == R_SECONDARY && mdev->open_cnt)
770 rv = SS_DEVICE_IN_USE;
771
772 else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
773 rv = SS_NO_UP_TO_DATE_DISK;
774
775 else if (fp >= FP_RESOURCE &&
776 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
777 rv = SS_PRIMARY_NOP;
778
779 else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
780 rv = SS_NO_UP_TO_DATE_DISK;
781
782 else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
783 rv = SS_NO_LOCAL_DISK;
784
785 else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
786 rv = SS_NO_REMOTE_DISK;
787
788 else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
789 rv = SS_NO_UP_TO_DATE_DISK;
790
791 else if ((ns.conn == C_CONNECTED ||
792 ns.conn == C_WF_BITMAP_S ||
793 ns.conn == C_SYNC_SOURCE ||
794 ns.conn == C_PAUSED_SYNC_S) &&
795 ns.disk == D_OUTDATED)
796 rv = SS_CONNECTED_OUTDATES;
797
798 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
799 (mdev->sync_conf.verify_alg[0] == 0))
800 rv = SS_NO_VERIFY_ALG;
801
802 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
803 mdev->agreed_pro_version < 88)
804 rv = SS_NOT_SUPPORTED;
805
806 else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
807 rv = SS_CONNECTED_OUTDATES;
808
809 return rv;
810 }
811
812 /**
813 * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
814 * @mdev: DRBD device.
815 * @ns: new state.
816 * @os: old state.
817 */
818 static enum drbd_state_rv
819 is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
820 union drbd_state os)
821 {
822 enum drbd_state_rv rv = SS_SUCCESS;
823
824 if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
825 os.conn > C_CONNECTED)
826 rv = SS_RESYNC_RUNNING;
827
828 if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
829 rv = SS_ALREADY_STANDALONE;
830
831 if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
832 rv = SS_IS_DISKLESS;
833
834 if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
835 rv = SS_NO_NET_CONFIG;
836
837 if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
838 rv = SS_LOWER_THAN_OUTDATED;
839
840 if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
841 rv = SS_IN_TRANSIENT_STATE;
842
843 if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
844 rv = SS_IN_TRANSIENT_STATE;
845
846 if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
847 rv = SS_NEED_CONNECTION;
848
849 if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
850 ns.conn != os.conn && os.conn > C_CONNECTED)
851 rv = SS_RESYNC_RUNNING;
852
853 if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
854 os.conn < C_CONNECTED)
855 rv = SS_NEED_CONNECTION;
856
857 if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
858 && os.conn < C_WF_REPORT_PARAMS)
859 rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
860
861 return rv;
862 }
863
864 static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_warnings warn)
865 {
866 static const char *msg_table[] = {
867 [NO_WARNING] = "",
868 [ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
869 [ABORTED_RESYNC] = "Resync aborted.",
870 [CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
871 [IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
872 [IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
873 };
874
875 if (warn != NO_WARNING)
876 dev_warn(DEV, "%s\n", msg_table[warn]);
877 }
878
879 /**
880 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
881 * @mdev: DRBD device.
882 * @os: old state.
883 * @ns: new state.
884 * @warn: where to place an optional warning code, may be NULL.
885 *
886 * When we lose connection, we have to set the state of the peer's disk (pdsk)
887 * to D_UNKNOWN. This rule and many more along those lines are in this function.
888 */
889 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
890 union drbd_state ns, enum sanitize_state_warnings *warn)
891 {
892 enum drbd_fencing_p fp;
893 enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
894
895 if (warn)
896 *warn = NO_WARNING;
897
898 fp = FP_DONT_CARE;
899 if (get_ldev(mdev)) {
900 fp = mdev->ldev->dc.fencing;
901 put_ldev(mdev);
902 }
903
904 /* Do not let network errors affect a device whose network part is not configured */
905 if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
906 os.conn <= C_DISCONNECTING)
907 ns.conn = os.conn;
908
909 /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
910 * If you try to go into some Sync* state, that shall fail (elsewhere). */
911 if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
912 ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_CONNECTED)
913 ns.conn = os.conn;
914
915 /* we cannot fail (again) if we already detached */
916 if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
917 ns.disk = D_DISKLESS;
918
919 /* After C_DISCONNECTING only C_STANDALONE may follow */
920 if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
921 ns.conn = os.conn;
922
923 if (ns.conn < C_CONNECTED) {
924 ns.peer_isp = 0;
925 ns.peer = R_UNKNOWN;
926 if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
927 ns.pdsk = D_UNKNOWN;
928 }
929
930 /* Clear the aftr_isp when becoming unconfigured */
931 if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
932 ns.aftr_isp = 0;
933
934 /* Abort resync if a disk fails/detaches */
935 if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
936 (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
937 if (warn)
938 *warn = os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
939 ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
940 ns.conn = C_CONNECTED;
941 }
942
943 /* Connection breaks down before we finished "Negotiating" */
944 if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
945 get_ldev_if_state(mdev, D_NEGOTIATING)) {
946 if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
947 ns.disk = mdev->new_state_tmp.disk;
948 ns.pdsk = mdev->new_state_tmp.pdsk;
949 } else {
950 if (warn)
951 *warn = CONNECTION_LOST_NEGOTIATING;
952 ns.disk = D_DISKLESS;
953 ns.pdsk = D_UNKNOWN;
954 }
955 put_ldev(mdev);
956 }
957
958 /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
959 if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
960 if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
961 ns.disk = D_UP_TO_DATE;
962 if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
963 ns.pdsk = D_UP_TO_DATE;
964 }
965
966 /* Implications of the connection state on the disk states */
967 disk_min = D_DISKLESS;
968 disk_max = D_UP_TO_DATE;
969 pdsk_min = D_INCONSISTENT;
970 pdsk_max = D_UNKNOWN;
971 switch ((enum drbd_conns)ns.conn) {
972 case C_WF_BITMAP_T:
973 case C_PAUSED_SYNC_T:
974 case C_STARTING_SYNC_T:
975 case C_WF_SYNC_UUID:
976 case C_BEHIND:
977 disk_min = D_INCONSISTENT;
978 disk_max = D_OUTDATED;
979 pdsk_min = D_UP_TO_DATE;
980 pdsk_max = D_UP_TO_DATE;
981 break;
982 case C_VERIFY_S:
983 case C_VERIFY_T:
984 disk_min = D_UP_TO_DATE;
985 disk_max = D_UP_TO_DATE;
986 pdsk_min = D_UP_TO_DATE;
987 pdsk_max = D_UP_TO_DATE;
988 break;
989 case C_CONNECTED:
990 disk_min = D_DISKLESS;
991 disk_max = D_UP_TO_DATE;
992 pdsk_min = D_DISKLESS;
993 pdsk_max = D_UP_TO_DATE;
994 break;
995 case C_WF_BITMAP_S:
996 case C_PAUSED_SYNC_S:
997 case C_STARTING_SYNC_S:
998 case C_AHEAD:
999 disk_min = D_UP_TO_DATE;
1000 disk_max = D_UP_TO_DATE;
1001 pdsk_min = D_INCONSISTENT;
1002 pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
1003 break;
1004 case C_SYNC_TARGET:
1005 disk_min = D_INCONSISTENT;
1006 disk_max = D_INCONSISTENT;
1007 pdsk_min = D_UP_TO_DATE;
1008 pdsk_max = D_UP_TO_DATE;
1009 break;
1010 case C_SYNC_SOURCE:
1011 disk_min = D_UP_TO_DATE;
1012 disk_max = D_UP_TO_DATE;
1013 pdsk_min = D_INCONSISTENT;
1014 pdsk_max = D_INCONSISTENT;
1015 break;
1016 case C_STANDALONE:
1017 case C_DISCONNECTING:
1018 case C_UNCONNECTED:
1019 case C_TIMEOUT:
1020 case C_BROKEN_PIPE:
1021 case C_NETWORK_FAILURE:
1022 case C_PROTOCOL_ERROR:
1023 case C_TEAR_DOWN:
1024 case C_WF_CONNECTION:
1025 case C_WF_REPORT_PARAMS:
1026 case C_MASK:
1027 break;
1028 }
1029 if (ns.disk > disk_max)
1030 ns.disk = disk_max;
1031
1032 if (ns.disk < disk_min) {
1033 if (warn)
1034 *warn = IMPLICITLY_UPGRADED_DISK;
1035 ns.disk = disk_min;
1036 }
1037 if (ns.pdsk > pdsk_max)
1038 ns.pdsk = pdsk_max;
1039
1040 if (ns.pdsk < pdsk_min) {
1041 if (warn)
1042 *warn = IMPLICITLY_UPGRADED_PDSK;
1043 ns.pdsk = pdsk_min;
1044 }
1045
1046 if (fp == FP_STONITH &&
1047 (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
1048 !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
1049 ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
1050
1051 if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
1052 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
1053 !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
1054 ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */
1055
1056 if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
1057 if (ns.conn == C_SYNC_SOURCE)
1058 ns.conn = C_PAUSED_SYNC_S;
1059 if (ns.conn == C_SYNC_TARGET)
1060 ns.conn = C_PAUSED_SYNC_T;
1061 } else {
1062 if (ns.conn == C_PAUSED_SYNC_S)
1063 ns.conn = C_SYNC_SOURCE;
1064 if (ns.conn == C_PAUSED_SYNC_T)
1065 ns.conn = C_SYNC_TARGET;
1066 }
1067
1068 return ns;
1069 }
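/*
 * Illustrative sketch, not part of the driver: callers never need to spell
 * out the implied corrections themselves, because sanitize_state() is run
 * on every requested transition (normally under req_lock).  E.g. a request
 * that only changes the connection still gets its peer fields normalized
 * (peer -> R_UNKNOWN, pdsk adjusted) as described in the comment above.
 */
static union drbd_state __maybe_unused sanitize_example(struct drbd_conf *mdev)
{
	union drbd_state os = mdev->state;
	union drbd_state ns = os;

	ns.conn = C_STANDALONE;		/* caller only touches the connection... */
	return sanitize_state(mdev, os, ns, NULL);	/* ...the rest is implied */
}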
1070
1071 /* helper for __drbd_set_state */
1072 static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
1073 {
1074 if (mdev->agreed_pro_version < 90)
1075 mdev->ov_start_sector = 0;
1076 mdev->rs_total = drbd_bm_bits(mdev);
1077 mdev->ov_position = 0;
1078 if (cs == C_VERIFY_T) {
1079 /* starting online verify from an arbitrary position
1080 * does not fit well into the existing protocol.
1081 * on C_VERIFY_T, we initialize ov_left and friends
1082 * implicitly in receive_DataRequest once the
1083 * first P_OV_REQUEST is received */
1084 mdev->ov_start_sector = ~(sector_t)0;
1085 } else {
1086 unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
1087 if (bit >= mdev->rs_total) {
1088 mdev->ov_start_sector =
1089 BM_BIT_TO_SECT(mdev->rs_total - 1);
1090 mdev->rs_total = 1;
1091 } else
1092 mdev->rs_total -= bit;
1093 mdev->ov_position = mdev->ov_start_sector;
1094 }
1095 mdev->ov_left = mdev->rs_total;
1096 }
1097
1098 static void drbd_resume_al(struct drbd_conf *mdev)
1099 {
1100 if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
1101 dev_info(DEV, "Resumed AL updates\n");
1102 }
1103
1104 /**
1105 * __drbd_set_state() - Set a new DRBD state
1106 * @mdev: DRBD device.
1107 * @ns: new state.
1108 * @flags: Flags
1109 * @done: Optional completion, that will get completed after the after_state_ch() finished
1110 *
1111 * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
1112 */
1113 enum drbd_state_rv
1114 __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
1115 enum chg_state_flags flags, struct completion *done)
1116 {
1117 union drbd_state os;
1118 enum drbd_state_rv rv = SS_SUCCESS;
1119 enum sanitize_state_warnings ssw;
1120 struct after_state_chg_work *ascw;
1121
1122 os = mdev->state;
1123
1124 ns = sanitize_state(mdev, os, ns, &ssw);
1125
1126 if (ns.i == os.i)
1127 return SS_NOTHING_TO_DO;
1128
1129 if (!(flags & CS_HARD)) {
1130 /* pre-state-change checks ; only look at ns */
1131 /* See drbd_state_sw_errors in drbd_strings.c */
1132
1133 rv = is_valid_state(mdev, ns);
1134 if (rv < SS_SUCCESS) {
1135 /* If the old state was illegal as well, then let
1136 this happen...*/
1137
1138 if (is_valid_state(mdev, os) == rv)
1139 rv = is_valid_state_transition(mdev, ns, os);
1140 } else
1141 rv = is_valid_state_transition(mdev, ns, os);
1142 }
1143
1144 if (rv < SS_SUCCESS) {
1145 if (flags & CS_VERBOSE)
1146 print_st_err(mdev, os, ns, rv);
1147 return rv;
1148 }
1149
1150 print_sanitize_warnings(mdev, ssw);
1151
1152 {
1153 char *pbp, pb[300];
1154 pbp = pb;
1155 *pbp = 0;
1156 if (ns.role != os.role)
1157 pbp += sprintf(pbp, "role( %s -> %s ) ",
1158 drbd_role_str(os.role),
1159 drbd_role_str(ns.role));
1160 if (ns.peer != os.peer)
1161 pbp += sprintf(pbp, "peer( %s -> %s ) ",
1162 drbd_role_str(os.peer),
1163 drbd_role_str(ns.peer));
1164 if (ns.conn != os.conn)
1165 pbp += sprintf(pbp, "conn( %s -> %s ) ",
1166 drbd_conn_str(os.conn),
1167 drbd_conn_str(ns.conn));
1168 if (ns.disk != os.disk)
1169 pbp += sprintf(pbp, "disk( %s -> %s ) ",
1170 drbd_disk_str(os.disk),
1171 drbd_disk_str(ns.disk));
1172 if (ns.pdsk != os.pdsk)
1173 pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
1174 drbd_disk_str(os.pdsk),
1175 drbd_disk_str(ns.pdsk));
1176 if (is_susp(ns) != is_susp(os))
1177 pbp += sprintf(pbp, "susp( %d -> %d ) ",
1178 is_susp(os),
1179 is_susp(ns));
1180 if (ns.aftr_isp != os.aftr_isp)
1181 pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
1182 os.aftr_isp,
1183 ns.aftr_isp);
1184 if (ns.peer_isp != os.peer_isp)
1185 pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
1186 os.peer_isp,
1187 ns.peer_isp);
1188 if (ns.user_isp != os.user_isp)
1189 pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
1190 os.user_isp,
1191 ns.user_isp);
1192 dev_info(DEV, "%s\n", pb);
1193 }
1194
1195 /* solve the race between becoming unconfigured,
1196 * worker doing the cleanup, and
1197 * admin reconfiguring us:
1198 * on (re)configure, first set CONFIG_PENDING,
1199 * then wait for a potentially exiting worker,
1200 * start the worker, and schedule one no_op.
1201 * then proceed with configuration.
1202 */
1203 if (ns.disk == D_DISKLESS &&
1204 ns.conn == C_STANDALONE &&
1205 ns.role == R_SECONDARY &&
1206 !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
1207 set_bit(DEVICE_DYING, &mdev->flags);
1208
1209 /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
1210 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
1211 * drbd_ldev_destroy() won't happen before our corresponding
1212 * after_state_ch works run, where we put_ldev again. */
1213 if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
1214 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
1215 atomic_inc(&mdev->local_cnt);
1216
1217 mdev->state = ns;
1218
1219 if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
1220 drbd_print_uuids(mdev, "attached to UUIDs");
1221
1222 wake_up(&mdev->misc_wait);
1223 wake_up(&mdev->state_wait);
1224
1225 /* aborted verify run. log the last position */
1226 if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
1227 ns.conn < C_CONNECTED) {
1228 mdev->ov_start_sector =
1229 BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
1230 dev_info(DEV, "Online Verify reached sector %llu\n",
1231 (unsigned long long)mdev->ov_start_sector);
1232 }
1233
1234 if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
1235 (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
1236 dev_info(DEV, "Syncer continues.\n");
1237 mdev->rs_paused += (long)jiffies
1238 -(long)mdev->rs_mark_time[mdev->rs_last_mark];
1239 if (ns.conn == C_SYNC_TARGET)
1240 mod_timer(&mdev->resync_timer, jiffies);
1241 }
1242
1243 if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
1244 (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
1245 dev_info(DEV, "Resync suspended\n");
1246 mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
1247 }
1248
1249 if (os.conn == C_CONNECTED &&
1250 (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
1251 unsigned long now = jiffies;
1252 int i;
1253
1254 set_ov_position(mdev, ns.conn);
1255 mdev->rs_start = now;
1256 mdev->rs_last_events = 0;
1257 mdev->rs_last_sect_ev = 0;
1258 mdev->ov_last_oos_size = 0;
1259 mdev->ov_last_oos_start = 0;
1260
1261 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1262 mdev->rs_mark_left[i] = mdev->ov_left;
1263 mdev->rs_mark_time[i] = now;
1264 }
1265
1266 drbd_rs_controller_reset(mdev);
1267
1268 if (ns.conn == C_VERIFY_S) {
1269 dev_info(DEV, "Starting Online Verify from sector %llu\n",
1270 (unsigned long long)mdev->ov_position);
1271 mod_timer(&mdev->resync_timer, jiffies);
1272 }
1273 }
1274
1275 if (get_ldev(mdev)) {
1276 u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
1277 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
1278 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
1279
1280 if (test_bit(CRASHED_PRIMARY, &mdev->flags))
1281 mdf |= MDF_CRASHED_PRIMARY;
1282 if (mdev->state.role == R_PRIMARY ||
1283 (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
1284 mdf |= MDF_PRIMARY_IND;
1285 if (mdev->state.conn > C_WF_REPORT_PARAMS)
1286 mdf |= MDF_CONNECTED_IND;
1287 if (mdev->state.disk > D_INCONSISTENT)
1288 mdf |= MDF_CONSISTENT;
1289 if (mdev->state.disk > D_OUTDATED)
1290 mdf |= MDF_WAS_UP_TO_DATE;
1291 if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
1292 mdf |= MDF_PEER_OUT_DATED;
1293 if (mdf != mdev->ldev->md.flags) {
1294 mdev->ldev->md.flags = mdf;
1295 drbd_md_mark_dirty(mdev);
1296 }
1297 if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
1298 drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
1299 put_ldev(mdev);
1300 }
1301
1302 /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
1303 if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
1304 os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
1305 set_bit(CONSIDER_RESYNC, &mdev->flags);
1306
1307 /* Receiver should clean up itself */
1308 if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
1309 drbd_thread_stop_nowait(&mdev->receiver);
1310
1311 /* Now the receiver finished cleaning up itself, it should die */
1312 if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
1313 drbd_thread_stop_nowait(&mdev->receiver);
1314
1315 /* Upon network failure, we need to restart the receiver. */
1316 if (os.conn > C_WF_CONNECTION &&
1317 ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
1318 drbd_thread_restart_nowait(&mdev->receiver);
1319
1320 /* Resume AL writing if we get a connection */
1321 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1322 drbd_resume_al(mdev);
1323
1324 ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
1325 if (ascw) {
1326 ascw->os = os;
1327 ascw->ns = ns;
1328 ascw->flags = flags;
1329 ascw->w.cb = w_after_state_ch;
1330 ascw->done = done;
1331 drbd_queue_work(&mdev->data.work, &ascw->w);
1332 } else {
1333 dev_warn(DEV, "Could not kmalloc an ascw\n");
1334 }
1335
1336 return rv;
1337 }
1338
1339 static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1340 {
1341 struct after_state_chg_work *ascw =
1342 container_of(w, struct after_state_chg_work, w);
1343 after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
1344 if (ascw->flags & CS_WAIT_COMPLETE) {
1345 D_ASSERT(ascw->done != NULL);
1346 complete(ascw->done);
1347 }
1348 kfree(ascw);
1349
1350 return 1;
1351 }
1352
1353 static void abw_start_sync(struct drbd_conf *mdev, int rv)
1354 {
1355 if (rv) {
1356 dev_err(DEV, "Writing the bitmap failed not starting resync.\n");
1357 _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
1358 return;
1359 }
1360
1361 switch (mdev->state.conn) {
1362 case C_STARTING_SYNC_T:
1363 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
1364 break;
1365 case C_STARTING_SYNC_S:
1366 drbd_start_resync(mdev, C_SYNC_SOURCE);
1367 break;
1368 }
1369 }
1370
1371 int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
1372 int (*io_fn)(struct drbd_conf *),
1373 char *why, enum bm_flag flags)
1374 {
1375 int rv;
1376
1377 D_ASSERT(current == mdev->worker.task);
1378
1379 /* open coded non-blocking drbd_suspend_io(mdev); */
1380 set_bit(SUSPEND_IO, &mdev->flags);
1381
1382 drbd_bm_lock(mdev, why, flags);
1383 rv = io_fn(mdev);
1384 drbd_bm_unlock(mdev);
1385
1386 drbd_resume_io(mdev);
1387
1388 return rv;
1389 }
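/*
 * Usage sketch, taken from the demote paths in after_state_ch() below:
 * run a bitmap I/O function from worker context, with application I/O
 * suspended and the bitmap locked for the duration.
 */
static int __maybe_unused bitmap_writeout_example(struct drbd_conf *mdev)
{
	return drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
			"example bitmap writeout", BM_LOCKED_TEST_ALLOWED);
}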
1390
1391 /**
1392 * after_state_ch() - Perform after state change actions that may sleep
1393 * @mdev: DRBD device.
1394 * @os: old state.
1395 * @ns: new state.
1396 * @flags: Flags
1397 */
1398 static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1399 union drbd_state ns, enum chg_state_flags flags)
1400 {
1401 enum drbd_fencing_p fp;
1402 enum drbd_req_event what = nothing;
1403 union drbd_state nsm = (union drbd_state){ .i = -1 };
1404
1405 if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
1406 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1407 if (mdev->p_uuid)
1408 mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
1409 }
1410
1411 fp = FP_DONT_CARE;
1412 if (get_ldev(mdev)) {
1413 fp = mdev->ldev->dc.fencing;
1414 put_ldev(mdev);
1415 }
1416
1417 /* Inform userspace about the change... */
1418 drbd_bcast_state(mdev, ns);
1419
1420 if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
1421 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
1422 drbd_khelper(mdev, "pri-on-incon-degr");
1423
1424 /* Here we have the actions that are performed after a
1425 state change. This function might sleep */
1426
1427 if (os.disk <= D_NEGOTIATING && ns.disk > D_NEGOTIATING)
1428 mod_timer(&mdev->request_timer, jiffies + HZ);
1429
1430 nsm.i = -1;
1431 if (ns.susp_nod) {
1432 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1433 what = resend;
1434
1435 if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
1436 ns.disk > D_NEGOTIATING)
1437 what = restart_frozen_disk_io;
1438
1439 if (what != nothing)
1440 nsm.susp_nod = 0;
1441 }
1442
1443 if (ns.susp_fen) {
1444 /* case1: The outdate peer handler is successful: */
1445 if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
1446 tl_clear(mdev);
1447 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1448 drbd_uuid_new_current(mdev);
1449 clear_bit(NEW_CUR_UUID, &mdev->flags);
1450 }
1451 spin_lock_irq(&mdev->req_lock);
1452 _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
1453 spin_unlock_irq(&mdev->req_lock);
1454 }
1455 /* case2: The connection was established again: */
1456 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1457 clear_bit(NEW_CUR_UUID, &mdev->flags);
1458 what = resend;
1459 nsm.susp_fen = 0;
1460 }
1461 }
1462
1463 if (what != nothing) {
1464 spin_lock_irq(&mdev->req_lock);
1465 _tl_restart(mdev, what);
1466 nsm.i &= mdev->state.i;
1467 _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
1468 spin_unlock_irq(&mdev->req_lock);
1469 }
1470
1471 /* Became sync source. With protocol >= 96, we still need to send out
1472 * the sync uuid now. Need to do that before any drbd_send_state, or
1473 * the other side may go "paused sync" before receiving the sync uuids,
1474 * which is unexpected. */
1475 if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
1476 (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
1477 mdev->agreed_pro_version >= 96 && get_ldev(mdev)) {
1478 drbd_gen_and_send_sync_uuid(mdev);
1479 put_ldev(mdev);
1480 }
1481
1482 /* Do not change the order of the if above and the two below... */
1483 if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
1484 drbd_send_uuids(mdev);
1485 drbd_send_state(mdev, ns);
1486 }
1487 /* No point in queuing send_bitmap if we don't have a connection
1488 * anymore, so check also the _current_ state, not only the new state
1489 * at the time this work was queued. */
1490 if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
1491 mdev->state.conn == C_WF_BITMAP_S)
1492 drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
1493 "send_bitmap (WFBitMapS)",
1494 BM_LOCKED_TEST_ALLOWED);
1495
1496 /* Lost contact to peer's copy of the data */
1497 if ((os.pdsk >= D_INCONSISTENT &&
1498 os.pdsk != D_UNKNOWN &&
1499 os.pdsk != D_OUTDATED)
1500 && (ns.pdsk < D_INCONSISTENT ||
1501 ns.pdsk == D_UNKNOWN ||
1502 ns.pdsk == D_OUTDATED)) {
1503 if (get_ldev(mdev)) {
1504 if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
1505 mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
1506 if (is_susp(mdev->state)) {
1507 set_bit(NEW_CUR_UUID, &mdev->flags);
1508 } else {
1509 drbd_uuid_new_current(mdev);
1510 drbd_send_uuids(mdev);
1511 }
1512 }
1513 put_ldev(mdev);
1514 }
1515 }
1516
1517 if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
1518 if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY &&
1519 mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
1520 drbd_uuid_new_current(mdev);
1521 drbd_send_uuids(mdev);
1522 }
1523 /* D_DISKLESS Peer becomes secondary */
1524 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
1525 /* We may still be Primary ourselves.
1526 * No harm done if the bitmap still changes,
1527 * redirtied pages will follow later. */
1528 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
1529 "demote diskless peer", BM_LOCKED_SET_ALLOWED);
1530 put_ldev(mdev);
1531 }
1532
1533 /* Write out all changed bits on demote.
1534 * Though, no need to do that just yet
1535 * if there is a resync going on still */
1536 if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
1537 mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
1538 /* No changes to the bitmap expected this time, so assert that,
1539 * even though no harm was done if it did change. */
1540 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
1541 "demote", BM_LOCKED_TEST_ALLOWED);
1542 put_ldev(mdev);
1543 }
1544
1545 /* Last part of the attaching process ... */
1546 if (ns.conn >= C_CONNECTED &&
1547 os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
1548 drbd_send_sizes(mdev, 0, 0); /* to start sync... */
1549 drbd_send_uuids(mdev);
1550 drbd_send_state(mdev, ns);
1551 }
1552
1553 /* We want to pause/continue resync, tell peer. */
1554 if (ns.conn >= C_CONNECTED &&
1555 ((os.aftr_isp != ns.aftr_isp) ||
1556 (os.user_isp != ns.user_isp)))
1557 drbd_send_state(mdev, ns);
1558
1559 /* In case one of the isp bits got set, suspend other devices. */
1560 if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
1561 (ns.aftr_isp || ns.peer_isp || ns.user_isp))
1562 suspend_other_sg(mdev);
1563
1564 /* Make sure the peer gets informed about any state
1565 changes (ISP bits) while we were in WFReportParams. */
1566 if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
1567 drbd_send_state(mdev, ns);
1568
1569 if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
1570 drbd_send_state(mdev, ns);
1571
1572 /* We are in the process of starting a full sync... */
1573 if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
1574 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
1575 /* no other bitmap changes expected during this phase */
1576 drbd_queue_bitmap_io(mdev,
1577 &drbd_bmio_set_n_write, &abw_start_sync,
1578 "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
1579
1580 /* We are invalidating ourselves... */
1581 if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
1582 os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
1583 /* other bitmap operation expected during this phase */
1584 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
1585 "set_n_write from invalidate", BM_LOCKED_MASK);
1586
1587 /* first half of local IO error, failure to attach,
1588 * or administrative detach */
1589 if (os.disk != D_FAILED && ns.disk == D_FAILED) {
1590 enum drbd_io_error_p eh = EP_PASS_ON;
1591 int was_io_error = 0;
1592 /* corresponding get_ldev was in __drbd_set_state, to serialize
1593 * our cleanup here with the transition to D_DISKLESS.
1594 * But it is still not safe to dereference ldev here, since
1595 * we might come from a failed attach before ldev was set. */
1596 if (mdev->ldev) {
1597 eh = mdev->ldev->dc.on_io_error;
1598 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
1599
1600 /* Immediately allow completion of all application IO that waits
1601 for completion from the local disk. */
1602 tl_abort_disk_io(mdev);
1603
1604 /* current state still has to be D_FAILED,
1605 * there is only one way out: to D_DISKLESS,
1606 * and that may only happen after our put_ldev below. */
1607 if (mdev->state.disk != D_FAILED)
1608 dev_err(DEV,
1609 "ASSERT FAILED: disk is %s during detach\n",
1610 drbd_disk_str(mdev->state.disk));
1611
1612 if (ns.conn >= C_CONNECTED)
1613 drbd_send_state(mdev, ns);
1614
1615 drbd_rs_cancel_all(mdev);
1616
1617 /* In case we want to get something to stable storage still,
1618 * this may be the last chance.
1619 * Following put_ldev may transition to D_DISKLESS. */
1620 drbd_md_sync(mdev);
1621 }
1622 put_ldev(mdev);
1623
1624 if (was_io_error && eh == EP_CALL_HELPER)
1625 drbd_khelper(mdev, "local-io-error");
1626 }
1627
1628 /* second half of local IO error, failure to attach,
1629 * or administrative detach,
1630 * after local_cnt references have reached zero again */
1631 if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
1632 /* We must still be diskless,
1633 * re-attach has to be serialized with this! */
1634 if (mdev->state.disk != D_DISKLESS)
1635 dev_err(DEV,
1636 "ASSERT FAILED: disk is %s while going diskless\n",
1637 drbd_disk_str(mdev->state.disk));
1638
1639 mdev->rs_total = 0;
1640 mdev->rs_failed = 0;
1641 atomic_set(&mdev->rs_pending_cnt, 0);
1642
1643 if (ns.conn >= C_CONNECTED)
1644 drbd_send_state(mdev, ns);
1645
1646 /* corresponding get_ldev in __drbd_set_state
1647 * this may finally trigger drbd_ldev_destroy. */
1648 put_ldev(mdev);
1649 }
1650
1651 /* Notify peer that I had a local IO error and did not detach. */
1652 if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
1653 drbd_send_state(mdev, ns);
1654
1655 /* Disks got bigger while they were detached */
1656 if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
1657 test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
1658 if (ns.conn == C_CONNECTED)
1659 resync_after_online_grow(mdev);
1660 }
1661
1662 /* A resync finished or aborted, wake paused devices... */
1663 if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
1664 (os.peer_isp && !ns.peer_isp) ||
1665 (os.user_isp && !ns.user_isp))
1666 resume_next_sg(mdev);
1667
1668 /* sync target done with resync. Explicitly notify peer, even though
1669 * it should (at least for non-empty resyncs) already know itself. */
1670 if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
1671 drbd_send_state(mdev, ns);
1672
1673 /* This triggers bitmap writeout of potentially still unwritten pages
1674 * if the resync finished cleanly, or aborted because of peer disk
1675 * failure, or because of connection loss.
1676 * For resync aborted because of local disk failure, we cannot do
1677 * any bitmap writeout anymore.
1678 * No harm done if some bits change during this phase.
1679 */
1680 if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
1681 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
1682 "write from resync_finished", BM_LOCKED_SET_ALLOWED);
1683 put_ldev(mdev);
1684 }
1685
1686 /* free tl_hash if we got thawed and are C_STANDALONE */
1687 if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash)
1688 drbd_free_tl_hash(mdev);
1689
1690 /* Upon network connection, we need to start the receiver */
1691 if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
1692 drbd_thread_start(&mdev->receiver);
1693
1694 /* Terminate worker thread if we are unconfigured - it will be
1695 restarted as needed... */
1696 if (ns.disk == D_DISKLESS &&
1697 ns.conn == C_STANDALONE &&
1698 ns.role == R_SECONDARY) {
1699 if (os.aftr_isp != ns.aftr_isp)
1700 resume_next_sg(mdev);
1701 /* set in __drbd_set_state, unless CONFIG_PENDING was set */
1702 if (test_bit(DEVICE_DYING, &mdev->flags))
1703 drbd_thread_stop_nowait(&mdev->worker);
1704 }
1705
1706 drbd_md_sync(mdev);
1707 }
1708
1709
1710 static int drbd_thread_setup(void *arg)
1711 {
1712 struct drbd_thread *thi = (struct drbd_thread *) arg;
1713 struct drbd_conf *mdev = thi->mdev;
1714 unsigned long flags;
1715 int retval;
1716
1717 restart:
1718 retval = thi->function(thi);
1719
1720 spin_lock_irqsave(&thi->t_lock, flags);
1721
1722 /* if the receiver has been "Exiting", the last thing it did
1723 * was set the conn state to "StandAlone",
1724 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
1725 * and receiver thread will be "started".
1726 * drbd_thread_start needs to set "Restarting" in that case.
1727 * t_state check and assignment needs to be within the same spinlock,
1728 * so either thread_start sees Exiting, and can remap to Restarting,
1729 * or thread_start sees None, and can proceed as normal.
1730 */
1731
1732 if (thi->t_state == Restarting) {
1733 dev_info(DEV, "Restarting %s\n", current->comm);
1734 thi->t_state = Running;
1735 spin_unlock_irqrestore(&thi->t_lock, flags);
1736 goto restart;
1737 }
1738
1739 thi->task = NULL;
1740 thi->t_state = None;
1741 smp_mb();
1742 complete(&thi->stop);
1743 spin_unlock_irqrestore(&thi->t_lock, flags);
1744
1745 dev_info(DEV, "Terminating %s\n", current->comm);
1746
1747 /* Release mod reference taken when thread was started */
1748 module_put(THIS_MODULE);
1749 return retval;
1750 }
1751
1752 static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
1753 int (*func) (struct drbd_thread *))
1754 {
1755 spin_lock_init(&thi->t_lock);
1756 thi->task = NULL;
1757 thi->t_state = None;
1758 thi->function = func;
1759 thi->mdev = mdev;
1760 }
1761
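/* Start one of the per-device threads (receiver, asender or worker).
 * Takes a module reference that the thread itself releases on exit.
 * If the thread is currently Exiting, it is only flagged as Restarting
 * instead of creating a new task. Returns true on success or if nothing
 * had to be done, false if the kthread could not be created. */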
1762 int drbd_thread_start(struct drbd_thread *thi)
1763 {
1764 struct drbd_conf *mdev = thi->mdev;
1765 struct task_struct *nt;
1766 unsigned long flags;
1767
1768 const char *me =
1769 thi == &mdev->receiver ? "receiver" :
1770 thi == &mdev->asender ? "asender" :
1771 thi == &mdev->worker ? "worker" : "NONSENSE";
1772
1773 /* is used from state engine doing drbd_thread_stop_nowait,
1774 * while holding the req lock irqsave */
1775 spin_lock_irqsave(&thi->t_lock, flags);
1776
1777 switch (thi->t_state) {
1778 case None:
1779 dev_info(DEV, "Starting %s thread (from %s [%d])\n",
1780 me, current->comm, current->pid);
1781
1782 /* Get ref on module for thread - this is released when thread exits */
1783 if (!try_module_get(THIS_MODULE)) {
1784 dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
1785 spin_unlock_irqrestore(&thi->t_lock, flags);
1786 return false;
1787 }
1788
1789 init_completion(&thi->stop);
1790 D_ASSERT(thi->task == NULL);
1791 thi->reset_cpu_mask = 1;
1792 thi->t_state = Running;
1793 spin_unlock_irqrestore(&thi->t_lock, flags);
1794 flush_signals(current); /* otherwise we may get -ERESTARTNOINTR */
1795
1796 nt = kthread_create(drbd_thread_setup, (void *) thi,
1797 "drbd%d_%s", mdev_to_minor(mdev), me);
1798
1799 if (IS_ERR(nt)) {
1800 dev_err(DEV, "Couldn't start thread\n");
1801
1802 module_put(THIS_MODULE);
1803 return false;
1804 }
1805 spin_lock_irqsave(&thi->t_lock, flags);
1806 thi->task = nt;
1807 thi->t_state = Running;
1808 spin_unlock_irqrestore(&thi->t_lock, flags);
1809 wake_up_process(nt);
1810 break;
1811 case Exiting:
1812 thi->t_state = Restarting;
1813 dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
1814 me, current->comm, current->pid);
1815 /* fall through */
1816 case Running:
1817 case Restarting:
1818 default:
1819 spin_unlock_irqrestore(&thi->t_lock, flags);
1820 break;
1821 }
1822
1823 return true;
1824 }
1825
1826
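/* Ask a DRBD thread to terminate (Exiting) or restart (Restarting):
 * set the target state, kick the task with DRBD_SIGKILL so blocking
 * calls return, and, if wait is set, block until the thread has
 * completed ->stop. */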
1827 void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
1828 {
1829 unsigned long flags;
1830
1831 enum drbd_thread_state ns = restart ? Restarting : Exiting;
1832
1833 /* may be called from state engine, holding the req lock irqsave */
1834 spin_lock_irqsave(&thi->t_lock, flags);
1835
1836 if (thi->t_state == None) {
1837 spin_unlock_irqrestore(&thi->t_lock, flags);
1838 if (restart)
1839 drbd_thread_start(thi);
1840 return;
1841 }
1842
1843 if (thi->t_state != ns) {
1844 if (thi->task == NULL) {
1845 spin_unlock_irqrestore(&thi->t_lock, flags);
1846 return;
1847 }
1848
1849 thi->t_state = ns;
1850 smp_mb();
1851 init_completion(&thi->stop);
1852 if (thi->task != current)
1853 force_sig(DRBD_SIGKILL, thi->task);
1854
1855 }
1856
1857 spin_unlock_irqrestore(&thi->t_lock, flags);
1858
1859 if (wait)
1860 wait_for_completion(&thi->stop);
1861 }
1862
1863 #ifdef CONFIG_SMP
1864 /**
1865 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
1866 * @mdev: DRBD device.
1867 *
1868 * Forces all threads of a device onto the same CPU. This is beneficial for
1869 * DRBD's performance. May be overridden by the user's configuration.
1870 */
1871 void drbd_calc_cpu_mask(struct drbd_conf *mdev)
1872 {
1873 int ord, cpu;
1874
1875 /* user override. */
1876 if (cpumask_weight(mdev->cpu_mask))
1877 return;
1878
1879 ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
1880 for_each_online_cpu(cpu) {
1881 if (ord-- == 0) {
1882 cpumask_set_cpu(cpu, mdev->cpu_mask);
1883 return;
1884 }
1885 }
1886 /* should not be reached */
1887 cpumask_setall(mdev->cpu_mask);
1888 }
1889
1890 /**
1891 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
1892 * @mdev: DRBD device.
1893 *
1894 * Call this in the "main loop" of _all_ threads; no need for any mutex, current won't die
1895 * prematurely.
1896 */
1897 void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
1898 {
1899 struct task_struct *p = current;
1900 struct drbd_thread *thi =
1901 p == mdev->asender.task ? &mdev->asender :
1902 p == mdev->receiver.task ? &mdev->receiver :
1903 p == mdev->worker.task ? &mdev->worker :
1904 NULL;
1905 ERR_IF(thi == NULL)
1906 return;
1907 if (!thi->reset_cpu_mask)
1908 return;
1909 thi->reset_cpu_mask = 0;
1910 set_cpus_allowed_ptr(p, mdev->cpu_mask);
1911 }
1912 #endif
1913
1914 /* the appropriate socket mutex must be held already */
1915 int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
1916 enum drbd_packets cmd, struct p_header80 *h,
1917 size_t size, unsigned msg_flags)
1918 {
1919 int sent, ok;
1920
1921 ERR_IF(!h) return false;
1922 ERR_IF(!size) return false;
1923
1924 h->magic = BE_DRBD_MAGIC;
1925 h->command = cpu_to_be16(cmd);
1926 h->length = cpu_to_be16(size-sizeof(struct p_header80));
1927
1928 sent = drbd_send(mdev, sock, h, size, msg_flags);
1929
1930 ok = (sent == size);
1931 if (!ok && !signal_pending(current))
1932 dev_warn(DEV, "short sent %s size=%d sent=%d\n",
1933 cmdname(cmd), (int)size, sent);
1934 return ok;
1935 }
1936
1937 /* don't pass the socket. we may only look at it
1938 * when we hold the appropriate socket mutex.
1939 */
1940 int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
1941 enum drbd_packets cmd, struct p_header80 *h, size_t size)
1942 {
1943 int ok = 0;
1944 struct socket *sock;
1945
1946 if (use_data_socket) {
1947 mutex_lock(&mdev->data.mutex);
1948 sock = mdev->data.socket;
1949 } else {
1950 mutex_lock(&mdev->meta.mutex);
1951 sock = mdev->meta.socket;
1952 }
1953
1954 /* drbd_disconnect() could have called drbd_free_sock()
1955 * while we were waiting for the mutex... */
1956 if (likely(sock != NULL))
1957 ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);
1958
1959 if (use_data_socket)
1960 mutex_unlock(&mdev->data.mutex);
1961 else
1962 mutex_unlock(&mdev->meta.mutex);
1963 return ok;
1964 }
1965
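/* Send a p_header80 announcing @size payload bytes, immediately followed
 * by the raw payload itself, on the data socket. */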
1966 int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
1967 size_t size)
1968 {
1969 struct p_header80 h;
1970 int ok;
1971
1972 h.magic = BE_DRBD_MAGIC;
1973 h.command = cpu_to_be16(cmd);
1974 h.length = cpu_to_be16(size);
1975
1976 if (!drbd_get_data_sock(mdev))
1977 return 0;
1978
1979 ok = (sizeof(h) ==
1980 drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
1981 ok = ok && (size ==
1982 drbd_send(mdev, mdev->data.socket, data, size, 0));
1983
1984 drbd_put_data_sock(mdev);
1985
1986 return ok;
1987 }
1988
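/* Send the syncer configuration (resync rate, the c_* flow control
 * settings and, depending on the agreed protocol version, the verify
 * and csums algorithm names) to the peer, using the pre-allocated send
 * buffer of the data socket to avoid an allocation here. */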
1989 int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
1990 {
1991 struct p_rs_param_95 *p;
1992 struct socket *sock;
1993 int size, rv;
1994 const int apv = mdev->agreed_pro_version;
1995
1996 size = apv <= 87 ? sizeof(struct p_rs_param)
1997 : apv == 88 ? sizeof(struct p_rs_param)
1998 + strlen(mdev->sync_conf.verify_alg) + 1
1999 : apv <= 94 ? sizeof(struct p_rs_param_89)
2000 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
2001
2002 /* used from admin command context and receiver/worker context.
2003 * to avoid kmalloc, grab the socket right here,
2004 * then use the pre-allocated sbuf there */
2005 mutex_lock(&mdev->data.mutex);
2006 sock = mdev->data.socket;
2007
2008 if (likely(sock != NULL)) {
2009 enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
2010
2011 p = &mdev->data.sbuf.rs_param_95;
2012
2013 /* initialize verify_alg and csums_alg */
2014 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2015
2016 p->rate = cpu_to_be32(sc->rate);
2017 p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
2018 p->c_delay_target = cpu_to_be32(sc->c_delay_target);
2019 p->c_fill_target = cpu_to_be32(sc->c_fill_target);
2020 p->c_max_rate = cpu_to_be32(sc->c_max_rate);
2021
2022 if (apv >= 88)
2023 strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
2024 if (apv >= 89)
2025 strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
2026
2027 rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
2028 } else
2029 rv = 0; /* not ok */
2030
2031 mutex_unlock(&mdev->data.mutex);
2032
2033 return rv;
2034 }
2035
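/* Send the negotiated network options (wire protocol, after-split-brain
 * policies, two-primaries setting, connection flags and, for protocol
 * version >= 87, the data integrity algorithm name) as a P_PROTOCOL
 * packet. */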
2036 int drbd_send_protocol(struct drbd_conf *mdev)
2037 {
2038 struct p_protocol *p;
2039 int size, cf, rv;
2040
2041 size = sizeof(struct p_protocol);
2042
2043 if (mdev->agreed_pro_version >= 87)
2044 size += strlen(mdev->net_conf->integrity_alg) + 1;
2045
2046 /* we must not recurse into our own queue,
2047 * as that is blocked during handshake */
2048 p = kmalloc(size, GFP_NOIO);
2049 if (p == NULL)
2050 return 0;
2051
2052 p->protocol = cpu_to_be32(mdev->net_conf->wire_protocol);
2053 p->after_sb_0p = cpu_to_be32(mdev->net_conf->after_sb_0p);
2054 p->after_sb_1p = cpu_to_be32(mdev->net_conf->after_sb_1p);
2055 p->after_sb_2p = cpu_to_be32(mdev->net_conf->after_sb_2p);
2056 p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
2057
2058 cf = 0;
2059 if (mdev->net_conf->want_lose)
2060 cf |= CF_WANT_LOSE;
2061 if (mdev->net_conf->dry_run) {
2062 if (mdev->agreed_pro_version >= 92)
2063 cf |= CF_DRY_RUN;
2064 else {
2065 dev_err(DEV, "--dry-run is not supported by peer");
2066 kfree(p);
2067 return -1;
2068 }
2069 }
2070 p->conn_flags = cpu_to_be32(cf);
2071
2072 if (mdev->agreed_pro_version >= 87)
2073 strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
2074
2075 rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
2076 (struct p_header80 *)p, size);
2077 kfree(p);
2078 return rv;
2079 }
2080
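/* Send our UUID set, the current bitmap weight and a few flag bits
 * (want_lose, crashed primary, inconsistent-while-negotiating, plus
 * "skip initial sync" from the wrapper below) as a P_UUIDS packet. */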
2081 int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
2082 {
2083 struct p_uuids p;
2084 int i;
2085
2086 if (!get_ldev_if_state(mdev, D_NEGOTIATING))
2087 return 1;
2088
2089 for (i = UI_CURRENT; i < UI_SIZE; i++)
2090 p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
2091
2092 mdev->comm_bm_set = drbd_bm_total_weight(mdev);
2093 p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
2094 uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
2095 uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
2096 uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
2097 p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
2098
2099 put_ldev(mdev);
2100
2101 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
2102 (struct p_header80 *)&p, sizeof(p));
2103 }
2104
2105 int drbd_send_uuids(struct drbd_conf *mdev)
2106 {
2107 return _drbd_send_uuids(mdev, 0);
2108 }
2109
2110 int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
2111 {
2112 return _drbd_send_uuids(mdev, 8);
2113 }
2114
2115 void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
2116 {
2117 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2118 u64 *uuid = mdev->ldev->md.uuid;
2119 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
2120 text,
2121 (unsigned long long)uuid[UI_CURRENT],
2122 (unsigned long long)uuid[UI_BITMAP],
2123 (unsigned long long)uuid[UI_HISTORY_START],
2124 (unsigned long long)uuid[UI_HISTORY_END]);
2125 put_ldev(mdev);
2126 } else {
2127 dev_info(DEV, "%s effective data uuid: %016llX\n",
2128 text,
2129 (unsigned long long)mdev->ed_uuid);
2130 }
2131 }
2132
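/* Bump the bitmap UUID (or generate a random one if it is unset or was
 * just created), persist it to the meta data, and announce it to the
 * peer with a P_SYNC_UUID packet. */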
2133 int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
2134 {
2135 struct p_rs_uuid p;
2136 u64 uuid;
2137
2138 D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
2139
2140 uuid = mdev->ldev->md.uuid[UI_BITMAP];
2141 if (uuid && uuid != UUID_JUST_CREATED)
2142 uuid = uuid + UUID_NEW_BM_OFFSET;
2143 else
2144 get_random_bytes(&uuid, sizeof(u64));
2145 drbd_uuid_set(mdev, UI_BITMAP, uuid);
2146 drbd_print_uuids(mdev, "updated sync UUID");
2147 drbd_md_sync(mdev);
2148 p.uuid = cpu_to_be64(uuid);
2149
2150 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
2151 (struct p_header80 *)&p, sizeof(p));
2152 }
2153
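/* Tell the peer about our disk geometry: backing device capacity, the
 * user-imposed size limit, the current device capacity, the queue
 * ordering mode and the maximum bio size, which is capped for old peers
 * (protocol <= 94). */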
2154 int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
2155 {
2156 struct p_sizes p;
2157 sector_t d_size, u_size;
2158 int q_order_type, max_bio_size;
2159 int ok;
2160
2161 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2162 D_ASSERT(mdev->ldev->backing_bdev);
2163 d_size = drbd_get_max_capacity(mdev->ldev);
2164 u_size = mdev->ldev->dc.disk_size;
2165 q_order_type = drbd_queue_order_type(mdev);
2166 max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
2167 max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
2168 put_ldev(mdev);
2169 } else {
2170 d_size = 0;
2171 u_size = 0;
2172 q_order_type = QUEUE_ORDERED_NONE;
2173 max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
2174 }
2175
2176 /* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
2177 if (mdev->agreed_pro_version <= 94)
2178 max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
2179
2180 p.d_size = cpu_to_be64(d_size);
2181 p.u_size = cpu_to_be64(u_size);
2182 p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
2183 p.max_bio_size = cpu_to_be32(max_bio_size);
2184 p.queue_order_type = cpu_to_be16(q_order_type);
2185 p.dds_flags = cpu_to_be16(flags);
2186
2187 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
2188 (struct p_header80 *)&p, sizeof(p));
2189 return ok;
2190 }
2191
2192 /**
2193 * drbd_send_current_state() - Sends the drbd state to the peer
2194 * @mdev: DRBD device.
2195 */
2196 int drbd_send_current_state(struct drbd_conf *mdev)
2197 {
2198 struct socket *sock;
2199 struct p_state p;
2200 int ok = 0;
2201
2202 /* Grab the state lock so we won't send state if we're in the middle
2203 * of a cluster-wide state change on another thread */
2204 drbd_state_lock(mdev);
2205
2206 mutex_lock(&mdev->data.mutex);
2207
2208 p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
2209 sock = mdev->data.socket;
2210
2211 if (likely(sock != NULL)) {
2212 ok = _drbd_send_cmd(mdev, sock, P_STATE,
2213 (struct p_header80 *)&p, sizeof(p), 0);
2214 }
2215
2216 mutex_unlock(&mdev->data.mutex);
2217
2218 drbd_state_unlock(mdev);
2219 return ok;
2220 }
2221
2222 /**
2223 * drbd_send_state() - After a state change, sends the new state to the peer
2224 * @mdev: DRBD device.
2225 * @state: the state to send, not necessarily the current state.
2226 *
2227 * Each state change queues an "after_state_ch" work, which will eventually
2228 * send the resulting new state to the peer. If more state changes happen
2229 * between queuing and processing of the after_state_ch work, we still
2230 * want to send each intermediary state in the order it occurred.
2231 */
2232 int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
2233 {
2234 struct socket *sock;
2235 struct p_state p;
2236 int ok = 0;
2237
2238 mutex_lock(&mdev->data.mutex);
2239
2240 p.state = cpu_to_be32(state.i);
2241 sock = mdev->data.socket;
2242
2243 if (likely(sock != NULL)) {
2244 ok = _drbd_send_cmd(mdev, sock, P_STATE,
2245 (struct p_header80 *)&p, sizeof(p), 0);
2246 }
2247
2248 mutex_unlock(&mdev->data.mutex);
2249
2250 return ok;
2251 }
2252
2253 int drbd_send_state_req(struct drbd_conf *mdev,
2254 union drbd_state mask, union drbd_state val)
2255 {
2256 struct p_req_state p;
2257
2258 p.mask = cpu_to_be32(mask.i);
2259 p.val = cpu_to_be32(val.i);
2260
2261 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
2262 (struct p_header80 *)&p, sizeof(p));
2263 }
2264
2265 int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
2266 {
2267 struct p_req_state_reply p;
2268
2269 p.retcode = cpu_to_be32(retcode);
2270
2271 return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
2272 (struct p_header80 *)&p, sizeof(p));
2273 }
2274
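/* Run-length encode a chunk of the bitmap into p->code using VLI-encoded
 * run lengths. Returns the number of code bytes produced, 0 if the
 * feature is unusable (disabled, old peer) or the chunk did not compress,
 * and -1 if an unexpected zero run length is encountered (bitmap modified
 * while we scan it). */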
2275 int fill_bitmap_rle_bits(struct drbd_conf *mdev,
2276 struct p_compressed_bm *p,
2277 struct bm_xfer_ctx *c)
2278 {
2279 struct bitstream bs;
2280 unsigned long plain_bits;
2281 unsigned long tmp;
2282 unsigned long rl;
2283 unsigned len;
2284 unsigned toggle;
2285 int bits;
2286
2287 /* may we use this feature? */
2288 if ((mdev->sync_conf.use_rle == 0) ||
2289 (mdev->agreed_pro_version < 90))
2290 return 0;
2291
2292 if (c->bit_offset >= c->bm_bits)
2293 return 0; /* nothing to do. */
2294
2295 /* use at most this many bytes */
2296 bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
2297 memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
2298 /* plain bits covered in this code string */
2299 plain_bits = 0;
2300
2301 /* p->encoding & 0x80 stores whether the first run length is set.
2302 * bit offset is implicit.
2303 * start with toggle == 2 to be able to tell the first iteration */
2304 toggle = 2;
2305
2306 /* see how many plain bits we can stuff into one packet
2307 * using RLE and VLI. */
2308 do {
2309 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
2310 : _drbd_bm_find_next(mdev, c->bit_offset);
2311 if (tmp == -1UL)
2312 tmp = c->bm_bits;
2313 rl = tmp - c->bit_offset;
2314
2315 if (toggle == 2) { /* first iteration */
2316 if (rl == 0) {
2317 /* the first checked bit was set,
2318 * store start value, */
2319 DCBP_set_start(p, 1);
2320 /* but skip encoding of zero run length */
2321 toggle = !toggle;
2322 continue;
2323 }
2324 DCBP_set_start(p, 0);
2325 }
2326
2327 /* paranoia: catch zero runlength.
2328 * can only happen if bitmap is modified while we scan it. */
2329 if (rl == 0) {
2330 dev_err(DEV, "unexpected zero runlength while encoding bitmap "
2331 "t:%u bo:%lu\n", toggle, c->bit_offset);
2332 return -1;
2333 }
2334
2335 bits = vli_encode_bits(&bs, rl);
2336 if (bits == -ENOBUFS) /* buffer full */
2337 break;
2338 if (bits <= 0) {
2339 dev_err(DEV, "error while encoding bitmap: %d\n", bits);
2340 return 0;
2341 }
2342
2343 toggle = !toggle;
2344 plain_bits += rl;
2345 c->bit_offset = tmp;
2346 } while (c->bit_offset < c->bm_bits);
2347
2348 len = bs.cur.b - p->code + !!bs.cur.bit;
2349
2350 if (plain_bits < (len << 3)) {
2351 /* incompressible with this method.
2352 * we need to rewind both word and bit position. */
2353 c->bit_offset -= plain_bits;
2354 bm_xfer_ctx_bit_to_word_offset(c);
2355 c->bit_offset = c->word_offset * BITS_PER_LONG;
2356 return 0;
2357 }
2358
2359 /* RLE + VLI was able to compress it just fine.
2360 * update c->word_offset. */
2361 bm_xfer_ctx_bit_to_word_offset(c);
2362
2363 /* store pad_bits */
2364 DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
2365
2366 return len;
2367 }
2368
2369 /**
2370 * send_bitmap_rle_or_plain
2371 *
2372 * Return 0 when done, 1 when another iteration is needed, and a negative error
2373 * code upon failure.
2374 */
2375 static int
2376 send_bitmap_rle_or_plain(struct drbd_conf *mdev,
2377 struct p_header80 *h, struct bm_xfer_ctx *c)
2378 {
2379 struct p_compressed_bm *p = (void*)h;
2380 unsigned long num_words;
2381 int len;
2382 int ok;
2383
2384 len = fill_bitmap_rle_bits(mdev, p, c);
2385
2386 if (len < 0)
2387 return -EIO;
2388
2389 if (len) {
2390 DCBP_set_code(p, RLE_VLI_Bits);
2391 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
2392 sizeof(*p) + len, 0);
2393
2394 c->packets[0]++;
2395 c->bytes[0] += sizeof(*p) + len;
2396
2397 if (c->bit_offset >= c->bm_bits)
2398 len = 0; /* DONE */
2399 } else {
2400 /* was not compressible.
2401 * send a buffer full of plain text bits instead. */
2402 num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
2403 len = num_words * sizeof(long);
2404 if (len)
2405 drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
2406 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
2407 h, sizeof(struct p_header80) + len, 0);
2408 c->word_offset += num_words;
2409 c->bit_offset = c->word_offset * BITS_PER_LONG;
2410
2411 c->packets[1]++;
2412 c->bytes[1] += sizeof(struct p_header80) + len;
2413
2414 if (c->bit_offset > c->bm_bits)
2415 c->bit_offset = c->bm_bits;
2416 }
2417 if (ok) {
2418 if (len == 0) {
2419 INFO_bm_xfer_stats(mdev, "send", c);
2420 return 0;
2421 } else
2422 return 1;
2423 }
2424 return -EIO;
2425 }
2426
2427 /* See the comment at receive_bitmap() */
2428 int _drbd_send_bitmap(struct drbd_conf *mdev)
2429 {
2430 struct bm_xfer_ctx c;
2431 struct p_header80 *p;
2432 int err;
2433
2434 ERR_IF(!mdev->bitmap) return false;
2435
2436 /* maybe we should use some per thread scratch page,
2437 * and allocate that during initial device creation? */
2438 p = (struct p_header80 *) __get_free_page(GFP_NOIO);
2439 if (!p) {
2440 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
2441 return false;
2442 }
2443
2444 if (get_ldev(mdev)) {
2445 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
2446 dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
2447 drbd_bm_set_all(mdev);
2448 if (drbd_bm_write(mdev)) {
2449 /* write_bm did fail! Leave full sync flag set in Meta P_DATA
2450 * but otherwise process as per normal - need to tell other
2451 * side that a full resync is required! */
2452 dev_err(DEV, "Failed to write bitmap to disk!\n");
2453 } else {
2454 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
2455 drbd_md_sync(mdev);
2456 }
2457 }
2458 put_ldev(mdev);
2459 }
2460
2461 c = (struct bm_xfer_ctx) {
2462 .bm_bits = drbd_bm_bits(mdev),
2463 .bm_words = drbd_bm_words(mdev),
2464 };
2465
2466 do {
2467 err = send_bitmap_rle_or_plain(mdev, p, &c);
2468 } while (err > 0);
2469
2470 free_page((unsigned long) p);
2471 return err == 0;
2472 }
2473
2474 int drbd_send_bitmap(struct drbd_conf *mdev)
2475 {
2476 int err;
2477
2478 if (!drbd_get_data_sock(mdev))
2479 return -1;
2480 err = !_drbd_send_bitmap(mdev);
2481 drbd_put_data_sock(mdev);
2482 return err;
2483 }
2484
2485 int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
2486 {
2487 int ok;
2488 struct p_barrier_ack p;
2489
2490 p.barrier = barrier_nr;
2491 p.set_size = cpu_to_be32(set_size);
2492
2493 if (mdev->state.conn < C_CONNECTED)
2494 return false;
2495 ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
2496 (struct p_header80 *)&p, sizeof(p));
2497 return ok;
2498 }
2499
2500 /**
2501 * _drbd_send_ack() - Sends an ack packet
2502 * @mdev: DRBD device.
2503 * @cmd: Packet command code.
2504 * @sector: sector, needs to be in big endian byte order
2505 * @blksize: size in byte, needs to be in big endian byte order
2506 * @block_id: Id, big endian byte order
2507 */
2508 static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
2509 u64 sector,
2510 u32 blksize,
2511 u64 block_id)
2512 {
2513 int ok;
2514 struct p_block_ack p;
2515
2516 p.sector = sector;
2517 p.block_id = block_id;
2518 p.blksize = blksize;
2519 p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
2520
2521 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
2522 return false;
2523 ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
2524 (struct p_header80 *)&p, sizeof(p));
2525 return ok;
2526 }
2527
2528 /* dp->sector and dp->block_id already/still in network byte order,
2529 * data_size is payload size according to dp->head,
2530 * and may need to be corrected for digest size. */
2531 int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
2532 struct p_data *dp, int data_size)
2533 {
2534 data_size -= (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
2535 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
2536 return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
2537 dp->block_id);
2538 }
2539
2540 int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
2541 struct p_block_req *rp)
2542 {
2543 return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
2544 }
2545
2546 /**
2547 * drbd_send_ack() - Sends an ack packet
2548 * @mdev: DRBD device.
2549 * @cmd: Packet command code.
2550 * @e: Epoch entry.
2551 */
2552 int drbd_send_ack(struct drbd_conf *mdev,
2553 enum drbd_packets cmd, struct drbd_epoch_entry *e)
2554 {
2555 return _drbd_send_ack(mdev, cmd,
2556 cpu_to_be64(e->sector),
2557 cpu_to_be32(e->size),
2558 e->block_id);
2559 }
2560
2561 /* This function misuses the block_id field to signal if the blocks
2562 * are in sync or not. */
2563 int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
2564 sector_t sector, int blksize, u64 block_id)
2565 {
2566 return _drbd_send_ack(mdev, cmd,
2567 cpu_to_be64(sector),
2568 cpu_to_be32(blksize),
2569 cpu_to_be64(block_id));
2570 }
2571
2572 int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
2573 sector_t sector, int size, u64 block_id)
2574 {
2575 int ok;
2576 struct p_block_req p;
2577
2578 p.sector = cpu_to_be64(sector);
2579 p.block_id = block_id;
2580 p.blksize = cpu_to_be32(size);
2581
2582 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
2583 (struct p_header80 *)&p, sizeof(p));
2584 return ok;
2585 }
2586
2587 int drbd_send_drequest_csum(struct drbd_conf *mdev,
2588 sector_t sector, int size,
2589 void *digest, int digest_size,
2590 enum drbd_packets cmd)
2591 {
2592 int ok;
2593 struct p_block_req p;
2594
2595 p.sector = cpu_to_be64(sector);
2596 p.block_id = BE_DRBD_MAGIC + 0xbeef;
2597 p.blksize = cpu_to_be32(size);
2598
2599 p.head.magic = BE_DRBD_MAGIC;
2600 p.head.command = cpu_to_be16(cmd);
2601 p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
2602
2603 mutex_lock(&mdev->data.mutex);
2604
2605 ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
2606 ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
2607
2608 mutex_unlock(&mdev->data.mutex);
2609
2610 return ok;
2611 }
2612
2613 int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
2614 {
2615 int ok;
2616 struct p_block_req p;
2617
2618 p.sector = cpu_to_be64(sector);
2619 p.block_id = BE_DRBD_MAGIC + 0xbabe;
2620 p.blksize = cpu_to_be32(size);
2621
2622 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
2623 (struct p_header80 *)&p, sizeof(p));
2624 return ok;
2625 }
2626
2627 /* called on sndtimeo
2628 * returns false if we should retry,
2629 * true if we think connection is dead
2630 */
2631 static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
2632 {
2633 int drop_it;
2634 /* long elapsed = (long)(jiffies - mdev->last_received); */
2635
2636 drop_it = mdev->meta.socket == sock
2637 || !mdev->asender.task
2638 || get_t_state(&mdev->asender) != Running
2639 || mdev->state.conn < C_CONNECTED;
2640
2641 if (drop_it)
2642 return true;
2643
2644 drop_it = !--mdev->ko_count;
2645 if (!drop_it) {
2646 dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
2647 current->comm, current->pid, mdev->ko_count);
2648 request_ping(mdev);
2649 }
2650
2651 return drop_it; /* && (mdev->state == R_PRIMARY) */;
2652 }
2653
2654 /* The idea of sendpage seems to be to put some kind of reference
2655 * to the page into the skb, and to hand it over to the NIC. In
2656 * this process get_page() gets called.
2657 *
2658 * As soon as the page was really sent over the network put_page()
2659 * gets called by some part of the network layer. [ NIC driver? ]
2660 *
2661 * [ get_page() / put_page() increment/decrement the count. If count
2662 * reaches 0 the page will be freed. ]
2663 *
2664 * This works nicely with pages from FSs.
2665 * But this means that in protocol A we might signal IO completion too early!
2666 *
2667 * In order not to corrupt data during a resync we must make sure
2668 * that we do not reuse our own buffer pages (EEs) too early, therefore
2669 * we have the net_ee list.
2670 *
2671 * XFS seems to have problems, still, it submits pages with page_count == 0!
2672 * As a workaround, we disable sendpage on pages
2673 * with page_count == 0 or PageSlab.
2674 */
2675 static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
2676 int offset, size_t size, unsigned msg_flags)
2677 {
2678 int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
2679 kunmap(page);
2680 if (sent == size)
2681 mdev->send_cnt += size>>9;
2682 return sent == size;
2683 }
2684
2685 static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
2686 int offset, size_t size, unsigned msg_flags)
2687 {
2688 mm_segment_t oldfs = get_fs();
2689 int sent, ok;
2690 int len = size;
2691
2692 /* e.g. XFS meta- & log-data is in slab pages, which have a
2693 * page_count of 0 and/or have PageSlab() set.
2694 * we cannot use send_page for those, as that does get_page();
2695 * put_page(); and would cause either a VM_BUG directly, or
2696 * __page_cache_release a page that would actually still be referenced
2697 * by someone, leading to some obscure delayed Oops somewhere else. */
2698 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
2699 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
2700
2701 msg_flags |= MSG_NOSIGNAL;
2702 drbd_update_congested(mdev);
2703 set_fs(KERNEL_DS);
2704 do {
2705 sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
2706 offset, len,
2707 msg_flags);
2708 if (sent == -EAGAIN) {
2709 if (we_should_drop_the_connection(mdev,
2710 mdev->data.socket))
2711 break;
2712 else
2713 continue;
2714 }
2715 if (sent <= 0) {
2716 dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
2717 __func__, (int)size, len, sent);
2718 break;
2719 }
2720 len -= sent;
2721 offset += sent;
2722 } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
2723 set_fs(oldfs);
2724 clear_bit(NET_CONGESTED, &mdev->flags);
2725
2726 ok = (len == 0);
2727 if (likely(ok))
2728 mdev->send_cnt += size>>9;
2729 return ok;
2730 }
2731
2732 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
2733 {
2734 struct bio_vec *bvec;
2735 int i;
2736 /* hint all but last page with MSG_MORE */
2737 __bio_for_each_segment(bvec, bio, i, 0) {
2738 if (!_drbd_no_send_page(mdev, bvec->bv_page,
2739 bvec->bv_offset, bvec->bv_len,
2740 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
2741 return 0;
2742 }
2743 return 1;
2744 }
2745
2746 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
2747 {
2748 struct bio_vec *bvec;
2749 int i;
2750 /* hint all but last page with MSG_MORE */
2751 __bio_for_each_segment(bvec, bio, i, 0) {
2752 if (!_drbd_send_page(mdev, bvec->bv_page,
2753 bvec->bv_offset, bvec->bv_len,
2754 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
2755 return 0;
2756 }
2757 return 1;
2758 }
2759
2760 static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
2761 {
2762 struct page *page = e->pages;
2763 unsigned len = e->size;
2764 /* hint all but last page with MSG_MORE */
2765 page_chain_for_each(page) {
2766 unsigned l = min_t(unsigned, len, PAGE_SIZE);
2767 if (!_drbd_send_page(mdev, page, 0, l,
2768 page_chain_next(page) ? MSG_MORE : 0))
2769 return 0;
2770 len -= l;
2771 }
2772 return 1;
2773 }
2774
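/* Translate the bio rw flags of a request into DP_* flags for the wire;
 * peers older than protocol version 95 only understand DP_RW_SYNC. */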
2775 static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
2776 {
2777 if (mdev->agreed_pro_version >= 95)
2778 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
2779 (bi_rw & REQ_FUA ? DP_FUA : 0) |
2780 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
2781 (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
2782 else
2783 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
2784 }
2785
2786 /* Used to send write requests
2787 * R_PRIMARY -> Peer (P_DATA)
2788 */
2789 int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2790 {
2791 int ok = 1;
2792 struct p_data p;
2793 unsigned int dp_flags = 0;
2794 void *dgb;
2795 int dgs;
2796
2797 if (!drbd_get_data_sock(mdev))
2798 return 0;
2799
2800 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2801 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2802
2803 if (req->size <= DRBD_MAX_SIZE_H80_PACKET) {
2804 p.head.h80.magic = BE_DRBD_MAGIC;
2805 p.head.h80.command = cpu_to_be16(P_DATA);
2806 p.head.h80.length =
2807 cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2808 } else {
2809 p.head.h95.magic = BE_DRBD_MAGIC_BIG;
2810 p.head.h95.command = cpu_to_be16(P_DATA);
2811 p.head.h95.length =
2812 cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2813 }
2814
2815 p.sector = cpu_to_be64(req->sector);
2816 p.block_id = (unsigned long)req;
2817 p.seq_num = cpu_to_be32(req->seq_num =
2818 atomic_add_return(1, &mdev->packet_seq));
2819
2820 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
2821
2822 if (mdev->state.conn >= C_SYNC_SOURCE &&
2823 mdev->state.conn <= C_PAUSED_SYNC_T)
2824 dp_flags |= DP_MAY_SET_IN_SYNC;
2825
2826 p.dp_flags = cpu_to_be32(dp_flags);
2827 set_bit(UNPLUG_REMOTE, &mdev->flags);
2828 ok = (sizeof(p) ==
2829 drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
2830 if (ok && dgs) {
2831 dgb = mdev->int_dig_out;
2832 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
2833 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
2834 }
2835 if (ok) {
2836 /* For protocol A, we have to memcpy the payload into
2837 * socket buffers, as we may complete right away
2838 * as soon as we handed it over to tcp, at which point the data
2839 * pages may become invalid.
2840 *
2841 * For data-integrity enabled, we copy it as well, so we can be
2842 * sure that even if the bio pages may still be modified, it
2843 * won't change the data on the wire, thus if the digest checks
2844 * out ok after sending on this side, but does not fit on the
2845 * receiving side, we sure have detected corruption elsewhere.
2846 */
2847 if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs)
2848 ok = _drbd_send_bio(mdev, req->master_bio);
2849 else
2850 ok = _drbd_send_zc_bio(mdev, req->master_bio);
2851
2852 /* double check digest, sometimes buffers have been modified in flight. */
2853 if (dgs > 0 && dgs <= 64) {
2854 /* 64 byte, 512 bit, is the largest digest size
2855 * currently supported in kernel crypto. */
2856 unsigned char digest[64];
2857 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
2858 if (memcmp(mdev->int_dig_out, digest, dgs)) {
2859 dev_warn(DEV,
2860 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
2861 (unsigned long long)req->sector, req->size);
2862 }
2863 } /* else if (dgs > 64) {
2864 ... Be noisy about digest too large ...
2865 } */
2866 }
2867
2868 drbd_put_data_sock(mdev);
2869
2870 return ok;
2871 }
2872
2873 /* answer packet, used to send data back for read requests:
2874 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
2875 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
2876 */
2877 int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
2878 struct drbd_epoch_entry *e)
2879 {
2880 int ok;
2881 struct p_data p;
2882 void *dgb;
2883 int dgs;
2884
2885 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2886 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2887
2888 if (e->size <= DRBD_MAX_SIZE_H80_PACKET) {
2889 p.head.h80.magic = BE_DRBD_MAGIC;
2890 p.head.h80.command = cpu_to_be16(cmd);
2891 p.head.h80.length =
2892 cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2893 } else {
2894 p.head.h95.magic = BE_DRBD_MAGIC_BIG;
2895 p.head.h95.command = cpu_to_be16(cmd);
2896 p.head.h95.length =
2897 cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2898 }
2899
2900 p.sector = cpu_to_be64(e->sector);
2901 p.block_id = e->block_id;
2902 /* p.seq_num = 0; No sequence numbers here.. */
2903
2904 /* Only called by our kernel thread.
2905 * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
2906 * in response to admin command or module unload.
2907 */
2908 if (!drbd_get_data_sock(mdev))
2909 return 0;
2910
2911 ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
2912 if (ok && dgs) {
2913 dgb = mdev->int_dig_out;
2914 drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
2915 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
2916 }
2917 if (ok)
2918 ok = _drbd_send_zc_ee(mdev, e);
2919
2920 drbd_put_data_sock(mdev);
2921
2922 return ok;
2923 }
2924
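/* Report a request's sector and size to the peer as a P_OUT_OF_SYNC
 * packet; no data is shipped. */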
2925 int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
2926 {
2927 struct p_block_desc p;
2928
2929 p.sector = cpu_to_be64(req->sector);
2930 p.blksize = cpu_to_be32(req->size);
2931
2932 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p));
2933 }
2934
2935 /*
2936 drbd_send distinguishes two cases:
2937
2938 Packets sent via the data socket "sock"
2939 and packets sent via the meta data socket "msock"
2940
2941 sock msock
2942 -----------------+-------------------------+------------------------------
2943 timeout conf.timeout / 2 conf.timeout / 2
2944 timeout action send a ping via msock Abort communication
2945 and close all sockets
2946 */
2947
2948 /*
2949 * the caller must already hold the appropriate socket mutex (data.mutex / meta.mutex)!
2950 */
2951 int drbd_send(struct drbd_conf *mdev, struct socket *sock,
2952 void *buf, size_t size, unsigned msg_flags)
2953 {
2954 struct kvec iov;
2955 struct msghdr msg;
2956 int rv, sent = 0;
2957
2958 if (!sock)
2959 return -1000;
2960
2961 /* THINK if (signal_pending) return ... ? */
2962
2963 iov.iov_base = buf;
2964 iov.iov_len = size;
2965
2966 msg.msg_name = NULL;
2967 msg.msg_namelen = 0;
2968 msg.msg_control = NULL;
2969 msg.msg_controllen = 0;
2970 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
2971
2972 if (sock == mdev->data.socket) {
2973 mdev->ko_count = mdev->net_conf->ko_count;
2974 drbd_update_congested(mdev);
2975 }
2976 do {
2977 /* STRANGE
2978 * tcp_sendmsg does _not_ use its size parameter at all ?
2979 *
2980 * -EAGAIN on timeout, -EINTR on signal.
2981 */
2982 /* THINK
2983 * do we need to block DRBD_SIG if sock == &meta.socket ??
2984 * otherwise wake_asender() might interrupt some send_*Ack !
2985 */
2986 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
2987 if (rv == -EAGAIN) {
2988 if (we_should_drop_the_connection(mdev, sock))
2989 break;
2990 else
2991 continue;
2992 }
2993 D_ASSERT(rv != 0);
2994 if (rv == -EINTR) {
2995 flush_signals(current);
2996 rv = 0;
2997 }
2998 if (rv < 0)
2999 break;
3000 sent += rv;
3001 iov.iov_base += rv;
3002 iov.iov_len -= rv;
3003 } while (sent < size);
3004
3005 if (sock == mdev->data.socket)
3006 clear_bit(NET_CONGESTED, &mdev->flags);
3007
3008 if (rv <= 0) {
3009 if (rv != -EAGAIN) {
3010 dev_err(DEV, "%s_sendmsg returned %d\n",
3011 sock == mdev->meta.socket ? "msock" : "sock",
3012 rv);
3013 drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
3014 } else
3015 drbd_force_state(mdev, NS(conn, C_TIMEOUT));
3016 }
3017
3018 return sent;
3019 }
3020
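/* Block device open: only a Primary may be opened for writing; read-only
 * opens on a Secondary are allowed only with the allow_oos module
 * parameter. Bumps open_cnt under the request lock. */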
3021 static int drbd_open(struct block_device *bdev, fmode_t mode)
3022 {
3023 struct drbd_conf *mdev = bdev->bd_disk->private_data;
3024 unsigned long flags;
3025 int rv = 0;
3026
3027 mutex_lock(&drbd_main_mutex);
3028 spin_lock_irqsave(&mdev->req_lock, flags);
3029 /* to have a stable mdev->state.role
3030 * and no race with updating open_cnt */
3031
3032 if (mdev->state.role != R_PRIMARY) {
3033 if (mode & FMODE_WRITE)
3034 rv = -EROFS;
3035 else if (!allow_oos)
3036 rv = -EMEDIUMTYPE;
3037 }
3038
3039 if (!rv)
3040 mdev->open_cnt++;
3041 spin_unlock_irqrestore(&mdev->req_lock, flags);
3042 mutex_unlock(&drbd_main_mutex);
3043
3044 return rv;
3045 }
3046
3047 static int drbd_release(struct gendisk *gd, fmode_t mode)
3048 {
3049 struct drbd_conf *mdev = gd->private_data;
3050 mutex_lock(&drbd_main_mutex);
3051 mdev->open_cnt--;
3052 mutex_unlock(&drbd_main_mutex);
3053 return 0;
3054 }
3055
3056 static void drbd_set_defaults(struct drbd_conf *mdev)
3057 {
3058 /* This way we get a compile error when sync_conf grows,
3059 and forget to initialize it here */
3060 mdev->sync_conf = (struct syncer_conf) {
3061 /* .rate = */ DRBD_RATE_DEF,
3062 /* .after = */ DRBD_AFTER_DEF,
3063 /* .al_extents = */ DRBD_AL_EXTENTS_DEF,
3064 /* .verify_alg = */ {}, 0,
3065 /* .cpu_mask = */ {}, 0,
3066 /* .csums_alg = */ {}, 0,
3067 /* .use_rle = */ 0,
3068 /* .on_no_data = */ DRBD_ON_NO_DATA_DEF,
3069 /* .c_plan_ahead = */ DRBD_C_PLAN_AHEAD_DEF,
3070 /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
3071 /* .c_fill_target = */ DRBD_C_FILL_TARGET_DEF,
3072 /* .c_max_rate = */ DRBD_C_MAX_RATE_DEF,
3073 /* .c_min_rate = */ DRBD_C_MIN_RATE_DEF
3074 };
3075
3076 /* Have to do it this way, because the layout differs between
3077 big-endian and little-endian */
3078 mdev->state = (union drbd_state) {
3079 { .role = R_SECONDARY,
3080 .peer = R_UNKNOWN,
3081 .conn = C_STANDALONE,
3082 .disk = D_DISKLESS,
3083 .pdsk = D_UNKNOWN,
3084 .susp = 0,
3085 .susp_nod = 0,
3086 .susp_fen = 0
3087 } };
3088 }
3089
3090 void drbd_init_set_defaults(struct drbd_conf *mdev)
3091 {
3092 /* the memset(,0,) did most of this.
3093 * note: only assignments, no allocation in here */
3094
3095 drbd_set_defaults(mdev);
3096
3097 atomic_set(&mdev->ap_bio_cnt, 0);
3098 atomic_set(&mdev->ap_pending_cnt, 0);
3099 atomic_set(&mdev->rs_pending_cnt, 0);
3100 atomic_set(&mdev->unacked_cnt, 0);
3101 atomic_set(&mdev->local_cnt, 0);
3102 atomic_set(&mdev->net_cnt, 0);
3103 atomic_set(&mdev->packet_seq, 0);
3104 atomic_set(&mdev->pp_in_use, 0);
3105 atomic_set(&mdev->pp_in_use_by_net, 0);
3106 atomic_set(&mdev->rs_sect_in, 0);
3107 atomic_set(&mdev->rs_sect_ev, 0);
3108 atomic_set(&mdev->ap_in_flight, 0);
3109 atomic_set(&mdev->md_io_in_use, 0);
3110
3111 mutex_init(&mdev->data.mutex);
3112 mutex_init(&mdev->meta.mutex);
3113 sema_init(&mdev->data.work.s, 0);
3114 sema_init(&mdev->meta.work.s, 0);
3115 mutex_init(&mdev->state_mutex);
3116
3117 spin_lock_init(&mdev->data.work.q_lock);
3118 spin_lock_init(&mdev->meta.work.q_lock);
3119
3120 spin_lock_init(&mdev->al_lock);
3121 spin_lock_init(&mdev->req_lock);
3122 spin_lock_init(&mdev->peer_seq_lock);
3123 spin_lock_init(&mdev->epoch_lock);
3124
3125 INIT_LIST_HEAD(&mdev->active_ee);
3126 INIT_LIST_HEAD(&mdev->sync_ee);
3127 INIT_LIST_HEAD(&mdev->done_ee);
3128 INIT_LIST_HEAD(&mdev->read_ee);
3129 INIT_LIST_HEAD(&mdev->net_ee);
3130 INIT_LIST_HEAD(&mdev->resync_reads);
3131 INIT_LIST_HEAD(&mdev->data.work.q);
3132 INIT_LIST_HEAD(&mdev->meta.work.q);
3133 INIT_LIST_HEAD(&mdev->resync_work.list);
3134 INIT_LIST_HEAD(&mdev->unplug_work.list);
3135 INIT_LIST_HEAD(&mdev->go_diskless.list);
3136 INIT_LIST_HEAD(&mdev->md_sync_work.list);
3137 INIT_LIST_HEAD(&mdev->start_resync_work.list);
3138 INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
3139
3140 mdev->resync_work.cb = w_resync_timer;
3141 mdev->unplug_work.cb = w_send_write_hint;
3142 mdev->go_diskless.cb = w_go_diskless;
3143 mdev->md_sync_work.cb = w_md_sync;
3144 mdev->bm_io_work.w.cb = w_bitmap_io;
3145 mdev->start_resync_work.cb = w_start_resync;
3146 init_timer(&mdev->resync_timer);
3147 init_timer(&mdev->md_sync_timer);
3148 init_timer(&mdev->start_resync_timer);
3149 init_timer(&mdev->request_timer);
3150 mdev->resync_timer.function = resync_timer_fn;
3151 mdev->resync_timer.data = (unsigned long) mdev;
3152 mdev->md_sync_timer.function = md_sync_timer_fn;
3153 mdev->md_sync_timer.data = (unsigned long) mdev;
3154 mdev->start_resync_timer.function = start_resync_timer_fn;
3155 mdev->start_resync_timer.data = (unsigned long) mdev;
3156 mdev->request_timer.function = request_timer_fn;
3157 mdev->request_timer.data = (unsigned long) mdev;
3158
3159 init_waitqueue_head(&mdev->misc_wait);
3160 init_waitqueue_head(&mdev->state_wait);
3161 init_waitqueue_head(&mdev->net_cnt_wait);
3162 init_waitqueue_head(&mdev->ee_wait);
3163 init_waitqueue_head(&mdev->al_wait);
3164 init_waitqueue_head(&mdev->seq_wait);
3165
3166 drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
3167 drbd_thread_init(mdev, &mdev->worker, drbd_worker);
3168 drbd_thread_init(mdev, &mdev->asender, drbd_asender);
3169
3170 mdev->agreed_pro_version = PRO_VERSION_MAX;
3171 mdev->write_ordering = WO_bdev_flush;
3172 mdev->resync_wenr = LC_FREE;
3173 mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
3174 mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
3175 }
3176
3177 void drbd_mdev_cleanup(struct drbd_conf *mdev)
3178 {
3179 int i;
3180 if (mdev->receiver.t_state != None)
3181 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
3182 mdev->receiver.t_state);
3183
3184 /* no need to lock it, I'm the only thread alive */
3185 if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
3186 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
3187 mdev->al_writ_cnt =
3188 mdev->bm_writ_cnt =
3189 mdev->read_cnt =
3190 mdev->recv_cnt =
3191 mdev->send_cnt =
3192 mdev->writ_cnt =
3193 mdev->p_size =
3194 mdev->rs_start =
3195 mdev->rs_total =
3196 mdev->rs_failed = 0;
3197 mdev->rs_last_events = 0;
3198 mdev->rs_last_sect_ev = 0;
3199 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
3200 mdev->rs_mark_left[i] = 0;
3201 mdev->rs_mark_time[i] = 0;
3202 }
3203 D_ASSERT(mdev->net_conf == NULL);
3204
3205 drbd_set_my_capacity(mdev, 0);
3206 if (mdev->bitmap) {
3207 /* maybe never allocated. */
3208 drbd_bm_resize(mdev, 0, 1);
3209 drbd_bm_cleanup(mdev);
3210 }
3211
3212 drbd_free_resources(mdev);
3213 clear_bit(AL_SUSPENDED, &mdev->flags);
3214
3215 /*
3216 * currently we call drbd_init_ee only on module load, so
3217 * we may call drbd_release_ee only on module unload!
3218 */
3219 D_ASSERT(list_empty(&mdev->active_ee));
3220 D_ASSERT(list_empty(&mdev->sync_ee));
3221 D_ASSERT(list_empty(&mdev->done_ee));
3222 D_ASSERT(list_empty(&mdev->read_ee));
3223 D_ASSERT(list_empty(&mdev->net_ee));
3224 D_ASSERT(list_empty(&mdev->resync_reads));
3225 D_ASSERT(list_empty(&mdev->data.work.q));
3226 D_ASSERT(list_empty(&mdev->meta.work.q));
3227 D_ASSERT(list_empty(&mdev->resync_work.list));
3228 D_ASSERT(list_empty(&mdev->unplug_work.list));
3229 D_ASSERT(list_empty(&mdev->go_diskless.list));
3230
3231 drbd_set_defaults(mdev);
3232 }
3233
3234
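/* Free the preallocated page pool and all slab caches / mempools.
 * Also used to clean up a partially set up state, so every pointer is
 * checked before it is destroyed. */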
3235 static void drbd_destroy_mempools(void)
3236 {
3237 struct page *page;
3238
3239 while (drbd_pp_pool) {
3240 page = drbd_pp_pool;
3241 drbd_pp_pool = (struct page *)page_private(page);
3242 __free_page(page);
3243 drbd_pp_vacant--;
3244 }
3245
3246 /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
3247
3248 if (drbd_ee_mempool)
3249 mempool_destroy(drbd_ee_mempool);
3250 if (drbd_request_mempool)
3251 mempool_destroy(drbd_request_mempool);
3252 if (drbd_ee_cache)
3253 kmem_cache_destroy(drbd_ee_cache);
3254 if (drbd_request_cache)
3255 kmem_cache_destroy(drbd_request_cache);
3256 if (drbd_bm_ext_cache)
3257 kmem_cache_destroy(drbd_bm_ext_cache);
3258 if (drbd_al_ext_cache)
3259 kmem_cache_destroy(drbd_al_ext_cache);
3260
3261 drbd_ee_mempool = NULL;
3262 drbd_request_mempool = NULL;
3263 drbd_ee_cache = NULL;
3264 drbd_request_cache = NULL;
3265 drbd_bm_ext_cache = NULL;
3266 drbd_al_ext_cache = NULL;
3267
3268 return;
3269 }
3270
3271 static int drbd_create_mempools(void)
3272 {
3273 struct page *page;
3274 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
3275 int i;
3276
3277 /* prepare our caches and mempools */
3278 drbd_request_mempool = NULL;
3279 drbd_ee_cache = NULL;
3280 drbd_request_cache = NULL;
3281 drbd_bm_ext_cache = NULL;
3282 drbd_al_ext_cache = NULL;
3283 drbd_pp_pool = NULL;
3284
3285 /* caches */
3286 drbd_request_cache = kmem_cache_create(
3287 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
3288 if (drbd_request_cache == NULL)
3289 goto Enomem;
3290
3291 drbd_ee_cache = kmem_cache_create(
3292 "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
3293 if (drbd_ee_cache == NULL)
3294 goto Enomem;
3295
3296 drbd_bm_ext_cache = kmem_cache_create(
3297 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
3298 if (drbd_bm_ext_cache == NULL)
3299 goto Enomem;
3300
3301 drbd_al_ext_cache = kmem_cache_create(
3302 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
3303 if (drbd_al_ext_cache == NULL)
3304 goto Enomem;
3305
3306 /* mempools */
3307 drbd_request_mempool = mempool_create(number,
3308 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
3309 if (drbd_request_mempool == NULL)
3310 goto Enomem;
3311
3312 drbd_ee_mempool = mempool_create(number,
3313 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
3314 if (drbd_ee_mempool == NULL)
3315 goto Enomem;
3316
3317 /* drbd's page pool */
3318 spin_lock_init(&drbd_pp_lock);
3319
3320 for (i = 0; i < number; i++) {
3321 page = alloc_page(GFP_HIGHUSER);
3322 if (!page)
3323 goto Enomem;
3324 set_page_private(page, (unsigned long)drbd_pp_pool);
3325 drbd_pp_pool = page;
3326 }
3327 drbd_pp_vacant = number;
3328
3329 return 0;
3330
3331 Enomem:
3332 drbd_destroy_mempools(); /* in case we allocated some */
3333 return -ENOMEM;
3334 }
3335
3336 static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
3337 void *unused)
3338 {
3339 /* just so we have it. you never know what interesting things we
3340 * might want to do here some day...
3341 */
3342
3343 return NOTIFY_DONE;
3344 }
3345
3346 static struct notifier_block drbd_notifier = {
3347 .notifier_call = drbd_notify_sys,
3348 };
3349
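/* Drain all epoch entry lists of a device and warn about any entries
 * that were still on them. */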
3350 static void drbd_release_ee_lists(struct drbd_conf *mdev)
3351 {
3352 int rr;
3353
3354 rr = drbd_release_ee(mdev, &mdev->active_ee);
3355 if (rr)
3356 dev_err(DEV, "%d EEs in active list found!\n", rr);
3357
3358 rr = drbd_release_ee(mdev, &mdev->sync_ee);
3359 if (rr)
3360 dev_err(DEV, "%d EEs in sync list found!\n", rr);
3361
3362 rr = drbd_release_ee(mdev, &mdev->read_ee);
3363 if (rr)
3364 dev_err(DEV, "%d EEs in read list found!\n", rr);
3365
3366 rr = drbd_release_ee(mdev, &mdev->done_ee);
3367 if (rr)
3368 dev_err(DEV, "%d EEs in done list found!\n", rr);
3369
3370 rr = drbd_release_ee(mdev, &mdev->net_ee);
3371 if (rr)
3372 dev_err(DEV, "%d EEs in net list found!\n", rr);
3373 }
3374
3375 /* caution. no locking.
3376 * currently only used from module cleanup code. */
3377 static void drbd_delete_device(unsigned int minor)
3378 {
3379 struct drbd_conf *mdev = minor_to_mdev(minor);
3380
3381 if (!mdev)
3382 return;
3383
3384 del_timer_sync(&mdev->request_timer);
3385
3386 /* paranoia asserts */
3387 if (mdev->open_cnt != 0)
3388 dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
3389 __FILE__ , __LINE__);
3390
3391 ERR_IF (!list_empty(&mdev->data.work.q)) {
3392 struct list_head *lp;
3393 list_for_each(lp, &mdev->data.work.q) {
3394 dev_err(DEV, "lp = %p\n", lp);
3395 }
3396 };
3397 /* end paranoia asserts */
3398
3399 del_gendisk(mdev->vdisk);
3400
3401 /* cleanup stuff that may have been allocated during
3402 * device (re-)configuration or state changes */
3403
3404 if (mdev->this_bdev)
3405 bdput(mdev->this_bdev);
3406
3407 drbd_free_resources(mdev);
3408
3409 drbd_release_ee_lists(mdev);
3410
3411 /* should be freed on disconnect? */
3412 kfree(mdev->ee_hash);
3413 /*
3414 mdev->ee_hash_s = 0;
3415 mdev->ee_hash = NULL;
3416 */
3417
3418 lc_destroy(mdev->act_log);
3419 lc_destroy(mdev->resync);
3420
3421 kfree(mdev->p_uuid);
3422 /* mdev->p_uuid = NULL; */
3423
3424 kfree(mdev->int_dig_out);
3425 kfree(mdev->int_dig_in);
3426 kfree(mdev->int_dig_vv);
3427
3428 /* cleanup the rest that has been
3429 * allocated from drbd_new_device
3430 * and actually free the mdev itself */
3431 drbd_free_mdev(mdev);
3432 }
3433
3434 static void drbd_cleanup(void)
3435 {
3436 unsigned int i;
3437
3438 unregister_reboot_notifier(&drbd_notifier);
3439
3440 /* first remove proc,
3441 * drbdsetup uses its presence to detect
3442 * whether DRBD is loaded.
3443 * If we got stuck in proc removal,
3444 * but have netlink already deregistered,
3445 * some drbdsetup commands may wait forever
3446 * for an answer.
3447 */
3448 if (drbd_proc)
3449 remove_proc_entry("drbd", NULL);
3450
3451 drbd_nl_cleanup();
3452
3453 if (minor_table) {
3454 i = minor_count;
3455 while (i--)
3456 drbd_delete_device(i);
3457 drbd_destroy_mempools();
3458 }
3459
3460 kfree(minor_table);
3461
3462 unregister_blkdev(DRBD_MAJOR, "drbd");
3463
3464 printk(KERN_INFO "drbd: module cleanup done.\n");
3465 }
3466
3467 /**
3468 * drbd_congested() - Callback for pdflush
3469 * @congested_data: User data
3470 * @bdi_bits: Bits pdflush is currently interested in
3471 *
3472 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
3473 */
3474 static int drbd_congested(void *congested_data, int bdi_bits)
3475 {
3476 struct drbd_conf *mdev = congested_data;
3477 struct request_queue *q;
3478 char reason = '-';
3479 int r = 0;
3480
3481 if (!may_inc_ap_bio(mdev)) {
3482 /* DRBD has frozen IO */
3483 r = bdi_bits;
3484 reason = 'd';
3485 goto out;
3486 }
3487
3488 if (get_ldev(mdev)) {
3489 q = bdev_get_queue(mdev->ldev->backing_bdev);
3490 r = bdi_congested(&q->backing_dev_info, bdi_bits);
3491 put_ldev(mdev);
3492 if (r)
3493 reason = 'b';
3494 }
3495
3496 if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
3497 r |= (1 << BDI_async_congested);
3498 reason = reason == 'b' ? 'a' : 'n';
3499 }
3500
3501 out:
3502 mdev->congestion_reason = reason;
3503 return r;
3504 }
3505
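/* Allocate and initialize everything that makes up one DRBD minor:
 * the drbd_conf itself, its request queue and gendisk, the meta data
 * IO page, the bitmap, the transfer log, the application reads hash
 * and the initial epoch. Returns NULL on failure, rolling back whatever
 * was already allocated. */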
3506 struct drbd_conf *drbd_new_device(unsigned int minor)
3507 {
3508 struct drbd_conf *mdev;
3509 struct gendisk *disk;
3510 struct request_queue *q;
3511
3512 /* GFP_KERNEL, we are outside of all write-out paths */
3513 mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
3514 if (!mdev)
3515 return NULL;
3516 if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
3517 goto out_no_cpumask;
3518
3519 mdev->minor = minor;
3520
3521 drbd_init_set_defaults(mdev);
3522
3523 q = blk_alloc_queue(GFP_KERNEL);
3524 if (!q)
3525 goto out_no_q;
3526 mdev->rq_queue = q;
3527 q->queuedata = mdev;
3528
3529 disk = alloc_disk(1);
3530 if (!disk)
3531 goto out_no_disk;
3532 mdev->vdisk = disk;
3533
3534 set_disk_ro(disk, true);
3535
3536 disk->queue = q;
3537 disk->major = DRBD_MAJOR;
3538 disk->first_minor = minor;
3539 disk->fops = &drbd_ops;
3540 sprintf(disk->disk_name, "drbd%d", minor);
3541 disk->private_data = mdev;
3542
3543 mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
3544 /* we have no partitions. we contain only ourselves. */
3545 mdev->this_bdev->bd_contains = mdev->this_bdev;
3546
3547 q->backing_dev_info.congested_fn = drbd_congested;
3548 q->backing_dev_info.congested_data = mdev;
3549
3550 blk_queue_make_request(q, drbd_make_request);
3551 /* Set max_hw_sectors to an odd value of 8 KiB here.
3552 This triggers a max_bio_size message upon first attach or connect */
3553 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
3554 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
3555 blk_queue_merge_bvec(q, drbd_merge_bvec);
3556 q->queue_lock = &mdev->req_lock;
3557
3558 mdev->md_io_page = alloc_page(GFP_KERNEL);
3559 if (!mdev->md_io_page)
3560 goto out_no_io_page;
3561
3562 if (drbd_bm_init(mdev))
3563 goto out_no_bitmap;
3564 /* no need to lock access, we are still initializing this minor device. */
3565 if (!tl_init(mdev))
3566 goto out_no_tl;
3567
3568 mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
3569 if (!mdev->app_reads_hash)
3570 goto out_no_app_reads;
3571
3572 mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
3573 if (!mdev->current_epoch)
3574 goto out_no_epoch;
3575
3576 INIT_LIST_HEAD(&mdev->current_epoch->list);
3577 mdev->epochs = 1;
3578
3579 return mdev;
3580
3581 /* out_whatever_else:
3582 kfree(mdev->current_epoch); */
3583 out_no_epoch:
3584 kfree(mdev->app_reads_hash);
3585 out_no_app_reads:
3586 tl_cleanup(mdev);
3587 out_no_tl:
3588 drbd_bm_cleanup(mdev);
3589 out_no_bitmap:
3590 __free_page(mdev->md_io_page);
3591 out_no_io_page:
3592 put_disk(disk);
3593 out_no_disk:
3594 blk_cleanup_queue(q);
3595 out_no_q:
3596 free_cpumask_var(mdev->cpu_mask);
3597 out_no_cpumask:
3598 kfree(mdev);
3599 return NULL;
3600 }
3601
3602 /* counterpart of drbd_new_device.
3603 * last part of drbd_delete_device. */
3604 void drbd_free_mdev(struct drbd_conf *mdev)
3605 {
3606 kfree(mdev->current_epoch);
3607 kfree(mdev->app_reads_hash);
3608 tl_cleanup(mdev);
3609 if (mdev->bitmap) /* should no longer be there. */
3610 drbd_bm_cleanup(mdev);
3611 __free_page(mdev->md_io_page);
3612 put_disk(mdev->vdisk);
3613 blk_cleanup_queue(mdev->rq_queue);
3614 free_cpumask_var(mdev->cpu_mask);
3615 drbd_free_tl_hash(mdev);
3616 kfree(mdev);
3617 }
3618
3619
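/* Module init: sanity check the handshake packet layout and minor_count,
 * register the netlink interface and the block major, allocate the minor
 * table and mempools, create /proc/drbd, and log the version banner. */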
3620 int __init drbd_init(void)
3621 {
3622 int err;
3623
3624 if (sizeof(struct p_handshake) != 80) {
3625 printk(KERN_ERR
3626 "drbd: never change the size or layout "
3627 "of the HandShake packet.\n");
3628 return -EINVAL;
3629 }
3630
3631 if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
3632 printk(KERN_ERR
3633 "drbd: invalid minor_count (%d)\n", minor_count);
3634 #ifdef MODULE
3635 return -EINVAL;
3636 #else
3637 minor_count = 8;
3638 #endif
3639 }
3640
3641 err = drbd_nl_init();
3642 if (err)
3643 return err;
3644
3645 err = register_blkdev(DRBD_MAJOR, "drbd");
3646 if (err) {
3647 printk(KERN_ERR
3648 "drbd: unable to register block device major %d\n",
3649 DRBD_MAJOR);
3650 return err;
3651 }
3652
3653 register_reboot_notifier(&drbd_notifier);
3654
3655 /*
3656 * allocate all necessary structs
3657 */
3658 err = -ENOMEM;
3659
3660 init_waitqueue_head(&drbd_pp_wait);
3661
3662 drbd_proc = NULL; /* play safe for drbd_cleanup */
3663 minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
3664 GFP_KERNEL);
3665 if (!minor_table)
3666 goto Enomem;
3667
3668 err = drbd_create_mempools();
3669 if (err)
3670 goto Enomem;
3671
3672 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
3673 if (!drbd_proc) {
3674 printk(KERN_ERR "drbd: unable to register proc file\n");
3675 goto Enomem;
3676 }
3677
3678 rwlock_init(&global_state_lock);
3679
3680 printk(KERN_INFO "drbd: initialized. "
3681 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3682 API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3683 printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
3684 printk(KERN_INFO "drbd: registered as block device major %d\n",
3685 DRBD_MAJOR);
3686 printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);
3687
3688 return 0; /* Success! */
3689
3690 Enomem:
3691 drbd_cleanup();
3692 if (err == -ENOMEM)
3693 /* currently always the case */
3694 printk(KERN_ERR "drbd: ran out of memory\n");
3695 else
3696 printk(KERN_ERR "drbd: initialization failure\n");
3697 return err;
3698 }
3699
3700 void drbd_free_bc(struct drbd_backing_dev *ldev)
3701 {
3702 if (ldev == NULL)
3703 return;
3704
3705 blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3706 blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3707
3708 kfree(ldev);
3709 }
3710
3711 void drbd_free_sock(struct drbd_conf *mdev)
3712 {
3713 if (mdev->data.socket) {
3714 mutex_lock(&mdev->data.mutex);
3715 kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
3716 sock_release(mdev->data.socket);
3717 mdev->data.socket = NULL;
3718 mutex_unlock(&mdev->data.mutex);
3719 }
3720 if (mdev->meta.socket) {
3721 mutex_lock(&mdev->meta.mutex);
3722 kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
3723 sock_release(mdev->meta.socket);
3724 mdev->meta.socket = NULL;
3725 mutex_unlock(&mdev->meta.mutex);
3726 }
3727 }
3728
3729
3730 void drbd_free_resources(struct drbd_conf *mdev)
3731 {
3732 crypto_free_hash(mdev->csums_tfm);
3733 mdev->csums_tfm = NULL;
3734 crypto_free_hash(mdev->verify_tfm);
3735 mdev->verify_tfm = NULL;
3736 crypto_free_hash(mdev->cram_hmac_tfm);
3737 mdev->cram_hmac_tfm = NULL;
3738 crypto_free_hash(mdev->integrity_w_tfm);
3739 mdev->integrity_w_tfm = NULL;
3740 crypto_free_hash(mdev->integrity_r_tfm);
3741 mdev->integrity_r_tfm = NULL;
3742
3743 drbd_free_sock(mdev);
3744
3745 __no_warn(local,
3746 drbd_free_bc(mdev->ldev);
3747 mdev->ldev = NULL;);
3748 }
3749
3750 /* meta data management */
3751
3752 struct meta_data_on_disk {
3753 u64 la_size; /* last agreed size. */
3754 u64 uuid[UI_SIZE]; /* UUIDs. */
3755 u64 device_uuid;
3756 u64 reserved_u64_1;
3757 u32 flags; /* MDF */
3758 u32 magic;
3759 u32 md_size_sect;
3760 u32 al_offset; /* offset to this block */
3761 u32 al_nr_extents; /* important for restoring the AL */
3762 /* `-- act_log->nr_elements <-- sync_conf.al_extents */
3763 u32 bm_offset; /* offset to the bitmap, from here */
3764 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
3765 u32 la_peer_max_bio_size; /* last peer max_bio_size */
3766 u32 reserved_u32[3];
3767
3768 } __packed;
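/*
 * Illustrative sketch (an assumption, not part of the original code):
 * drbd_md_sync() below zeroes a 512 byte buffer and writes a single sector,
 * so this on-disk layout is expected to fit into 512 bytes.  A compile-time
 * guard for that expectation could look like
 *
 *	BUILD_BUG_ON(sizeof(struct meta_data_on_disk) > 512);
 *
 * placed, for example, at the top of drbd_md_sync().
 */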
3769
3770 /**
3771 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3772 * @mdev: DRBD device.
3773 */
3774 void drbd_md_sync(struct drbd_conf *mdev)
3775 {
3776 struct meta_data_on_disk *buffer;
3777 sector_t sector;
3778 int i;
3779
3780 del_timer(&mdev->md_sync_timer);
3781 /* timer may be rearmed by drbd_md_mark_dirty() now. */
3782 if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
3783 return;
3784
3785	/* We use D_FAILED here, and not D_ATTACHING, because we try to write
3786	 * the metadata even if we are detaching due to a disk failure! */
3787 if (!get_ldev_if_state(mdev, D_FAILED))
3788 return;
3789
3790 buffer = drbd_md_get_buffer(mdev);
3791 if (!buffer)
3792 goto out;
3793
3794 memset(buffer, 0, 512);
3795
3796 buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
3797 for (i = UI_CURRENT; i < UI_SIZE; i++)
3798 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
3799 buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
3800 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
3801
3802 buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
3803 buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
3804 buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
3805 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3806 buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
3807
3808 buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
3809 buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);
3810
3811 D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
3812 sector = mdev->ldev->md.md_offset;
3813
3814 if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
3815		/* this was a best-effort attempt anyway ... */
3816 dev_err(DEV, "meta data update failed!\n");
3817 drbd_chk_io_error(mdev, 1, true);
3818 }
3819
3820	/* Update mdev->ldev->md.la_size_sect,
3821	 * since we just wrote that value to the on-disk metadata. */
3822 mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
3823
3824 drbd_md_put_buffer(mdev);
3825 out:
3826 put_ldev(mdev);
3827 }
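/*
 * All multi-byte fields above are converted with cpu_to_be32()/cpu_to_be64()
 * before hitting the disk, and drbd_md_read() below undoes this with the
 * matching be*_to_cpu() helpers, so the meta-data block is independent of
 * host endianness.
 */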
3828
3829 /**
3830 * drbd_md_read() - Reads in the meta data super block
3831 * @mdev: DRBD device.
3832 * @bdev: Device from which the meta data should be read in.
3833 *
3834  * Return NO_ERROR on success, or an enum drbd_ret_code value in case
3835  * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
3836 */
3837 int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3838 {
3839 struct meta_data_on_disk *buffer;
3840 int i, rv = NO_ERROR;
3841
3842 if (!get_ldev_if_state(mdev, D_ATTACHING))
3843 return ERR_IO_MD_DISK;
3844
3845 buffer = drbd_md_get_buffer(mdev);
3846 if (!buffer)
3847 goto out;
3848
3849 if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
3850 /* NOTE: can't do normal error processing here as this is
3851 called BEFORE disk is attached */
3852 dev_err(DEV, "Error while reading metadata.\n");
3853 rv = ERR_IO_MD_DISK;
3854 goto err;
3855 }
3856
3857 if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
3858 dev_err(DEV, "Error while reading metadata, magic not found.\n");
3859 rv = ERR_MD_INVALID;
3860 goto err;
3861 }
3862 if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
3863 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
3864 be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
3865 rv = ERR_MD_INVALID;
3866 goto err;
3867 }
3868 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3869 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3870 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3871 rv = ERR_MD_INVALID;
3872 goto err;
3873 }
3874 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3875 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3876 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3877 rv = ERR_MD_INVALID;
3878 goto err;
3879 }
3880
3881 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3882 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3883 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3884 rv = ERR_MD_INVALID;
3885 goto err;
3886 }
3887
3888 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
3889 for (i = UI_CURRENT; i < UI_SIZE; i++)
3890 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3891 bdev->md.flags = be32_to_cpu(buffer->flags);
3892 mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
3893 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3894
3895 spin_lock_irq(&mdev->req_lock);
3896 if (mdev->state.conn < C_CONNECTED) {
3897 int peer;
3898 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
3899 peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
3900 mdev->peer_max_bio_size = peer;
3901 }
3902 spin_unlock_irq(&mdev->req_lock);
3903
3904 if (mdev->sync_conf.al_extents < 7)
3905 mdev->sync_conf.al_extents = 127;
3906
3907 err:
3908 drbd_md_put_buffer(mdev);
3909 out:
3910 put_ldev(mdev);
3911
3912 return rv;
3913 }
3914
3915 /**
3916 * drbd_md_mark_dirty() - Mark meta data super block as dirty
3917 * @mdev: DRBD device.
3918 *
3919 * Call this function if you change anything that should be written to
3920 * the meta-data super block. This function sets MD_DIRTY, and starts a
3921 * timer that ensures that within five seconds you have to call drbd_md_sync().
3922 */
3923 #ifdef DEBUG
3924 void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
3925 {
3926 if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
3927 mod_timer(&mdev->md_sync_timer, jiffies + HZ);
3928 mdev->last_md_mark_dirty.line = line;
3929 mdev->last_md_mark_dirty.func = func;
3930 }
3931 }
3932 #else
3933 void drbd_md_mark_dirty(struct drbd_conf *mdev)
3934 {
3935 if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
3936 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
3937 }
3938 #endif
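/*
 * Usage sketch (hypothetical caller, modelled on the UUID helpers below;
 * writing md.flags directly like this is only for illustration): change
 * something that belongs to the meta-data super block, mark it dirty, and
 * either sync explicitly or let md_sync_timer trigger w_md_sync():
 *
 *	mdev->ldev->md.flags |= MDF_FULL_SYNC;
 *	drbd_md_mark_dirty(mdev);
 *	...
 *	drbd_md_sync(mdev);
 */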
3939
3940 static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3941 {
3942 int i;
3943
3944 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
3945 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
3946 }
3947
3948 void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3949 {
3950 if (idx == UI_CURRENT) {
3951 if (mdev->state.role == R_PRIMARY)
3952 val |= 1;
3953 else
3954 val &= ~((u64)1);
3955
3956 drbd_set_ed_uuid(mdev, val);
3957 }
3958
3959 mdev->ldev->md.uuid[idx] = val;
3960 drbd_md_mark_dirty(mdev);
3961 }
3962
3963
3964 void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3965 {
3966 if (mdev->ldev->md.uuid[idx]) {
3967 drbd_uuid_move_history(mdev);
3968 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
3969 }
3970 _drbd_uuid_set(mdev, idx, val);
3971 }
3972
3973 /**
3974 * drbd_uuid_new_current() - Creates a new current UUID
3975 * @mdev: DRBD device.
3976 *
3977 * Creates a new current UUID, and rotates the old current UUID into
3978 * the bitmap slot. Causes an incremental resync upon next connect.
3979 */
3980 void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3981 {
3982 u64 val;
3983 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
3984
3985 if (bm_uuid)
3986 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
3987
3988 mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
3989
3990 get_random_bytes(&val, sizeof(u64));
3991 _drbd_uuid_set(mdev, UI_CURRENT, val);
3992 drbd_print_uuids(mdev, "new current UUID");
3993 /* get it to stable storage _now_ */
3994 drbd_md_sync(mdev);
3995 }
3996
3997 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3998 {
3999 if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
4000 return;
4001
4002 if (val == 0) {
4003 drbd_uuid_move_history(mdev);
4004 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
4005 mdev->ldev->md.uuid[UI_BITMAP] = 0;
4006 } else {
4007 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
4008 if (bm_uuid)
4009 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
4010
4011 mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
4012 }
4013 drbd_md_mark_dirty(mdev);
4014 }
4015
4016 /**
4017 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
4018 * @mdev: DRBD device.
4019 *
4020 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
4021 */
4022 int drbd_bmio_set_n_write(struct drbd_conf *mdev)
4023 {
4024 int rv = -EIO;
4025
4026 if (get_ldev_if_state(mdev, D_ATTACHING)) {
4027 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
4028 drbd_md_sync(mdev);
4029 drbd_bm_set_all(mdev);
4030
4031 rv = drbd_bm_write(mdev);
4032
4033 if (!rv) {
4034 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
4035 drbd_md_sync(mdev);
4036 }
4037
4038 put_ldev(mdev);
4039 }
4040
4041 return rv;
4042 }
4043
4044 /**
4045 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
4046 * @mdev: DRBD device.
4047 *
4048 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
4049 */
4050 int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
4051 {
4052 int rv = -EIO;
4053
4054 drbd_resume_al(mdev);
4055 if (get_ldev_if_state(mdev, D_ATTACHING)) {
4056 drbd_bm_clear_all(mdev);
4057 rv = drbd_bm_write(mdev);
4058 put_ldev(mdev);
4059 }
4060
4061 return rv;
4062 }
4063
4064 static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4065 {
4066 struct bm_io_work *work = container_of(w, struct bm_io_work, w);
4067 int rv = -EIO;
4068
4069 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
4070
4071 if (get_ldev(mdev)) {
4072 drbd_bm_lock(mdev, work->why, work->flags);
4073 rv = work->io_fn(mdev);
4074 drbd_bm_unlock(mdev);
4075 put_ldev(mdev);
4076 }
4077
4078 clear_bit(BITMAP_IO, &mdev->flags);
4079 smp_mb__after_clear_bit();
4080 wake_up(&mdev->misc_wait);
4081
4082 if (work->done)
4083 work->done(mdev, rv);
4084
4085 clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
4086 work->why = NULL;
4087 work->flags = 0;
4088
4089 return 1;
4090 }
4091
4092 void drbd_ldev_destroy(struct drbd_conf *mdev)
4093 {
4094 lc_destroy(mdev->resync);
4095 mdev->resync = NULL;
4096 lc_destroy(mdev->act_log);
4097 mdev->act_log = NULL;
4098 __no_warn(local,
4099 drbd_free_bc(mdev->ldev);
4100 mdev->ldev = NULL;);
4101
4102 if (mdev->md_io_tmpp) {
4103 __free_page(mdev->md_io_tmpp);
4104 mdev->md_io_tmpp = NULL;
4105 }
4106 clear_bit(GO_DISKLESS, &mdev->flags);
4107 }
4108
4109 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4110 {
4111 D_ASSERT(mdev->state.disk == D_FAILED);
4112 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
4113 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
4114 * the protected members anymore, though, so once put_ldev reaches zero
4115 * again, it will be safe to free them. */
4116 drbd_force_state(mdev, NS(disk, D_DISKLESS));
4117 return 1;
4118 }
4119
4120 void drbd_go_diskless(struct drbd_conf *mdev)
4121 {
4122 D_ASSERT(mdev->state.disk == D_FAILED);
4123 if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
4124 drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
4125 }
4126
4127 /**
4128 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
4129 * @mdev: DRBD device.
4130 * @io_fn: IO callback to be called when bitmap IO is possible
4131 * @done: callback to be called after the bitmap IO was performed
4132 * @why: Descriptive text of the reason for doing the IO
4133 *
4134  * While IO on the bitmap is in progress, application IO is frozen; this
4135  * ensures that drbd_set_out_of_sync() cannot be called. This function MAY
4136  * ONLY be called from worker context. It MUST NOT be used while a previous
4137  * such work is still pending!
4138 */
4139 void drbd_queue_bitmap_io(struct drbd_conf *mdev,
4140 int (*io_fn)(struct drbd_conf *),
4141 void (*done)(struct drbd_conf *, int),
4142 char *why, enum bm_flag flags)
4143 {
4144 D_ASSERT(current == mdev->worker.task);
4145
4146 D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
4147 D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
4148 D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
4149 if (mdev->bm_io_work.why)
4150 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
4151 why, mdev->bm_io_work.why);
4152
4153 mdev->bm_io_work.io_fn = io_fn;
4154 mdev->bm_io_work.done = done;
4155 mdev->bm_io_work.why = why;
4156 mdev->bm_io_work.flags = flags;
4157
4158 spin_lock_irq(&mdev->req_lock);
4159 set_bit(BITMAP_IO, &mdev->flags);
4160 if (atomic_read(&mdev->ap_bio_cnt) == 0) {
4161 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
4162 drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
4163 }
4164 spin_unlock_irq(&mdev->req_lock);
4165 }
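/*
 * Usage sketch (illustrative only; the description string and flag choice
 * are assumptions, not copied from a real caller): from worker context a
 * full-bitmap write could be queued as
 *
 *	drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
 *			     "set_n_write example", BM_LOCKED_MASK);
 *
 * BITMAP_IO is set first; the work itself is only handed to the worker once
 * ap_bio_cnt has drained to zero.
 */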
4166
4167 /**
4168 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
4169 * @mdev: DRBD device.
4170 * @io_fn: IO callback to be called when bitmap IO is possible
4171 * @why: Descriptive text of the reason for doing the IO
4172 *
4173  * Freezes application IO while the actual IO operation runs. This
4174  * function MAY NOT be called from worker context.
4175 */
4176 int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
4177 char *why, enum bm_flag flags)
4178 {
4179 int rv;
4180
4181 D_ASSERT(current != mdev->worker.task);
4182
4183 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
4184 drbd_suspend_io(mdev);
4185
4186 drbd_bm_lock(mdev, why, flags);
4187 rv = io_fn(mdev);
4188 drbd_bm_unlock(mdev);
4189
4190 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
4191 drbd_resume_io(mdev);
4192
4193 return rv;
4194 }
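/*
 * Usage sketch (illustrative; the flag value is an assumption): from a
 * non-worker context a synchronous full-bitmap clear could be run as
 *
 *	rv = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
 *			    "clear_n_write example", BM_LOCKED_MASK);
 *
 * Application IO is suspended around io_fn() unless BM_LOCKED_SET_ALLOWED
 * is part of flags.
 */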
4195
4196 void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4197 {
4198 if ((mdev->ldev->md.flags & flag) != flag) {
4199 drbd_md_mark_dirty(mdev);
4200 mdev->ldev->md.flags |= flag;
4201 }
4202 }
4203
4204 void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4205 {
4206 if ((mdev->ldev->md.flags & flag) != 0) {
4207 drbd_md_mark_dirty(mdev);
4208 mdev->ldev->md.flags &= ~flag;
4209 }
4210 }
4211 int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
4212 {
4213 return (bdev->md.flags & flag) != 0;
4214 }
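/*
 * Illustrative example (the flag is the one used by drbd_bmio_set_n_write()
 * above; the call site itself is hypothetical): a persistent flag update is
 * typically paired with an explicit flush, and later tested against the
 * backing device:
 *
 *	drbd_md_set_flag(mdev, MDF_FULL_SYNC);
 *	drbd_md_sync(mdev);
 *	...
 *	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC))
 *		... resume the interrupted full sync ...
 */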
4215
4216 static void md_sync_timer_fn(unsigned long data)
4217 {
4218 struct drbd_conf *mdev = (struct drbd_conf *) data;
4219
4220 drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
4221 }
4222
4223 static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4224 {
4225 dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
4226 #ifdef DEBUG
4227 dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
4228 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
4229 #endif
4230 drbd_md_sync(mdev);
4231 return 1;
4232 }
4233
4234 #ifdef CONFIG_DRBD_FAULT_INJECTION
4235 /* Fault insertion support including random number generator shamelessly
4236 * stolen from kernel/rcutorture.c */
4237 struct fault_random_state {
4238 unsigned long state;
4239 unsigned long count;
4240 };
4241
4242 #define FAULT_RANDOM_MULT 39916801 /* prime */
4243 #define FAULT_RANDOM_ADD 479001701 /* prime */
4244 #define FAULT_RANDOM_REFRESH 10000
4245
4246 /*
4247 * Crude but fast random-number generator. Uses a linear congruential
4248 * generator, with occasional help from get_random_bytes().
4249 */
4250 static unsigned long
4251 _drbd_fault_random(struct fault_random_state *rsp)
4252 {
4253 long refresh;
4254
4255 if (!rsp->count--) {
4256 get_random_bytes(&refresh, sizeof(refresh));
4257 rsp->state += refresh;
4258 rsp->count = FAULT_RANDOM_REFRESH;
4259 }
4260 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
4261 return swahw32(rsp->state);
4262 }
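/*
 * In other words, the state is advanced as
 *
 *	state = state * 39916801 + 479001701	(mod 2^BITS_PER_LONG)
 *
 * and swahw32() swaps the two 16-bit halves of the low 32 bits, so the
 * "% 100" in _drbd_insert_fault() is driven by the better-mixed upper
 * half-word rather than by the weak low bits of the LCG.
 */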
4263
4264 static char *
4265 _drbd_fault_str(unsigned int type) {
4266 static char *_faults[] = {
4267 [DRBD_FAULT_MD_WR] = "Meta-data write",
4268 [DRBD_FAULT_MD_RD] = "Meta-data read",
4269 [DRBD_FAULT_RS_WR] = "Resync write",
4270 [DRBD_FAULT_RS_RD] = "Resync read",
4271 [DRBD_FAULT_DT_WR] = "Data write",
4272 [DRBD_FAULT_DT_RD] = "Data read",
4273 [DRBD_FAULT_DT_RA] = "Data read ahead",
4274 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
4275 [DRBD_FAULT_AL_EE] = "EE allocation",
4276 [DRBD_FAULT_RECEIVE] = "receive data corruption",
4277 };
4278
4279 return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
4280 }
4281
4282 unsigned int
4283 _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
4284 {
4285 static struct fault_random_state rrs = {0, 0};
4286
4287 unsigned int ret = (
4288 (fault_devs == 0 ||
4289 ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
4290 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
4291
4292 if (ret) {
4293 fault_count++;
4294
4295 if (__ratelimit(&drbd_ratelimit_state))
4296 dev_warn(DEV, "***Simulating %s failure\n",
4297 _drbd_fault_str(type));
4298 }
4299
4300 return ret;
4301 }
4302 #endif
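/*
 * Usage sketch (illustrative; the drbd_insert_fault() wrapper and the call
 * site are assumptions based on how fault injection is consumed elsewhere
 * in drbd): an IO submission path can fail a bio early when a fault fires:
 *
 *	if (drbd_insert_fault(mdev, DRBD_FAULT_MD_WR))
 *		bio_endio(bio, -EIO);
 *	else
 *		generic_make_request(bio);
 */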
4303
4304 const char *drbd_buildtag(void)
4305 {
4306 	/* When DRBD is built from external sources, this holds a reference
4307 	   to the git hash of the source code. */
4308
4309 static char buildtag[38] = "\0uilt-in";
4310
4311 if (buildtag[0] == 0) {
4312 #ifdef CONFIG_MODULES
4313 if (THIS_MODULE != NULL)
4314 sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
4315 else
4316 #endif
4317 buildtag[0] = 'b';
4318 }
4319
4320 return buildtag;
4321 }
4322
4323 module_init(drbd_init)
4324 module_exit(drbd_cleanup)
4325
4326 EXPORT_SYMBOL(drbd_conn_str);
4327 EXPORT_SYMBOL(drbd_role_str);
4328 EXPORT_SYMBOL(drbd_disk_str);
4329 EXPORT_SYMBOL(drbd_set_st_err_str);