b411b363
PR
1/*
2 drbd.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11 from Logicworks, Inc. for making SDP replication support possible.
12
13 drbd is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
16 any later version.
17
18 drbd is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with drbd; see the file COPYING. If not, write to
25 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26
27 */
28
b411b363 29#include <linux/module.h>
b411b363
PR
30#include <linux/drbd.h>
31#include <asm/uaccess.h>
32#include <asm/types.h>
33#include <net/sock.h>
34#include <linux/ctype.h>
2a48fc0a 35#include <linux/mutex.h>
b411b363
PR
36#include <linux/fs.h>
37#include <linux/file.h>
38#include <linux/proc_fs.h>
39#include <linux/init.h>
40#include <linux/mm.h>
41#include <linux/memcontrol.h>
42#include <linux/mm_inline.h>
43#include <linux/slab.h>
44#include <linux/random.h>
45#include <linux/reboot.h>
46#include <linux/notifier.h>
47#include <linux/kthread.h>
48
49#define __KERNEL_SYSCALLS__
50#include <linux/unistd.h>
51#include <linux/vmalloc.h>
52
53#include <linux/drbd_limits.h>
54#include "drbd_int.h"
b411b363
PR
55#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
56
57#include "drbd_vli.h"
58
59struct after_state_chg_work {
60 struct drbd_work w;
61 union drbd_state os;
62 union drbd_state ns;
63 enum chg_state_flags flags;
64 struct completion *done;
65};
66
2a48fc0a 67static DEFINE_MUTEX(drbd_main_mutex);
b411b363
PR
68int drbdd_init(struct drbd_thread *);
69int drbd_worker(struct drbd_thread *);
70int drbd_asender(struct drbd_thread *);
71
72int drbd_init(void);
73static int drbd_open(struct block_device *bdev, fmode_t mode);
74static int drbd_release(struct gendisk *gd, fmode_t mode);
75static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
76static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
77 union drbd_state ns, enum chg_state_flags flags);
78static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
79static void md_sync_timer_fn(unsigned long data);
80static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
e9e6f3ec 81static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
b411b363 82
b411b363
PR
83MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
84 "Lars Ellenberg <lars@linbit.com>");
85MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
86MODULE_VERSION(REL_VERSION);
87MODULE_LICENSE("GPL");
2b8a90b5
PR
88MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices ("
89 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
b411b363
PR
90MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
91
92#include <linux/moduleparam.h>
93/* allow_open_on_secondary */
94MODULE_PARM_DESC(allow_oos, "DONT USE!");
95/* thanks to these macros, if compiled into the kernel (not-module),
96 * this becomes the boot parameter drbd.minor_count */
97module_param(minor_count, uint, 0444);
98module_param(disable_sendpage, bool, 0644);
99module_param(allow_oos, bool, 0);
100module_param(cn_idx, uint, 0444);
101module_param(proc_details, int, 0644);
102
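/* Illustrative only -- how the parameters declared above are typically set
 * (parameter names as defined in this file, values are just examples):
 *
 *     modprobe drbd minor_count=16 proc_details=1
 *
 * or, with DRBD compiled into the kernel, as a boot parameter:
 *
 *     drbd.minor_count=16
 */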
103#ifdef CONFIG_DRBD_FAULT_INJECTION
104int enable_faults;
105int fault_rate;
106static int fault_count;
107int fault_devs;
108/* bitmap of enabled faults */
109module_param(enable_faults, int, 0664);
110/* fault rate % value - applies to all enabled faults */
111module_param(fault_rate, int, 0664);
112/* count of faults inserted */
113module_param(fault_count, int, 0664);
114/* bitmap of devices to insert faults on */
115module_param(fault_devs, int, 0644);
116#endif
117
118/* module parameter, defined */
2b8a90b5 119unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
90ab5ee9
RR
120bool disable_sendpage;
121bool allow_oos;
b411b363
PR
122unsigned int cn_idx = CN_IDX_DRBD;
123int proc_details; /* Detail level in proc drbd*/
124
125/* Module parameter for setting the user mode helper program
126 * to run. Default is /sbin/drbdadm */
127char usermode_helper[80] = "/sbin/drbdadm";
128
129module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
130
131/* in 2.6.x, our device mapping and config info contains our virtual gendisks
132 * as member "struct gendisk *vdisk;"
133 */
134struct drbd_conf **minor_table;
135
136struct kmem_cache *drbd_request_cache;
137struct kmem_cache *drbd_ee_cache; /* epoch entries */
138struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
139struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
140mempool_t *drbd_request_mempool;
141mempool_t *drbd_ee_mempool;
142
143/* I do not use a standard mempool, because:
144 1) I want to hand out the pre-allocated objects first.
145 2) I want to be able to interrupt sleeping allocation with a signal.
146 Note: This is a singly-linked list; the next pointer is the private
147 member of struct page.
148 */
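/* A minimal sketch of how such a page chain is used (illustrative only,
 * with drbd_pp_lock held; the real push/pop helpers live elsewhere in drbd):
 *
 *     push:  set_page_private(page, (unsigned long)drbd_pp_pool);
 *            drbd_pp_pool = page;
 *            drbd_pp_vacant++;
 *
 *     pop:   page = drbd_pp_pool;
 *            drbd_pp_pool = (struct page *)page_private(page);
 *            drbd_pp_vacant--;
 */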
149struct page *drbd_pp_pool;
150spinlock_t drbd_pp_lock;
151int drbd_pp_vacant;
152wait_queue_head_t drbd_pp_wait;
153
154DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
155
7d4e9d09 156static const struct block_device_operations drbd_ops = {
b411b363
PR
157 .owner = THIS_MODULE,
158 .open = drbd_open,
159 .release = drbd_release,
160};
161
162#define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
163
164#ifdef __CHECKER__
165/* When checking with sparse, and this is an inline function, sparse will
166 give tons of false positives. When this is a real function, sparse works.
167 */
168int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
169{
170 int io_allowed;
171
172 atomic_inc(&mdev->local_cnt);
173 io_allowed = (mdev->state.disk >= mins);
174 if (!io_allowed) {
175 if (atomic_dec_and_test(&mdev->local_cnt))
176 wake_up(&mdev->misc_wait);
177 }
178 return io_allowed;
179}
180
181#endif
182
183/**
184 * DOC: The transfer log
185 *
186 * The transfer log is a singly-linked list of &struct drbd_tl_epoch objects.
187 * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
188 * of the list. There is always at least one &struct drbd_tl_epoch object.
189 *
190 * Each &struct drbd_tl_epoch has a circular, doubly-linked list of requests
191 * attached.
192 */
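/* Minimal sketch (illustrative only) of how that structure is traversed,
 * using the fields documented above; _tl_restart() below follows essentially
 * this pattern. handle_request() is just a placeholder:
 *
 *     struct drbd_tl_epoch *b;
 *     struct drbd_request *req;
 *
 *     for (b = mdev->oldest_tle; b != NULL; b = b->next)
 *         list_for_each_entry(req, &b->requests, tl_requests)
 *             handle_request(req);
 */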
193static int tl_init(struct drbd_conf *mdev)
194{
195 struct drbd_tl_epoch *b;
196
197 /* during device minor initialization, we may well use GFP_KERNEL */
198 b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
199 if (!b)
200 return 0;
201 INIT_LIST_HEAD(&b->requests);
202 INIT_LIST_HEAD(&b->w.list);
203 b->next = NULL;
204 b->br_number = 4711;
7e602c0a 205 b->n_writes = 0;
b411b363
PR
206 b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
207
208 mdev->oldest_tle = b;
209 mdev->newest_tle = b;
210 INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
6d7e32f5 211 INIT_LIST_HEAD(&mdev->barrier_acked_requests);
b411b363
PR
212
213 mdev->tl_hash = NULL;
214 mdev->tl_hash_s = 0;
215
216 return 1;
217}
218
219static void tl_cleanup(struct drbd_conf *mdev)
220{
221 D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
222 D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
223 kfree(mdev->oldest_tle);
224 mdev->oldest_tle = NULL;
225 kfree(mdev->unused_spare_tle);
226 mdev->unused_spare_tle = NULL;
227 kfree(mdev->tl_hash);
228 mdev->tl_hash = NULL;
229 mdev->tl_hash_s = 0;
230}
231
232/**
233 * _tl_add_barrier() - Adds a barrier to the transfer log
234 * @mdev: DRBD device.
235 * @new: Barrier to be added before the current head of the TL.
236 *
237 * The caller must hold the req_lock.
238 */
239void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
240{
241 struct drbd_tl_epoch *newest_before;
242
243 INIT_LIST_HEAD(&new->requests);
244 INIT_LIST_HEAD(&new->w.list);
245 new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
246 new->next = NULL;
7e602c0a 247 new->n_writes = 0;
b411b363
PR
248
249 newest_before = mdev->newest_tle;
250 /* never send a barrier number == 0, because that is special-cased
251 * when using TCQ for our write ordering code */
252 new->br_number = (newest_before->br_number+1) ?: 1;
253 if (mdev->newest_tle != new) {
254 mdev->newest_tle->next = new;
255 mdev->newest_tle = new;
256 }
257}
258
259/**
260 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
261 * @mdev: DRBD device.
262 * @barrier_nr: Expected identifier of the DRBD write barrier packet.
263 * @set_size: Expected number of requests before that barrier.
264 *
265 * In case the passed barrier_nr or set_size does not match the oldest
266 * &struct drbd_tl_epoch object, this function will cause a termination
267 * of the connection.
268 */
269void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
270 unsigned int set_size)
271{
272 struct drbd_tl_epoch *b, *nob; /* next old barrier */
273 struct list_head *le, *tle;
274 struct drbd_request *r;
275
276 spin_lock_irq(&mdev->req_lock);
277
278 b = mdev->oldest_tle;
279
280 /* first some paranoia code */
281 if (b == NULL) {
282 dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
283 barrier_nr);
284 goto bail;
285 }
286 if (b->br_number != barrier_nr) {
287 dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
288 barrier_nr, b->br_number);
289 goto bail;
290 }
7e602c0a
PR
291 if (b->n_writes != set_size) {
292 dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
293 barrier_nr, set_size, b->n_writes);
b411b363
PR
294 goto bail;
295 }
296
297 /* Clean up list of requests processed during current epoch */
298 list_for_each_safe(le, tle, &b->requests) {
299 r = list_entry(le, struct drbd_request, tl_requests);
300 _req_mod(r, barrier_acked);
301 }
302 /* There could be requests on the list waiting for completion
303 of the write to the local disk. To avoid corruption of
304 the slab's data structures we have to remove the list's head.
305
306 Also there could have been a barrier ack out of sequence, overtaking
307 the write acks - which would be a bug and would violate write ordering.
308 To not deadlock in case we lose connection while such requests are
309 still pending, we need some way to find them for the
310 _req_mod(connection_lost_while_pending).
311
312 These have been list_move'd to the out_of_sequence_requests list in
313 _req_mod(, barrier_acked) above.
314 */
6d7e32f5 315 list_splice_init(&b->requests, &mdev->barrier_acked_requests);
b411b363
PR
316
317 nob = b->next;
318 if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
319 _tl_add_barrier(mdev, b);
320 if (nob)
321 mdev->oldest_tle = nob;
322 /* if nob == NULL b was the only barrier, and becomes the new
323 barrier. Therefore mdev->oldest_tle points already to b */
324 } else {
325 D_ASSERT(nob != NULL);
326 mdev->oldest_tle = nob;
327 kfree(b);
328 }
329
330 spin_unlock_irq(&mdev->req_lock);
331 dec_ap_pending(mdev);
332
333 return;
334
335bail:
336 spin_unlock_irq(&mdev->req_lock);
337 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
338}
339
617049aa 340
b411b363 341/**
11b58e73 342 * _tl_restart() - Walks the transfer log, and applies an action to all requests
b411b363 343 * @mdev: DRBD device.
11b58e73 344 * @what: The action/event to perform with all request objects
b411b363 345 *
11b58e73 346 * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
6d7e32f5 347 * restart_frozen_disk_io, abort_disk_io.
b411b363 348 */
11b58e73 349static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
b411b363 350{
11b58e73 351 struct drbd_tl_epoch *b, *tmp, **pn;
b9b98716 352 struct list_head *le, *tle, carry_reads;
11b58e73
PR
353 struct drbd_request *req;
354 int rv, n_writes, n_reads;
b411b363
PR
355
356 b = mdev->oldest_tle;
11b58e73 357 pn = &mdev->oldest_tle;
b411b363 358 while (b) {
11b58e73
PR
359 n_writes = 0;
360 n_reads = 0;
b9b98716 361 INIT_LIST_HEAD(&carry_reads);
b411b363 362 list_for_each_safe(le, tle, &b->requests) {
11b58e73
PR
363 req = list_entry(le, struct drbd_request, tl_requests);
364 rv = _req_mod(req, what);
365
366 n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
367 n_reads += (rv & MR_READ) >> MR_READ_SHIFT;
b411b363
PR
368 }
369 tmp = b->next;
370
2b4dd36f
PR
371 if (what == abort_disk_io) {
372 /* Only walk the TL, leave barrier objects in place */
373 b = tmp;
374 continue;
375 }
376
b9b98716 377 if (n_writes) {
11b58e73
PR
378 if (what == resend) {
379 b->n_writes = n_writes;
380 if (b->w.cb == NULL) {
381 b->w.cb = w_send_barrier;
382 inc_ap_pending(mdev);
383 set_bit(CREATE_BARRIER, &mdev->flags);
384 }
385
386 drbd_queue_work(&mdev->data.work, &b->w);
387 }
388 pn = &b->next;
389 } else {
b9b98716
PR
390 if (n_reads)
391 list_add(&carry_reads, &b->requests);
11b58e73
PR
392 /* there could still be requests on that ring list,
393 * in case local io is still pending */
394 list_del(&b->requests);
395
396 /* dec_ap_pending corresponding to queue_barrier.
397 * the newest barrier may not have been queued yet,
398 * in which case w.cb is still NULL. */
399 if (b->w.cb != NULL)
400 dec_ap_pending(mdev);
401
402 if (b == mdev->newest_tle) {
403 /* recycle, but reinit! */
404 D_ASSERT(tmp == NULL);
405 INIT_LIST_HEAD(&b->requests);
b9b98716 406 list_splice(&carry_reads, &b->requests);
11b58e73
PR
407 INIT_LIST_HEAD(&b->w.list);
408 b->w.cb = NULL;
409 b->br_number = net_random();
410 b->n_writes = 0;
411
412 *pn = b;
413 break;
414 }
415 *pn = tmp;
416 kfree(b);
b411b363 417 }
b411b363 418 b = tmp;
b9b98716 419 list_splice(&carry_reads, &b->requests);
b411b363 420 }
6d7e32f5
PR
421
422 /* Actions operating on the disk state, also want to work on
423 requests that got barrier acked. */
424 switch (what) {
425 case abort_disk_io:
426 case fail_frozen_disk_io:
427 case restart_frozen_disk_io:
428 list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
429 req = list_entry(le, struct drbd_request, tl_requests);
430 _req_mod(req, what);
431 }
432
433 case connection_lost_while_pending:
434 case resend:
435 break;
436 default:
437 dev_err(DEV, "what = %d in _tl_restart()\n", what);
438 }
11b58e73
PR
439}
440
b411b363
PR
441
442/**
443 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
444 * @mdev: DRBD device.
445 *
446 * This is called after the connection to the peer was lost. The storage covered
447 * by the requests on the transfer log gets marked as out of sync. Called from the
448 * receiver thread and the worker thread.
449 */
450void tl_clear(struct drbd_conf *mdev)
451{
b411b363
PR
452 struct list_head *le, *tle;
453 struct drbd_request *r;
b411b363
PR
454
455 spin_lock_irq(&mdev->req_lock);
456
11b58e73 457 _tl_restart(mdev, connection_lost_while_pending);
b411b363
PR
458
459 /* we expect this list to be empty. */
460 D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
461
462 /* but just in case, clean it up anyways! */
463 list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
464 r = list_entry(le, struct drbd_request, tl_requests);
465 /* It would be nice to complete outside of spinlock.
466 * But this is easier for now. */
467 _req_mod(r, connection_lost_while_pending);
468 }
469
470 /* ensure bit indicating barrier is required is clear */
471 clear_bit(CREATE_BARRIER, &mdev->flags);
472
288f422e
PR
473 memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
474
b411b363
PR
475 spin_unlock_irq(&mdev->req_lock);
476}
477
11b58e73
PR
478void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
479{
480 spin_lock_irq(&mdev->req_lock);
481 _tl_restart(mdev, what);
b411b363
PR
482 spin_unlock_irq(&mdev->req_lock);
483}
484
485/**
81e84650 486 * cl_wide_st_chg() - true if the state change is a cluster wide one
b411b363
PR
487 * @mdev: DRBD device.
488 * @os: old (current) state.
489 * @ns: new (wanted) state.
490 */
491static int cl_wide_st_chg(struct drbd_conf *mdev,
492 union drbd_state os, union drbd_state ns)
493{
494 return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
495 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
496 (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
497 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
498 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
499 (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
500 (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
501}
502
bf885f8a
AG
503enum drbd_state_rv
504drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
505 union drbd_state mask, union drbd_state val)
b411b363
PR
506{
507 unsigned long flags;
508 union drbd_state os, ns;
bf885f8a 509 enum drbd_state_rv rv;
b411b363
PR
510
511 spin_lock_irqsave(&mdev->req_lock, flags);
512 os = mdev->state;
513 ns.i = (os.i & ~mask.i) | val.i;
514 rv = _drbd_set_state(mdev, ns, f, NULL);
515 ns = mdev->state;
516 spin_unlock_irqrestore(&mdev->req_lock, flags);
517
518 return rv;
519}
520
521/**
522 * drbd_force_state() - Impose a change which happens outside our control on our state
523 * @mdev: DRBD device.
524 * @mask: mask of state bits to change.
525 * @val: value of new state bits.
526 */
527void drbd_force_state(struct drbd_conf *mdev,
528 union drbd_state mask, union drbd_state val)
529{
530 drbd_change_state(mdev, CS_HARD, mask, val);
531}
532
bf885f8a
AG
533static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
534static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
535 union drbd_state,
536 union drbd_state);
77e8fdfc
PR
537enum sanitize_state_warnings {
538 NO_WARNING,
539 ABORTED_ONLINE_VERIFY,
540 ABORTED_RESYNC,
541 CONNECTION_LOST_NEGOTIATING,
542 IMPLICITLY_UPGRADED_DISK,
543 IMPLICITLY_UPGRADED_PDSK,
544};
b411b363 545static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
77e8fdfc 546 union drbd_state ns, enum sanitize_state_warnings *warn);
b411b363
PR
547int drbd_send_state_req(struct drbd_conf *,
548 union drbd_state, union drbd_state);
549
c8b32563
AG
550static enum drbd_state_rv
551_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
552 union drbd_state val)
b411b363
PR
553{
554 union drbd_state os, ns;
555 unsigned long flags;
bf885f8a 556 enum drbd_state_rv rv;
b411b363
PR
557
558 if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
559 return SS_CW_SUCCESS;
560
561 if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
562 return SS_CW_FAILED_BY_PEER;
563
564 rv = 0;
565 spin_lock_irqsave(&mdev->req_lock, flags);
566 os = mdev->state;
567 ns.i = (os.i & ~mask.i) | val.i;
568 ns = sanitize_state(mdev, os, ns, NULL);
569
570 if (!cl_wide_st_chg(mdev, os, ns))
571 rv = SS_CW_NO_NEED;
572 if (!rv) {
573 rv = is_valid_state(mdev, ns);
574 if (rv == SS_SUCCESS) {
575 rv = is_valid_state_transition(mdev, ns, os);
576 if (rv == SS_SUCCESS)
bf885f8a 577 rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
b411b363
PR
578 }
579 }
580 spin_unlock_irqrestore(&mdev->req_lock, flags);
581
582 return rv;
583}
584
585/**
586 * drbd_req_state() - Perform a possibly cluster-wide state change
587 * @mdev: DRBD device.
588 * @mask: mask of state bits to change.
589 * @val: value of new state bits.
590 * @f: flags
591 *
592 * Should not be called directly, use drbd_request_state() or
593 * _drbd_request_state().
594 */
bf885f8a
AG
595static enum drbd_state_rv
596drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
597 union drbd_state val, enum chg_state_flags f)
b411b363
PR
598{
599 struct completion done;
600 unsigned long flags;
601 union drbd_state os, ns;
bf885f8a 602 enum drbd_state_rv rv;
b411b363
PR
603
604 init_completion(&done);
605
606 if (f & CS_SERIALIZE)
607 mutex_lock(&mdev->state_mutex);
608
609 spin_lock_irqsave(&mdev->req_lock, flags);
610 os = mdev->state;
611 ns.i = (os.i & ~mask.i) | val.i;
612 ns = sanitize_state(mdev, os, ns, NULL);
613
614 if (cl_wide_st_chg(mdev, os, ns)) {
615 rv = is_valid_state(mdev, ns);
616 if (rv == SS_SUCCESS)
617 rv = is_valid_state_transition(mdev, ns, os);
618 spin_unlock_irqrestore(&mdev->req_lock, flags);
619
620 if (rv < SS_SUCCESS) {
621 if (f & CS_VERBOSE)
622 print_st_err(mdev, os, ns, rv);
623 goto abort;
624 }
625
626 drbd_state_lock(mdev);
627 if (!drbd_send_state_req(mdev, mask, val)) {
628 drbd_state_unlock(mdev);
629 rv = SS_CW_FAILED_BY_PEER;
630 if (f & CS_VERBOSE)
631 print_st_err(mdev, os, ns, rv);
632 goto abort;
633 }
634
635 wait_event(mdev->state_wait,
636 (rv = _req_st_cond(mdev, mask, val)));
637
638 if (rv < SS_SUCCESS) {
639 drbd_state_unlock(mdev);
640 if (f & CS_VERBOSE)
641 print_st_err(mdev, os, ns, rv);
642 goto abort;
643 }
644 spin_lock_irqsave(&mdev->req_lock, flags);
645 os = mdev->state;
646 ns.i = (os.i & ~mask.i) | val.i;
647 rv = _drbd_set_state(mdev, ns, f, &done);
648 drbd_state_unlock(mdev);
649 } else {
650 rv = _drbd_set_state(mdev, ns, f, &done);
651 }
652
653 spin_unlock_irqrestore(&mdev->req_lock, flags);
654
655 if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
656 D_ASSERT(current != mdev->worker.task);
657 wait_for_completion(&done);
658 }
659
660abort:
661 if (f & CS_SERIALIZE)
662 mutex_unlock(&mdev->state_mutex);
663
664 return rv;
665}
666
667/**
668 * _drbd_request_state() - Request a state change (with flags)
669 * @mdev: DRBD device.
670 * @mask: mask of state bits to change.
671 * @val: value of new state bits.
672 * @f: flags
673 *
674 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
675 * flag, or when logging of failed state change requests is not desired.
676 */
bf885f8a
AG
677enum drbd_state_rv
678_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
679 union drbd_state val, enum chg_state_flags f)
b411b363 680{
bf885f8a 681 enum drbd_state_rv rv;
b411b363
PR
682
683 wait_event(mdev->state_wait,
684 (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
685
686 return rv;
687}
688
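/* Typical call pattern for the state machinery above (illustrative only;
 * compare the real callers further down in this file). NS(field, val)
 * expands to the mask/val pair for a single state field:
 *
 *     enum drbd_state_rv rv;
 *
 *     rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
 *     if (rv < SS_SUCCESS)
 *         dev_err(DEV, "state change refused\n");
 *
 * drbd_force_state() passes CS_HARD instead, which bypasses the validation
 * done in drbd_req_state()/__drbd_set_state().
 */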
689static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
690{
691 dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
692 name,
693 drbd_conn_str(ns.conn),
694 drbd_role_str(ns.role),
695 drbd_role_str(ns.peer),
696 drbd_disk_str(ns.disk),
697 drbd_disk_str(ns.pdsk),
fb22c402 698 is_susp(ns) ? 's' : 'r',
b411b363
PR
699 ns.aftr_isp ? 'a' : '-',
700 ns.peer_isp ? 'p' : '-',
701 ns.user_isp ? 'u' : '-'
702 );
703}
704
bf885f8a
AG
705void print_st_err(struct drbd_conf *mdev, union drbd_state os,
706 union drbd_state ns, enum drbd_state_rv err)
b411b363
PR
707{
708 if (err == SS_IN_TRANSIENT_STATE)
709 return;
710 dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
711 print_st(mdev, " state", os);
712 print_st(mdev, "wanted", ns);
713}
714
715
b411b363
PR
716/**
717 * is_valid_state() - Returns an SS_ error code if ns is not valid
718 * @mdev: DRBD device.
719 * @ns: State to consider.
720 */
bf885f8a
AG
721static enum drbd_state_rv
722is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
b411b363
PR
723{
724 /* See drbd_state_sw_errors in drbd_strings.c */
725
726 enum drbd_fencing_p fp;
bf885f8a 727 enum drbd_state_rv rv = SS_SUCCESS;
b411b363
PR
728
729 fp = FP_DONT_CARE;
730 if (get_ldev(mdev)) {
731 fp = mdev->ldev->dc.fencing;
732 put_ldev(mdev);
733 }
734
735 if (get_net_conf(mdev)) {
736 if (!mdev->net_conf->two_primaries &&
737 ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
738 rv = SS_TWO_PRIMARIES;
739 put_net_conf(mdev);
740 }
741
742 if (rv <= 0)
743 /* already found a reason to abort */;
744 else if (ns.role == R_SECONDARY && mdev->open_cnt)
745 rv = SS_DEVICE_IN_USE;
746
747 else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
748 rv = SS_NO_UP_TO_DATE_DISK;
749
750 else if (fp >= FP_RESOURCE &&
751 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
752 rv = SS_PRIMARY_NOP;
753
754 else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
755 rv = SS_NO_UP_TO_DATE_DISK;
756
757 else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
758 rv = SS_NO_LOCAL_DISK;
759
760 else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
761 rv = SS_NO_REMOTE_DISK;
762
8d4ce82b
LE
763 else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
764 rv = SS_NO_UP_TO_DATE_DISK;
765
b411b363
PR
766 else if ((ns.conn == C_CONNECTED ||
767 ns.conn == C_WF_BITMAP_S ||
768 ns.conn == C_SYNC_SOURCE ||
769 ns.conn == C_PAUSED_SYNC_S) &&
770 ns.disk == D_OUTDATED)
771 rv = SS_CONNECTED_OUTDATES;
772
773 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
774 (mdev->sync_conf.verify_alg[0] == 0))
775 rv = SS_NO_VERIFY_ALG;
776
777 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
778 mdev->agreed_pro_version < 88)
779 rv = SS_NOT_SUPPORTED;
780
fa7d9396
PR
781 else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
782 rv = SS_CONNECTED_OUTDATES;
783
b411b363
PR
784 return rv;
785}
786
787/**
788 * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
789 * @mdev: DRBD device.
790 * @ns: new state.
791 * @os: old state.
792 */
bf885f8a
AG
793static enum drbd_state_rv
794is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
795 union drbd_state os)
b411b363 796{
bf885f8a 797 enum drbd_state_rv rv = SS_SUCCESS;
b411b363
PR
798
799 if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
800 os.conn > C_CONNECTED)
801 rv = SS_RESYNC_RUNNING;
802
803 if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
804 rv = SS_ALREADY_STANDALONE;
805
806 if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
807 rv = SS_IS_DISKLESS;
808
809 if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
810 rv = SS_NO_NET_CONFIG;
811
812 if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
813 rv = SS_LOWER_THAN_OUTDATED;
814
815 if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
816 rv = SS_IN_TRANSIENT_STATE;
817
818 if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
819 rv = SS_IN_TRANSIENT_STATE;
820
821 if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
822 rv = SS_NEED_CONNECTION;
823
824 if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
825 ns.conn != os.conn && os.conn > C_CONNECTED)
826 rv = SS_RESYNC_RUNNING;
827
828 if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
829 os.conn < C_CONNECTED)
830 rv = SS_NEED_CONNECTION;
831
1fc80cf3
PR
832 if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
833 && os.conn < C_WF_REPORT_PARAMS)
834 rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
835
b411b363
PR
836 return rv;
837}
838
77e8fdfc
PR
839static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_warnings warn)
840{
841 static const char *msg_table[] = {
842 [NO_WARNING] = "",
843 [ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
844 [ABORTED_RESYNC] = "Resync aborted.",
845 [CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
846 [IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
847 [IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
848 };
849
850 if (warn != NO_WARNING)
851 dev_warn(DEV, "%s\n", msg_table[warn]);
852}
853
b411b363
PR
854/**
855 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
856 * @mdev: DRBD device.
857 * @os: old state.
858 * @ns: new state.
859 * @warn: where to return an implicit warning for the caller to log (may be NULL)
860 *
861 * When we lose the connection, we have to set the state of the peer's disk (pdsk)
862 * to D_UNKNOWN. This rule and many more along those lines are in this function.
863 */
864static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
77e8fdfc 865 union drbd_state ns, enum sanitize_state_warnings *warn)
b411b363
PR
866{
867 enum drbd_fencing_p fp;
ab17b68f 868 enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
b411b363 869
77e8fdfc
PR
870 if (warn)
871 *warn = NO_WARNING;
872
b411b363
PR
873 fp = FP_DONT_CARE;
874 if (get_ldev(mdev)) {
875 fp = mdev->ldev->dc.fencing;
876 put_ldev(mdev);
877 }
878
879 /* Do not allow network errors to configure a device's network part */
880 if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
881 os.conn <= C_DISCONNECTING)
882 ns.conn = os.conn;
883
f2906e18
LE
884 /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
885 * If you try to go into some Sync* state, that shall fail (elsewhere). */
b411b363 886 if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
f2906e18 887 ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
b411b363
PR
888 ns.conn = os.conn;
889
82f59cc6
LE
890 /* we cannot fail (again) if we already detached */
891 if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
892 ns.disk = D_DISKLESS;
893
894 /* if we are only D_ATTACHING yet,
895 * we can (and should) go directly to D_DISKLESS. */
896 if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
897 ns.disk = D_DISKLESS;
898
b411b363
PR
899 /* After C_DISCONNECTING only C_STANDALONE may follow */
900 if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
901 ns.conn = os.conn;
902
903 if (ns.conn < C_CONNECTED) {
904 ns.peer_isp = 0;
905 ns.peer = R_UNKNOWN;
906 if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
907 ns.pdsk = D_UNKNOWN;
908 }
909
910 /* Clear the aftr_isp when becoming unconfigured */
911 if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
912 ns.aftr_isp = 0;
913
b411b363
PR
914 /* Abort resync if a disk fails/detaches */
915 if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
916 (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
77e8fdfc
PR
917 if (warn)
918 *warn = os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
919 ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
b411b363
PR
920 ns.conn = C_CONNECTED;
921 }
922
b411b363
PR
923 /* Connection breaks down before we finished "Negotiating" */
924 if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
925 get_ldev_if_state(mdev, D_NEGOTIATING)) {
926 if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
927 ns.disk = mdev->new_state_tmp.disk;
928 ns.pdsk = mdev->new_state_tmp.pdsk;
929 } else {
77e8fdfc
PR
930 if (warn)
931 *warn = CONNECTION_LOST_NEGOTIATING;
b411b363
PR
932 ns.disk = D_DISKLESS;
933 ns.pdsk = D_UNKNOWN;
934 }
935 put_ldev(mdev);
936 }
937
ab17b68f
PR
938 /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
939 if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
940 if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
941 ns.disk = D_UP_TO_DATE;
942 if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
943 ns.pdsk = D_UP_TO_DATE;
944 }
945
946 /* Implications of the connection state on the disk states */
947 disk_min = D_DISKLESS;
948 disk_max = D_UP_TO_DATE;
949 pdsk_min = D_INCONSISTENT;
950 pdsk_max = D_UNKNOWN;
951 switch ((enum drbd_conns)ns.conn) {
952 case C_WF_BITMAP_T:
953 case C_PAUSED_SYNC_T:
954 case C_STARTING_SYNC_T:
955 case C_WF_SYNC_UUID:
956 case C_BEHIND:
957 disk_min = D_INCONSISTENT;
958 disk_max = D_OUTDATED;
959 pdsk_min = D_UP_TO_DATE;
960 pdsk_max = D_UP_TO_DATE;
961 break;
962 case C_VERIFY_S:
963 case C_VERIFY_T:
964 disk_min = D_UP_TO_DATE;
965 disk_max = D_UP_TO_DATE;
966 pdsk_min = D_UP_TO_DATE;
967 pdsk_max = D_UP_TO_DATE;
968 break;
969 case C_CONNECTED:
970 disk_min = D_DISKLESS;
971 disk_max = D_UP_TO_DATE;
972 pdsk_min = D_DISKLESS;
973 pdsk_max = D_UP_TO_DATE;
974 break;
975 case C_WF_BITMAP_S:
976 case C_PAUSED_SYNC_S:
977 case C_STARTING_SYNC_S:
978 case C_AHEAD:
979 disk_min = D_UP_TO_DATE;
980 disk_max = D_UP_TO_DATE;
981 pdsk_min = D_INCONSISTENT;
982 pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
983 break;
984 case C_SYNC_TARGET:
985 disk_min = D_INCONSISTENT;
986 disk_max = D_INCONSISTENT;
987 pdsk_min = D_UP_TO_DATE;
988 pdsk_max = D_UP_TO_DATE;
989 break;
990 case C_SYNC_SOURCE:
991 disk_min = D_UP_TO_DATE;
992 disk_max = D_UP_TO_DATE;
993 pdsk_min = D_INCONSISTENT;
994 pdsk_max = D_INCONSISTENT;
995 break;
996 case C_STANDALONE:
997 case C_DISCONNECTING:
998 case C_UNCONNECTED:
999 case C_TIMEOUT:
1000 case C_BROKEN_PIPE:
1001 case C_NETWORK_FAILURE:
1002 case C_PROTOCOL_ERROR:
1003 case C_TEAR_DOWN:
1004 case C_WF_CONNECTION:
1005 case C_WF_REPORT_PARAMS:
1006 case C_MASK:
1007 break;
1008 }
1009 if (ns.disk > disk_max)
1010 ns.disk = disk_max;
1011
1012 if (ns.disk < disk_min) {
77e8fdfc
PR
1013 if (warn)
1014 *warn = IMPLICITLY_UPGRADED_DISK;
ab17b68f
PR
1015 ns.disk = disk_min;
1016 }
1017 if (ns.pdsk > pdsk_max)
1018 ns.pdsk = pdsk_max;
1019
1020 if (ns.pdsk < pdsk_min) {
77e8fdfc
PR
1021 if (warn)
1022 *warn = IMPLICITLY_UPGRADED_PDSK;
ab17b68f
PR
1023 ns.pdsk = pdsk_min;
1024 }
1025
b411b363 1026 if (fp == FP_STONITH &&
0a492166
PR
1027 (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
1028 !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
fb22c402 1029 ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
265be2d0
PR
1030
1031 if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
1032 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
1033 !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
fb22c402 1034 ns.susp_nod = 1; /* Suspend IO while no up-to-date data is accessible, locally or remotely */
b411b363
PR
1035
1036 if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
1037 if (ns.conn == C_SYNC_SOURCE)
1038 ns.conn = C_PAUSED_SYNC_S;
1039 if (ns.conn == C_SYNC_TARGET)
1040 ns.conn = C_PAUSED_SYNC_T;
1041 } else {
1042 if (ns.conn == C_PAUSED_SYNC_S)
1043 ns.conn = C_SYNC_SOURCE;
1044 if (ns.conn == C_PAUSED_SYNC_T)
1045 ns.conn = C_SYNC_TARGET;
1046 }
1047
1048 return ns;
1049}
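/* Worked example of the rules above (illustrative): suppose we were
 * C_SYNC_SOURCE with pdsk D_UP_TO_DATE and the caller only requests
 * ns.conn = C_TIMEOUT. Because ns.conn < C_CONNECTED, sanitize_state()
 * also sets ns.peer = R_UNKNOWN and, since D_UP_TO_DATE lies outside the
 * D_INCONSISTENT..D_UNKNOWN range checked above, resets ns.pdsk to
 * D_UNKNOWN. Further rules (e.g. fencing) may apply depending on role
 * and configuration.
 */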
1050
1051/* helper for __drbd_set_state */
1052static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
1053{
30b743a2
LE
1054 if (mdev->agreed_pro_version < 90)
1055 mdev->ov_start_sector = 0;
1056 mdev->rs_total = drbd_bm_bits(mdev);
1057 mdev->ov_position = 0;
b411b363
PR
1058 if (cs == C_VERIFY_T) {
1059 /* starting online verify from an arbitrary position
1060 * does not fit well into the existing protocol.
1061 * on C_VERIFY_T, we initialize ov_left and friends
1062 * implicitly in receive_DataRequest once the
1063 * first P_OV_REQUEST is received */
1064 mdev->ov_start_sector = ~(sector_t)0;
1065 } else {
1066 unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
30b743a2 1067 if (bit >= mdev->rs_total) {
b411b363
PR
1068 mdev->ov_start_sector =
1069 BM_BIT_TO_SECT(mdev->rs_total - 1);
30b743a2
LE
1070 mdev->rs_total = 1;
1071 } else
1072 mdev->rs_total -= bit;
b411b363
PR
1073 mdev->ov_position = mdev->ov_start_sector;
1074 }
30b743a2 1075 mdev->ov_left = mdev->rs_total;
b411b363
PR
1076}
1077
0778286a
PR
1078static void drbd_resume_al(struct drbd_conf *mdev)
1079{
1080 if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
1081 dev_info(DEV, "Resumed AL updates\n");
1082}
1083
b411b363
PR
1084/**
1085 * __drbd_set_state() - Set a new DRBD state
1086 * @mdev: DRBD device.
1087 * @ns: new state.
1088 * @flags: Flags
1089 * @done: Optional completion that will be completed after after_state_ch() has finished
1090 *
1091 * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
1092 */
bf885f8a
AG
1093enum drbd_state_rv
1094__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
1095 enum chg_state_flags flags, struct completion *done)
b411b363
PR
1096{
1097 union drbd_state os;
bf885f8a 1098 enum drbd_state_rv rv = SS_SUCCESS;
77e8fdfc 1099 enum sanitize_state_warnings ssw;
b411b363
PR
1100 struct after_state_chg_work *ascw;
1101
1102 os = mdev->state;
1103
77e8fdfc 1104 ns = sanitize_state(mdev, os, ns, &ssw);
b411b363
PR
1105
1106 if (ns.i == os.i)
1107 return SS_NOTHING_TO_DO;
1108
1109 if (!(flags & CS_HARD)) {
1110 /* pre-state-change checks ; only look at ns */
1111 /* See drbd_state_sw_errors in drbd_strings.c */
1112
1113 rv = is_valid_state(mdev, ns);
1114 if (rv < SS_SUCCESS) {
1115 /* If the old state was illegal as well, then let
1116 this happen...*/
1117
1616a254 1118 if (is_valid_state(mdev, os) == rv)
b411b363 1119 rv = is_valid_state_transition(mdev, ns, os);
b411b363
PR
1120 } else
1121 rv = is_valid_state_transition(mdev, ns, os);
1122 }
1123
1124 if (rv < SS_SUCCESS) {
1125 if (flags & CS_VERBOSE)
1126 print_st_err(mdev, os, ns, rv);
1127 return rv;
1128 }
1129
77e8fdfc 1130 print_sanitize_warnings(mdev, ssw);
b411b363
PR
1131
1132 {
662d91a2
AG
1133 char *pbp, pb[300];
1134 pbp = pb;
1135 *pbp = 0;
1136 if (ns.role != os.role)
1137 pbp += sprintf(pbp, "role( %s -> %s ) ",
1138 drbd_role_str(os.role),
1139 drbd_role_str(ns.role));
1140 if (ns.peer != os.peer)
1141 pbp += sprintf(pbp, "peer( %s -> %s ) ",
1142 drbd_role_str(os.peer),
1143 drbd_role_str(ns.peer));
1144 if (ns.conn != os.conn)
1145 pbp += sprintf(pbp, "conn( %s -> %s ) ",
1146 drbd_conn_str(os.conn),
1147 drbd_conn_str(ns.conn));
1148 if (ns.disk != os.disk)
1149 pbp += sprintf(pbp, "disk( %s -> %s ) ",
1150 drbd_disk_str(os.disk),
1151 drbd_disk_str(ns.disk));
1152 if (ns.pdsk != os.pdsk)
1153 pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
1154 drbd_disk_str(os.pdsk),
1155 drbd_disk_str(ns.pdsk));
1156 if (is_susp(ns) != is_susp(os))
1157 pbp += sprintf(pbp, "susp( %d -> %d ) ",
1158 is_susp(os),
1159 is_susp(ns));
1160 if (ns.aftr_isp != os.aftr_isp)
1161 pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
1162 os.aftr_isp,
1163 ns.aftr_isp);
1164 if (ns.peer_isp != os.peer_isp)
1165 pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
1166 os.peer_isp,
1167 ns.peer_isp);
1168 if (ns.user_isp != os.user_isp)
1169 pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
1170 os.user_isp,
1171 ns.user_isp);
1172 dev_info(DEV, "%s\n", pb);
b411b363
PR
1173 }
1174
1175 /* solve the race between becoming unconfigured,
1176 * worker doing the cleanup, and
1177 * admin reconfiguring us:
1178 * on (re)configure, first set CONFIG_PENDING,
1179 * then wait for a potentially exiting worker,
1180 * start the worker, and schedule one no_op.
1181 * then proceed with configuration.
1182 */
1183 if (ns.disk == D_DISKLESS &&
1184 ns.conn == C_STANDALONE &&
1185 ns.role == R_SECONDARY &&
1186 !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
1187 set_bit(DEVICE_DYING, &mdev->flags);
1188
82f59cc6
LE
1189 /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
1190 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
1191 * drbd_ldev_destroy() won't happen before our corresponding
1192 * after_state_ch works run, where we put_ldev again. */
1193 if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
1194 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
1195 atomic_inc(&mdev->local_cnt);
1196
1197 mdev->state = ns;
62b0da3a
LE
1198
1199 if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
1200 drbd_print_uuids(mdev, "attached to UUIDs");
1201
b411b363
PR
1202 wake_up(&mdev->misc_wait);
1203 wake_up(&mdev->state_wait);
1204
b411b363
PR
1205 /* aborted verify run. log the last position */
1206 if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
1207 ns.conn < C_CONNECTED) {
1208 mdev->ov_start_sector =
30b743a2 1209 BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
b411b363
PR
1210 dev_info(DEV, "Online Verify reached sector %llu\n",
1211 (unsigned long long)mdev->ov_start_sector);
1212 }
1213
1214 if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
1215 (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
1216 dev_info(DEV, "Syncer continues.\n");
1d7734a0
LE
1217 mdev->rs_paused += (long)jiffies
1218 -(long)mdev->rs_mark_time[mdev->rs_last_mark];
63106d3c
PR
1219 if (ns.conn == C_SYNC_TARGET)
1220 mod_timer(&mdev->resync_timer, jiffies);
b411b363
PR
1221 }
1222
1223 if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
1224 (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
1225 dev_info(DEV, "Resync suspended\n");
1d7734a0 1226 mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
b411b363
PR
1227 }
1228
1229 if (os.conn == C_CONNECTED &&
1230 (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
1d7734a0
LE
1231 unsigned long now = jiffies;
1232 int i;
1233
30b743a2 1234 set_ov_position(mdev, ns.conn);
1d7734a0 1235 mdev->rs_start = now;
0f0601f4
LE
1236 mdev->rs_last_events = 0;
1237 mdev->rs_last_sect_ev = 0;
b411b363
PR
1238 mdev->ov_last_oos_size = 0;
1239 mdev->ov_last_oos_start = 0;
1240
1d7734a0 1241 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
30b743a2 1242 mdev->rs_mark_left[i] = mdev->ov_left;
1d7734a0
LE
1243 mdev->rs_mark_time[i] = now;
1244 }
1245
2649f080
LE
1246 drbd_rs_controller_reset(mdev);
1247
b411b363
PR
1248 if (ns.conn == C_VERIFY_S) {
1249 dev_info(DEV, "Starting Online Verify from sector %llu\n",
1250 (unsigned long long)mdev->ov_position);
1251 mod_timer(&mdev->resync_timer, jiffies);
1252 }
1253 }
1254
1255 if (get_ldev(mdev)) {
1256 u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
1257 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
1258 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
1259
1260 if (test_bit(CRASHED_PRIMARY, &mdev->flags))
1261 mdf |= MDF_CRASHED_PRIMARY;
1262 if (mdev->state.role == R_PRIMARY ||
1263 (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
1264 mdf |= MDF_PRIMARY_IND;
1265 if (mdev->state.conn > C_WF_REPORT_PARAMS)
1266 mdf |= MDF_CONNECTED_IND;
1267 if (mdev->state.disk > D_INCONSISTENT)
1268 mdf |= MDF_CONSISTENT;
1269 if (mdev->state.disk > D_OUTDATED)
1270 mdf |= MDF_WAS_UP_TO_DATE;
1271 if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
1272 mdf |= MDF_PEER_OUT_DATED;
1273 if (mdf != mdev->ldev->md.flags) {
1274 mdev->ldev->md.flags = mdf;
1275 drbd_md_mark_dirty(mdev);
1276 }
1277 if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
1278 drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
1279 put_ldev(mdev);
1280 }
1281
1282 /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
1283 if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
1284 os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
1285 set_bit(CONSIDER_RESYNC, &mdev->flags);
1286
1287 /* Receiver should clean up itself */
1288 if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
1289 drbd_thread_stop_nowait(&mdev->receiver);
1290
1291 /* Now the receiver finished cleaning up itself, it should die */
1292 if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
1293 drbd_thread_stop_nowait(&mdev->receiver);
1294
1295 /* Upon network failure, we need to restart the receiver. */
1296 if (os.conn > C_TEAR_DOWN &&
1297 ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
1298 drbd_thread_restart_nowait(&mdev->receiver);
1299
0778286a
PR
1300 /* Resume AL writing if we get a connection */
1301 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1302 drbd_resume_al(mdev);
1303
b411b363
PR
1304 ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
1305 if (ascw) {
1306 ascw->os = os;
1307 ascw->ns = ns;
1308 ascw->flags = flags;
1309 ascw->w.cb = w_after_state_ch;
1310 ascw->done = done;
1311 drbd_queue_work(&mdev->data.work, &ascw->w);
1312 } else {
1313 dev_warn(DEV, "Could not kmalloc an ascw\n");
1314 }
1315
1316 return rv;
1317}
1318
1319static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1320{
1321 struct after_state_chg_work *ascw =
1322 container_of(w, struct after_state_chg_work, w);
1323 after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
1324 if (ascw->flags & CS_WAIT_COMPLETE) {
1325 D_ASSERT(ascw->done != NULL);
1326 complete(ascw->done);
1327 }
1328 kfree(ascw);
1329
1330 return 1;
1331}
1332
1333static void abw_start_sync(struct drbd_conf *mdev, int rv)
1334{
1335 if (rv) {
1336 dev_err(DEV, "Writing the bitmap failed, not starting resync.\n");
1337 _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
1338 return;
1339 }
1340
1341 switch (mdev->state.conn) {
1342 case C_STARTING_SYNC_T:
1343 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
1344 break;
1345 case C_STARTING_SYNC_S:
1346 drbd_start_resync(mdev, C_SYNC_SOURCE);
1347 break;
1348 }
1349}
1350
20ceb2b2
LE
1351int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
1352 int (*io_fn)(struct drbd_conf *),
1353 char *why, enum bm_flag flags)
19f843aa
LE
1354{
1355 int rv;
1356
1357 D_ASSERT(current == mdev->worker.task);
1358
1359 /* open coded non-blocking drbd_suspend_io(mdev); */
1360 set_bit(SUSPEND_IO, &mdev->flags);
19f843aa 1361
20ceb2b2 1362 drbd_bm_lock(mdev, why, flags);
19f843aa
LE
1363 rv = io_fn(mdev);
1364 drbd_bm_unlock(mdev);
1365
1366 drbd_resume_io(mdev);
1367
1368 return rv;
1369}
1370
b411b363
PR
1371/**
1372 * after_state_ch() - Perform after state change actions that may sleep
1373 * @mdev: DRBD device.
1374 * @os: old state.
1375 * @ns: new state.
1376 * @flags: Flags
1377 */
1378static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1379 union drbd_state ns, enum chg_state_flags flags)
1380{
1381 enum drbd_fencing_p fp;
67098930 1382 enum drbd_req_event what = nothing;
fb22c402 1383 union drbd_state nsm = (union drbd_state){ .i = -1 };
b411b363
PR
1384
1385 if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
1386 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1387 if (mdev->p_uuid)
1388 mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
1389 }
1390
1391 fp = FP_DONT_CARE;
1392 if (get_ldev(mdev)) {
1393 fp = mdev->ldev->dc.fencing;
1394 put_ldev(mdev);
1395 }
1396
1397 /* Inform userspace about the change... */
1398 drbd_bcast_state(mdev, ns);
1399
1400 if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
1401 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
1402 drbd_khelper(mdev, "pri-on-incon-degr");
1403
1404 /* Here we have the actions that are performed after a
1405 state change. This function might sleep */
1406
fb22c402
PR
1407 nsm.i = -1;
1408 if (ns.susp_nod) {
3f98688a
PR
1409 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1410 what = resend;
265be2d0 1411
67098930 1412 if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
3f98688a 1413 what = restart_frozen_disk_io;
fb22c402 1414
3f98688a
PR
1415 if (what != nothing)
1416 nsm.susp_nod = 0;
265be2d0
PR
1417 }
1418
fb22c402 1419 if (ns.susp_fen) {
43a5182c
PR
1420 /* case1: The outdate peer handler is successful: */
1421 if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
b411b363 1422 tl_clear(mdev);
43a5182c
PR
1423 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1424 drbd_uuid_new_current(mdev);
1425 clear_bit(NEW_CUR_UUID, &mdev->flags);
43a5182c 1426 }
b411b363 1427 spin_lock_irq(&mdev->req_lock);
fb22c402 1428 _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
b411b363
PR
1429 spin_unlock_irq(&mdev->req_lock);
1430 }
43a5182c
PR
1431 /* case2: The connection was established again: */
1432 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1433 clear_bit(NEW_CUR_UUID, &mdev->flags);
67098930 1434 what = resend;
fb22c402 1435 nsm.susp_fen = 0;
43a5182c 1436 }
b411b363 1437 }
67098930
PR
1438
1439 if (what != nothing) {
1440 spin_lock_irq(&mdev->req_lock);
1441 _tl_restart(mdev, what);
fb22c402
PR
1442 nsm.i &= mdev->state.i;
1443 _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
67098930 1444 spin_unlock_irq(&mdev->req_lock);
b411b363 1445 }
67098930 1446
5a22db89
LE
1447 /* Became sync source. With protocol >= 96, we still need to send out
1448 * the sync uuid now. Need to do that before any drbd_send_state, or
1449 * the other side may go "paused sync" before receiving the sync uuids,
1450 * which is unexpected. */
1451 if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
1452 (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
1453 mdev->agreed_pro_version >= 96 && get_ldev(mdev)) {
1454 drbd_gen_and_send_sync_uuid(mdev);
1455 put_ldev(mdev);
1456 }
1457
b411b363
PR
1458 /* Do not change the order of the if above and the two below... */
1459 if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
1460 drbd_send_uuids(mdev);
1461 drbd_send_state(mdev);
1462 }
54b956ab
LE
1463 /* No point in queuing send_bitmap if we don't have a connection
1464 * anymore, so check also the _current_ state, not only the new state
1465 * at the time this work was queued. */
1466 if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
1467 mdev->state.conn == C_WF_BITMAP_S)
1468 drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
20ceb2b2
LE
1469 "send_bitmap (WFBitMapS)",
1470 BM_LOCKED_TEST_ALLOWED);
b411b363
PR
1471
1472 /* Lost contact to peer's copy of the data */
1473 if ((os.pdsk >= D_INCONSISTENT &&
1474 os.pdsk != D_UNKNOWN &&
1475 os.pdsk != D_OUTDATED)
1476 && (ns.pdsk < D_INCONSISTENT ||
1477 ns.pdsk == D_UNKNOWN ||
1478 ns.pdsk == D_OUTDATED)) {
b411b363
PR
1479 if (get_ldev(mdev)) {
1480 if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
2c8d1967 1481 mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
fb22c402 1482 if (is_susp(mdev->state)) {
43a5182c
PR
1483 set_bit(NEW_CUR_UUID, &mdev->flags);
1484 } else {
1485 drbd_uuid_new_current(mdev);
1486 drbd_send_uuids(mdev);
1487 }
2c8d1967 1488 }
b411b363
PR
1489 put_ldev(mdev);
1490 }
1491 }
1492
1493 if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
18a50fa2 1494 if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
2c8d1967 1495 drbd_uuid_new_current(mdev);
18a50fa2
PR
1496 drbd_send_uuids(mdev);
1497 }
b411b363
PR
1498
1499 /* D_DISKLESS Peer becomes secondary */
1500 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
20ceb2b2
LE
1501 /* We may still be Primary ourselves.
1502 * No harm done if the bitmap still changes,
1503 * redirtied pages will follow later. */
1504 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
1505 "demote diskless peer", BM_LOCKED_SET_ALLOWED);
19f843aa
LE
1506 put_ldev(mdev);
1507 }
1508
06d33e96
LE
1509 /* Write out all changed bits on demote.
1510 * Though, no need to do that just yet
1511 * if there is still a resync going on */
1512 if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
1513 mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
20ceb2b2
LE
1514 /* No changes to the bitmap expected this time, so assert that,
1515 * even though no harm was done if it did change. */
1516 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
1517 "demote", BM_LOCKED_TEST_ALLOWED);
b411b363
PR
1518 put_ldev(mdev);
1519 }
1520
1521 /* Last part of the attaching process ... */
1522 if (ns.conn >= C_CONNECTED &&
1523 os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
e89b591c 1524 drbd_send_sizes(mdev, 0, 0); /* to start sync... */
b411b363
PR
1525 drbd_send_uuids(mdev);
1526 drbd_send_state(mdev);
1527 }
1528
1529 /* We want to pause/continue resync, tell peer. */
1530 if (ns.conn >= C_CONNECTED &&
1531 ((os.aftr_isp != ns.aftr_isp) ||
1532 (os.user_isp != ns.user_isp)))
1533 drbd_send_state(mdev);
1534
1535 /* In case one of the isp bits got set, suspend other devices. */
1536 if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
1537 (ns.aftr_isp || ns.peer_isp || ns.user_isp))
1538 suspend_other_sg(mdev);
1539
1540 /* Make sure the peer gets informed about any state
1541 changes (ISP bits) that happened while we were in WFReportParams. */
1542 if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
1543 drbd_send_state(mdev);
1544
67531718
PR
1545 if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
1546 drbd_send_state(mdev);
1547
b411b363
PR
1548 /* We are in the process of starting a full sync... */
1549 if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
1550 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
20ceb2b2
LE
1551 /* no other bitmap changes expected during this phase */
1552 drbd_queue_bitmap_io(mdev,
1553 &drbd_bmio_set_n_write, &abw_start_sync,
1554 "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
b411b363
PR
1555
1556 /* We are invalidating ourselves... */
1557 if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
1558 os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
20ceb2b2
LE
1559 /* other bitmap operation expected during this phase */
1560 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
1561 "set_n_write from invalidate", BM_LOCKED_MASK);
b411b363 1562
82f59cc6
LE
1563 /* first half of local IO error, failure to attach,
1564 * or administrative detach */
1565 if (os.disk != D_FAILED && ns.disk == D_FAILED) {
1566 enum drbd_io_error_p eh;
1567 int was_io_error;
1568 /* corresponding get_ldev was in __drbd_set_state, to serialize
1569 * our cleanup here with the transition to D_DISKLESS,
1570 * so it is safe to dereference ldev here. */
1571 eh = mdev->ldev->dc.on_io_error;
1572 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
1573
2b4dd36f
PR
1574 /* Immediately allow completion of all application IO that waits
1575 for completion from the local disk. */
1576 tl_restart(mdev, abort_disk_io);
1577
82f59cc6
LE
1578 /* current state still has to be D_FAILED,
1579 * there is only one way out: to D_DISKLESS,
1580 * and that may only happen after our put_ldev below. */
1581 if (mdev->state.disk != D_FAILED)
1582 dev_err(DEV,
1583 "ASSERT FAILED: disk is %s during detach\n",
1584 drbd_disk_str(mdev->state.disk));
e9e6f3ec
LE
1585
1586 if (drbd_send_state(mdev))
07667347 1587 dev_info(DEV, "Notified peer that I am detaching my disk\n");
e9e6f3ec
LE
1588
1589 drbd_rs_cancel_all(mdev);
b411b363 1590
82f59cc6
LE
1591 /* In case we want to get something to stable storage still,
1592 * this may be the last chance.
1593 * Following put_ldev may transition to D_DISKLESS. */
1594 drbd_md_sync(mdev);
1595 put_ldev(mdev);
1596
1597 if (was_io_error && eh == EP_CALL_HELPER)
e9e6f3ec
LE
1598 drbd_khelper(mdev, "local-io-error");
1599 }
b411b363 1600
82f59cc6
LE
1601 /* second half of local IO error, failure to attach,
1602 * or administrative detach,
1603 * after local_cnt references have reached zero again */
1604 if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
1605 /* We must still be diskless,
1606 * re-attach has to be serialized with this! */
1607 if (mdev->state.disk != D_DISKLESS)
1608 dev_err(DEV,
1609 "ASSERT FAILED: disk is %s while going diskless\n",
1610 drbd_disk_str(mdev->state.disk));
e9e6f3ec 1611
82f59cc6
LE
1612 mdev->rs_total = 0;
1613 mdev->rs_failed = 0;
1614 atomic_set(&mdev->rs_pending_cnt, 0);
9d282875 1615
e9e6f3ec 1616 if (drbd_send_state(mdev))
07667347 1617 dev_info(DEV, "Notified peer that I'm now diskless.\n");
82f59cc6 1618 /* corresponding get_ldev in __drbd_set_state
25985edc 1619 * this may finally trigger drbd_ldev_destroy. */
82f59cc6 1620 put_ldev(mdev);
b411b363
PR
1621 }
1622
738a84b2
PR
1623 /* Notify peer that I had a local IO error and did not detach. */
1624 if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT)
1625 drbd_send_state(mdev);
1626
b411b363
PR
1627 /* Disks got bigger while they were detached */
1628 if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
1629 test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
1630 if (ns.conn == C_CONNECTED)
1631 resync_after_online_grow(mdev);
1632 }
1633
1634 /* A resync finished or aborted, wake paused devices... */
1635 if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
1636 (os.peer_isp && !ns.peer_isp) ||
1637 (os.user_isp && !ns.user_isp))
1638 resume_next_sg(mdev);
1639
af85e8e8
LE
1640 /* sync target done with resync. Explicitly notify peer, even though
1641 * it should (at least for non-empty resyncs) already know itself. */
1642 if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
1643 drbd_send_state(mdev);
1644
79a30d2d
LE
1645 /* This triggers bitmap writeout of potentially still unwritten pages
1646 * if the resync finished cleanly, or aborted because of peer disk
20ceb2b2 1647 * failure, or because of connection loss.
79a30d2d
LE
1648 * For resync aborted because of local disk failure, we cannot do
1649 * any bitmap writeout anymore.
20ceb2b2 1650 * No harm done if some bits change during this phase.
79a30d2d 1651 */
20ceb2b2
LE
1652 if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
1653 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
1654 "write from resync_finished", BM_LOCKED_SET_ALLOWED);
79a30d2d
LE
1655 put_ldev(mdev);
1656 }
02851e9f 1657
f70b3511 1658 /* free tl_hash if we got thawed and are C_STANDALONE */
fb22c402 1659 if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash)
f70b3511
PR
1660 drbd_free_tl_hash(mdev);
1661
b411b363
PR
1662 /* Upon network connection, we need to start the receiver */
1663 if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
1664 drbd_thread_start(&mdev->receiver);
1665
1666 /* Terminate worker thread if we are unconfigured - it will be
1667 restarted as needed... */
1668 if (ns.disk == D_DISKLESS &&
1669 ns.conn == C_STANDALONE &&
1670 ns.role == R_SECONDARY) {
1671 if (os.aftr_isp != ns.aftr_isp)
1672 resume_next_sg(mdev);
1673 /* set in __drbd_set_state, unless CONFIG_PENDING was set */
1674 if (test_bit(DEVICE_DYING, &mdev->flags))
1675 drbd_thread_stop_nowait(&mdev->worker);
1676 }
1677
1678 drbd_md_sync(mdev);
1679}
1680
1681
1682static int drbd_thread_setup(void *arg)
1683{
1684 struct drbd_thread *thi = (struct drbd_thread *) arg;
1685 struct drbd_conf *mdev = thi->mdev;
1686 unsigned long flags;
1687 int retval;
1688
1689restart:
1690 retval = thi->function(thi);
1691
1692 spin_lock_irqsave(&thi->t_lock, flags);
1693
1694 /* if the receiver has been "Exiting", the last thing it did
1695 * was set the conn state to "StandAlone",
1696 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
1697 * and receiver thread will be "started".
1698 * drbd_thread_start needs to set "Restarting" in that case.
1699 * t_state check and assignment needs to be within the same spinlock,
1700 * so either thread_start sees Exiting, and can remap to Restarting,
1701 * or thread_start sees None, and can proceed as normal.
1702 */
1703
1704 if (thi->t_state == Restarting) {
1705 dev_info(DEV, "Restarting %s\n", current->comm);
1706 thi->t_state = Running;
1707 spin_unlock_irqrestore(&thi->t_lock, flags);
1708 goto restart;
1709 }
1710
1711 thi->task = NULL;
1712 thi->t_state = None;
1713 smp_mb();
1714 complete(&thi->stop);
1715 spin_unlock_irqrestore(&thi->t_lock, flags);
1716
1717 dev_info(DEV, "Terminating %s\n", current->comm);
1718
1719 /* Release mod reference taken when thread was started */
1720 module_put(THIS_MODULE);
1721 return retval;
1722}
1723
1724static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
1725 int (*func) (struct drbd_thread *))
1726{
1727 spin_lock_init(&thi->t_lock);
1728 thi->task = NULL;
1729 thi->t_state = None;
1730 thi->function = func;
1731 thi->mdev = mdev;
1732}
1733
1734int drbd_thread_start(struct drbd_thread *thi)
1735{
1736 struct drbd_conf *mdev = thi->mdev;
1737 struct task_struct *nt;
1738 unsigned long flags;
1739
1740 const char *me =
1741 thi == &mdev->receiver ? "receiver" :
1742 thi == &mdev->asender ? "asender" :
1743 thi == &mdev->worker ? "worker" : "NONSENSE";
1744
1745 /* is used from state engine doing drbd_thread_stop_nowait,
1746 * while holding the req lock irqsave */
1747 spin_lock_irqsave(&thi->t_lock, flags);
1748
1749 switch (thi->t_state) {
1750 case None:
1751 dev_info(DEV, "Starting %s thread (from %s [%d])\n",
1752 me, current->comm, current->pid);
1753
1754 /* Get ref on module for thread - this is released when thread exits */
1755 if (!try_module_get(THIS_MODULE)) {
1756 dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
1757 spin_unlock_irqrestore(&thi->t_lock, flags);
81e84650 1758 return false;
b411b363
PR
1759 }
1760
1761 init_completion(&thi->stop);
1762 D_ASSERT(thi->task == NULL);
1763 thi->reset_cpu_mask = 1;
1764 thi->t_state = Running;
1765 spin_unlock_irqrestore(&thi->t_lock, flags);
1766 flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
1767
1768 nt = kthread_create(drbd_thread_setup, (void *) thi,
1769 "drbd%d_%s", mdev_to_minor(mdev), me);
1770
1771 if (IS_ERR(nt)) {
1772 dev_err(DEV, "Couldn't start thread\n");
1773
1774 module_put(THIS_MODULE);
81e84650 1775 return false;
b411b363
PR
1776 }
1777 spin_lock_irqsave(&thi->t_lock, flags);
1778 thi->task = nt;
1779 thi->t_state = Running;
1780 spin_unlock_irqrestore(&thi->t_lock, flags);
1781 wake_up_process(nt);
1782 break;
1783 case Exiting:
1784 thi->t_state = Restarting;
1785 dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
1786 me, current->comm, current->pid);
1787 /* fall through */
1788 case Running:
1789 case Restarting:
1790 default:
1791 spin_unlock_irqrestore(&thi->t_lock, flags);
1792 break;
1793 }
1794
81e84650 1795 return true;
b411b363
PR
1796}
1797
1798
1799void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
1800{
1801 unsigned long flags;
1802
1803 enum drbd_thread_state ns = restart ? Restarting : Exiting;
1804
1805 /* may be called from state engine, holding the req lock irqsave */
1806 spin_lock_irqsave(&thi->t_lock, flags);
1807
1808 if (thi->t_state == None) {
1809 spin_unlock_irqrestore(&thi->t_lock, flags);
1810 if (restart)
1811 drbd_thread_start(thi);
1812 return;
1813 }
1814
1815 if (thi->t_state != ns) {
1816 if (thi->task == NULL) {
1817 spin_unlock_irqrestore(&thi->t_lock, flags);
1818 return;
1819 }
1820
1821 thi->t_state = ns;
1822 smp_mb();
1823 init_completion(&thi->stop);
1824 if (thi->task != current)
1825 force_sig(DRBD_SIGKILL, thi->task);
1826
1827 }
1828
1829 spin_unlock_irqrestore(&thi->t_lock, flags);
1830
1831 if (wait)
1832 wait_for_completion(&thi->stop);
1833}
1834
1835#ifdef CONFIG_SMP
1836/**
1837 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
1838 * @mdev: DRBD device.
1839 *
1840 * Forces all threads of a device onto the same CPU. This is beneficial for
1841 * DRBD's performance. May be overridden by the user's configuration.
1842 */
1843void drbd_calc_cpu_mask(struct drbd_conf *mdev)
1844{
1845 int ord, cpu;
1846
1847 /* user override. */
1848 if (cpumask_weight(mdev->cpu_mask))
1849 return;
1850
1851 ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
1852 for_each_online_cpu(cpu) {
1853 if (ord-- == 0) {
1854 cpumask_set_cpu(cpu, mdev->cpu_mask);
1855 return;
1856 }
1857 }
1858 /* should not be reached */
1859 cpumask_setall(mdev->cpu_mask);
1860}
1861
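/*
 * Illustrative sketch, not part of the original file: the CPU selection
 * above boils down to "take the minor number modulo the count of online
 * CPUs, then pick that online CPU".  A minimal userspace analogue,
 * assuming CPUs are simply numbered 0..nr_online-1:
 */
#include <stdio.h>

static unsigned int pick_cpu(unsigned int minor, unsigned int nr_online)
{
	return minor % nr_online;	/* the "ord"-th online CPU */
}

int main(void)
{
	unsigned int minor;

	for (minor = 0; minor < 6; minor++)
		printf("drbd%u -> cpu%u\n", minor, pick_cpu(minor, 4));
	return 0;
}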
1862/**
1863 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
1864 * @mdev: DRBD device.
1865 *
1866 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
1867 * prematurely.
1868 */
1869void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
1870{
1871 struct task_struct *p = current;
1872 struct drbd_thread *thi =
1873 p == mdev->asender.task ? &mdev->asender :
1874 p == mdev->receiver.task ? &mdev->receiver :
1875 p == mdev->worker.task ? &mdev->worker :
1876 NULL;
1877 ERR_IF(thi == NULL)
1878 return;
1879 if (!thi->reset_cpu_mask)
1880 return;
1881 thi->reset_cpu_mask = 0;
1882 set_cpus_allowed_ptr(p, mdev->cpu_mask);
1883}
1884#endif
1885
1886/* the appropriate socket mutex must be held already */
1887int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
0b70a13d 1888 enum drbd_packets cmd, struct p_header80 *h,
b411b363
PR
1889 size_t size, unsigned msg_flags)
1890{
1891 int sent, ok;
1892
81e84650
AG
1893 ERR_IF(!h) return false;
1894 ERR_IF(!size) return false;
b411b363
PR
1895
1896 h->magic = BE_DRBD_MAGIC;
1897 h->command = cpu_to_be16(cmd);
0b70a13d 1898 h->length = cpu_to_be16(size-sizeof(struct p_header80));
b411b363 1899
b411b363
PR
1900 sent = drbd_send(mdev, sock, h, size, msg_flags);
1901
1902 ok = (sent == size);
0ddc5549
LE
1903 if (!ok && !signal_pending(current))
1904 dev_warn(DEV, "short sent %s size=%d sent=%d\n",
b411b363
PR
1905 cmdname(cmd), (int)size, sent);
1906 return ok;
1907}
1908
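/*
 * Illustrative sketch, not part of the original file: the framing that
 * _drbd_send_cmd() performs before handing the buffer to drbd_send().
 * The length field carries only the payload size, i.e. the total packet
 * size minus the header.  The struct layout below (u32 magic, u16
 * command, u16 length, packed) and the placeholder MAGIC value are
 * assumptions made for this userspace sketch.
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct hdr80 {
	uint32_t magic;
	uint16_t command;
	uint16_t length;	/* payload bytes following the header */
} __attribute__((packed));

static size_t frame_packet(uint8_t *buf, uint16_t cmd,
			   const void *payload, uint16_t payload_len)
{
	const uint32_t MAGIC = 0x12345678;	/* placeholder, not DRBD's real magic */
	struct hdr80 h = {
		.magic   = htonl(MAGIC),
		.command = htons(cmd),
		.length  = htons(payload_len),
	};

	memcpy(buf, &h, sizeof(h));
	memcpy(buf + sizeof(h), payload, payload_len);
	return sizeof(h) + payload_len;
}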
1909/* don't pass the socket. we may only look at it
1910 * when we hold the appropriate socket mutex.
1911 */
1912int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
0b70a13d 1913 enum drbd_packets cmd, struct p_header80 *h, size_t size)
b411b363
PR
1914{
1915 int ok = 0;
1916 struct socket *sock;
1917
1918 if (use_data_socket) {
1919 mutex_lock(&mdev->data.mutex);
1920 sock = mdev->data.socket;
1921 } else {
1922 mutex_lock(&mdev->meta.mutex);
1923 sock = mdev->meta.socket;
1924 }
1925
1926 /* drbd_disconnect() could have called drbd_free_sock()
1927 * while we were waiting in down()... */
1928 if (likely(sock != NULL))
1929 ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);
1930
1931 if (use_data_socket)
1932 mutex_unlock(&mdev->data.mutex);
1933 else
1934 mutex_unlock(&mdev->meta.mutex);
1935 return ok;
1936}
1937
1938int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
1939 size_t size)
1940{
0b70a13d 1941 struct p_header80 h;
b411b363
PR
1942 int ok;
1943
1944 h.magic = BE_DRBD_MAGIC;
1945 h.command = cpu_to_be16(cmd);
1946 h.length = cpu_to_be16(size);
1947
1948 if (!drbd_get_data_sock(mdev))
1949 return 0;
1950
b411b363
PR
1951 ok = (sizeof(h) ==
1952 drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
1953 ok = ok && (size ==
1954 drbd_send(mdev, mdev->data.socket, data, size, 0));
1955
1956 drbd_put_data_sock(mdev);
1957
1958 return ok;
1959}
1960
1961int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
1962{
8e26f9cc 1963 struct p_rs_param_95 *p;
b411b363
PR
1964 struct socket *sock;
1965 int size, rv;
1966 const int apv = mdev->agreed_pro_version;
1967
1968 size = apv <= 87 ? sizeof(struct p_rs_param)
1969 : apv == 88 ? sizeof(struct p_rs_param)
1970 + strlen(mdev->sync_conf.verify_alg) + 1
8e26f9cc
PR
1971 : apv <= 94 ? sizeof(struct p_rs_param_89)
1972 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
b411b363
PR
1973
1974 /* used from admin command context and receiver/worker context.
1975 * to avoid kmalloc, grab the socket right here,
1976 * then use the pre-allocated sbuf there */
1977 mutex_lock(&mdev->data.mutex);
1978 sock = mdev->data.socket;
1979
1980 if (likely(sock != NULL)) {
1981 enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
1982
8e26f9cc 1983 p = &mdev->data.sbuf.rs_param_95;
b411b363
PR
1984
1985 /* initialize verify_alg and csums_alg */
1986 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
1987
1988 p->rate = cpu_to_be32(sc->rate);
8e26f9cc
PR
1989 p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
1990 p->c_delay_target = cpu_to_be32(sc->c_delay_target);
1991 p->c_fill_target = cpu_to_be32(sc->c_fill_target);
1992 p->c_max_rate = cpu_to_be32(sc->c_max_rate);
b411b363
PR
1993
1994 if (apv >= 88)
1995 strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
1996 if (apv >= 89)
1997 strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
1998
1999 rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
2000 } else
2001 rv = 0; /* not ok */
2002
2003 mutex_unlock(&mdev->data.mutex);
2004
2005 return rv;
2006}
2007
2008int drbd_send_protocol(struct drbd_conf *mdev)
2009{
2010 struct p_protocol *p;
cf14c2e9 2011 int size, cf, rv;
b411b363
PR
2012
2013 size = sizeof(struct p_protocol);
2014
2015 if (mdev->agreed_pro_version >= 87)
2016 size += strlen(mdev->net_conf->integrity_alg) + 1;
2017
2018 /* we must not recurse into our own queue,
2019 * as that is blocked during handshake */
2020 p = kmalloc(size, GFP_NOIO);
2021 if (p == NULL)
2022 return 0;
2023
2024 p->protocol = cpu_to_be32(mdev->net_conf->wire_protocol);
2025 p->after_sb_0p = cpu_to_be32(mdev->net_conf->after_sb_0p);
2026 p->after_sb_1p = cpu_to_be32(mdev->net_conf->after_sb_1p);
2027 p->after_sb_2p = cpu_to_be32(mdev->net_conf->after_sb_2p);
b411b363
PR
2028 p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
2029
cf14c2e9
PR
2030 cf = 0;
2031 if (mdev->net_conf->want_lose)
2032 cf |= CF_WANT_LOSE;
2033 if (mdev->net_conf->dry_run) {
2034 if (mdev->agreed_pro_version >= 92)
2035 cf |= CF_DRY_RUN;
2036 else {
2037 dev_err(DEV, "--dry-run is not supported by peer");
7ac314c8 2038 kfree(p);
148efa16 2039 return -1;
cf14c2e9
PR
2040 }
2041 }
2042 p->conn_flags = cpu_to_be32(cf);
2043
b411b363
PR
2044 if (mdev->agreed_pro_version >= 87)
2045 strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
2046
2047 rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
0b70a13d 2048 (struct p_header80 *)p, size);
b411b363
PR
2049 kfree(p);
2050 return rv;
2051}
2052
2053int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
2054{
2055 struct p_uuids p;
2056 int i;
2057
2058 if (!get_ldev_if_state(mdev, D_NEGOTIATING))
2059 return 1;
2060
2061 for (i = UI_CURRENT; i < UI_SIZE; i++)
2062 p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
2063
2064 mdev->comm_bm_set = drbd_bm_total_weight(mdev);
2065 p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
2066 uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
2067 uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
2068 uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
2069 p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
2070
2071 put_ldev(mdev);
2072
2073 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
0b70a13d 2074 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2075}
2076
2077int drbd_send_uuids(struct drbd_conf *mdev)
2078{
2079 return _drbd_send_uuids(mdev, 0);
2080}
2081
2082int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
2083{
2084 return _drbd_send_uuids(mdev, 8);
2085}
2086
62b0da3a
LE
2087void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
2088{
2089 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2090 u64 *uuid = mdev->ldev->md.uuid;
2091 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
2092 text,
2093 (unsigned long long)uuid[UI_CURRENT],
2094 (unsigned long long)uuid[UI_BITMAP],
2095 (unsigned long long)uuid[UI_HISTORY_START],
2096 (unsigned long long)uuid[UI_HISTORY_END]);
2097 put_ldev(mdev);
2098 } else {
2099 dev_info(DEV, "%s effective data uuid: %016llX\n",
2100 text,
2101 (unsigned long long)mdev->ed_uuid);
2102 }
2103}
2104
5a22db89 2105int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
b411b363
PR
2106{
2107 struct p_rs_uuid p;
5a22db89
LE
2108 u64 uuid;
2109
2110 D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
b411b363 2111
4a23f264 2112 uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
5a22db89 2113 drbd_uuid_set(mdev, UI_BITMAP, uuid);
62b0da3a 2114 drbd_print_uuids(mdev, "updated sync UUID");
5a22db89
LE
2115 drbd_md_sync(mdev);
2116 p.uuid = cpu_to_be64(uuid);
b411b363
PR
2117
2118 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
0b70a13d 2119 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2120}
2121
e89b591c 2122int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
b411b363
PR
2123{
2124 struct p_sizes p;
2125 sector_t d_size, u_size;
99432fcc 2126 int q_order_type, max_bio_size;
b411b363
PR
2127 int ok;
2128
2129 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2130 D_ASSERT(mdev->ldev->backing_bdev);
2131 d_size = drbd_get_max_capacity(mdev->ldev);
2132 u_size = mdev->ldev->dc.disk_size;
2133 q_order_type = drbd_queue_order_type(mdev);
99432fcc
PR
2134 max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
2135 max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
b411b363
PR
2136 put_ldev(mdev);
2137 } else {
2138 d_size = 0;
2139 u_size = 0;
2140 q_order_type = QUEUE_ORDERED_NONE;
99432fcc 2141 max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
b411b363
PR
2142 }
2143
6809384c
PR
2144 /* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
2145 if (mdev->agreed_pro_version <= 94)
2146 max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
2147
b411b363
PR
2148 p.d_size = cpu_to_be64(d_size);
2149 p.u_size = cpu_to_be64(u_size);
2150 p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
99432fcc 2151 p.max_bio_size = cpu_to_be32(max_bio_size);
e89b591c
PR
2152 p.queue_order_type = cpu_to_be16(q_order_type);
2153 p.dds_flags = cpu_to_be16(flags);
b411b363
PR
2154
2155 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
0b70a13d 2156 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2157 return ok;
2158}
2159
2160/**
2161 * drbd_send_state() - Sends the drbd state to the peer
2162 * @mdev: DRBD device.
2163 */
2164int drbd_send_state(struct drbd_conf *mdev)
2165{
2166 struct socket *sock;
2167 struct p_state p;
2168 int ok = 0;
2169
2170 /* Grab state lock so we won't send state if we're in the middle
2171 * of a cluster wide state change on another thread */
2172 drbd_state_lock(mdev);
2173
2174 mutex_lock(&mdev->data.mutex);
2175
2176 p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
2177 sock = mdev->data.socket;
2178
2179 if (likely(sock != NULL)) {
2180 ok = _drbd_send_cmd(mdev, sock, P_STATE,
0b70a13d 2181 (struct p_header80 *)&p, sizeof(p), 0);
b411b363
PR
2182 }
2183
2184 mutex_unlock(&mdev->data.mutex);
2185
2186 drbd_state_unlock(mdev);
2187 return ok;
2188}
2189
2190int drbd_send_state_req(struct drbd_conf *mdev,
2191 union drbd_state mask, union drbd_state val)
2192{
2193 struct p_req_state p;
2194
2195 p.mask = cpu_to_be32(mask.i);
2196 p.val = cpu_to_be32(val.i);
2197
2198 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
0b70a13d 2199 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2200}
2201
bf885f8a 2202int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
b411b363
PR
2203{
2204 struct p_req_state_reply p;
2205
2206 p.retcode = cpu_to_be32(retcode);
2207
2208 return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
0b70a13d 2209 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2210}
2211
2212int fill_bitmap_rle_bits(struct drbd_conf *mdev,
2213 struct p_compressed_bm *p,
2214 struct bm_xfer_ctx *c)
2215{
2216 struct bitstream bs;
2217 unsigned long plain_bits;
2218 unsigned long tmp;
2219 unsigned long rl;
2220 unsigned len;
2221 unsigned toggle;
2222 int bits;
2223
2224 /* may we use this feature? */
2225 if ((mdev->sync_conf.use_rle == 0) ||
2226 (mdev->agreed_pro_version < 90))
2227 return 0;
2228
2229 if (c->bit_offset >= c->bm_bits)
2230 return 0; /* nothing to do. */
2231
2232 /* use at most this many bytes */
2233 bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
2234 memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
2235 /* plain bits covered in this code string */
2236 plain_bits = 0;
2237
2238 /* p->encoding & 0x80 stores whether the first run length is set.
2239 * bit offset is implicit.
2240 * start with toggle == 2 to be able to tell the first iteration */
2241 toggle = 2;
2242
2243 /* see how many plain bits we can stuff into one packet
2244 * using RLE and VLI. */
2245 do {
2246 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
2247 : _drbd_bm_find_next(mdev, c->bit_offset);
2248 if (tmp == -1UL)
2249 tmp = c->bm_bits;
2250 rl = tmp - c->bit_offset;
2251
2252 if (toggle == 2) { /* first iteration */
2253 if (rl == 0) {
2254 /* the first checked bit was set,
2255 * store start value, */
2256 DCBP_set_start(p, 1);
2257 /* but skip encoding of zero run length */
2258 toggle = !toggle;
2259 continue;
2260 }
2261 DCBP_set_start(p, 0);
2262 }
2263
2264 /* paranoia: catch zero runlength.
2265 * can only happen if bitmap is modified while we scan it. */
2266 if (rl == 0) {
2267 dev_err(DEV, "unexpected zero runlength while encoding bitmap "
2268 "t:%u bo:%lu\n", toggle, c->bit_offset);
2269 return -1;
2270 }
2271
2272 bits = vli_encode_bits(&bs, rl);
2273 if (bits == -ENOBUFS) /* buffer full */
2274 break;
2275 if (bits <= 0) {
2276 dev_err(DEV, "error while encoding bitmap: %d\n", bits);
2277 return 0;
2278 }
2279
2280 toggle = !toggle;
2281 plain_bits += rl;
2282 c->bit_offset = tmp;
2283 } while (c->bit_offset < c->bm_bits);
2284
2285 len = bs.cur.b - p->code + !!bs.cur.bit;
2286
2287 if (plain_bits < (len << 3)) {
2288 /* incompressible with this method.
2289 * we need to rewind both word and bit position. */
2290 c->bit_offset -= plain_bits;
2291 bm_xfer_ctx_bit_to_word_offset(c);
2292 c->bit_offset = c->word_offset * BITS_PER_LONG;
2293 return 0;
2294 }
2295
2296 /* RLE + VLI was able to compress it just fine.
2297 * update c->word_offset. */
2298 bm_xfer_ctx_bit_to_word_offset(c);
2299
2300 /* store pad_bits */
2301 DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
2302
2303 return len;
2304}
2305
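/*
 * Illustrative sketch, not part of the original file: the break-even test
 * at the end of fill_bitmap_rle_bits().  The RLE+VLI code string is only
 * worth sending if it covers at least eight plain bitmap bits per encoded
 * byte; otherwise the caller falls back to sending plain bitmap words.
 */
#include <stdbool.h>

static bool rle_worthwhile(unsigned long plain_bits, unsigned int code_bytes)
{
	/* inverse of the "plain_bits < (len << 3)" rewind condition above */
	return plain_bits >= ((unsigned long)code_bytes << 3);
}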
f70af118
AG
2306/**
2307 * send_bitmap_rle_or_plain
2308 *
2309 * Return 0 when done, 1 when another iteration is needed, and a negative error
2310 * code upon failure.
2311 */
2312static int
b411b363 2313send_bitmap_rle_or_plain(struct drbd_conf *mdev,
f70af118 2314 struct p_header80 *h, struct bm_xfer_ctx *c)
b411b363
PR
2315{
2316 struct p_compressed_bm *p = (void*)h;
2317 unsigned long num_words;
2318 int len;
2319 int ok;
2320
2321 len = fill_bitmap_rle_bits(mdev, p, c);
2322
2323 if (len < 0)
f70af118 2324 return -EIO;
b411b363
PR
2325
2326 if (len) {
2327 DCBP_set_code(p, RLE_VLI_Bits);
2328 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
2329 sizeof(*p) + len, 0);
2330
2331 c->packets[0]++;
2332 c->bytes[0] += sizeof(*p) + len;
2333
2334 if (c->bit_offset >= c->bm_bits)
2335 len = 0; /* DONE */
2336 } else {
2337 /* was not compressible.
2338 * send a buffer full of plain text bits instead. */
2339 num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
2340 len = num_words * sizeof(long);
2341 if (len)
2342 drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
2343 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
0b70a13d 2344 h, sizeof(struct p_header80) + len, 0);
b411b363
PR
2345 c->word_offset += num_words;
2346 c->bit_offset = c->word_offset * BITS_PER_LONG;
2347
2348 c->packets[1]++;
0b70a13d 2349 c->bytes[1] += sizeof(struct p_header80) + len;
b411b363
PR
2350
2351 if (c->bit_offset > c->bm_bits)
2352 c->bit_offset = c->bm_bits;
2353 }
f70af118
AG
2354 if (ok) {
2355 if (len == 0) {
2356 INFO_bm_xfer_stats(mdev, "send", c);
2357 return 0;
2358 } else
2359 return 1;
2360 }
2361 return -EIO;
b411b363
PR
2362}
2363
2364/* See the comment at receive_bitmap() */
2365int _drbd_send_bitmap(struct drbd_conf *mdev)
2366{
2367 struct bm_xfer_ctx c;
0b70a13d 2368 struct p_header80 *p;
f70af118 2369 int err;
b411b363 2370
81e84650 2371 ERR_IF(!mdev->bitmap) return false;
b411b363
PR
2372
2373 /* maybe we should use some per thread scratch page,
2374 * and allocate that during initial device creation? */
0b70a13d 2375 p = (struct p_header80 *) __get_free_page(GFP_NOIO);
b411b363
PR
2376 if (!p) {
2377 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
81e84650 2378 return false;
b411b363
PR
2379 }
2380
2381 if (get_ldev(mdev)) {
2382 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
2383 dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
2384 drbd_bm_set_all(mdev);
2385 if (drbd_bm_write(mdev)) {
2386 /* write_bm did fail! Leave full sync flag set in Meta P_DATA
2387 * but otherwise process as per normal - need to tell other
2388 * side that a full resync is required! */
2389 dev_err(DEV, "Failed to write bitmap to disk!\n");
2390 } else {
2391 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
2392 drbd_md_sync(mdev);
2393 }
2394 }
2395 put_ldev(mdev);
2396 }
2397
2398 c = (struct bm_xfer_ctx) {
2399 .bm_bits = drbd_bm_bits(mdev),
2400 .bm_words = drbd_bm_words(mdev),
2401 };
2402
2403 do {
f70af118
AG
2404 err = send_bitmap_rle_or_plain(mdev, p, &c);
2405 } while (err > 0);
b411b363
PR
2406
2407 free_page((unsigned long) p);
f70af118 2408 return err == 0;
b411b363
PR
2409}
2410
2411int drbd_send_bitmap(struct drbd_conf *mdev)
2412{
2413 int err;
2414
2415 if (!drbd_get_data_sock(mdev))
2416 return -1;
2417 err = !_drbd_send_bitmap(mdev);
2418 drbd_put_data_sock(mdev);
2419 return err;
2420}
2421
2422int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
2423{
2424 int ok;
2425 struct p_barrier_ack p;
2426
2427 p.barrier = barrier_nr;
2428 p.set_size = cpu_to_be32(set_size);
2429
2430 if (mdev->state.conn < C_CONNECTED)
81e84650 2431 return false;
b411b363 2432 ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
0b70a13d 2433 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2434 return ok;
2435}
2436
2437/**
2438 * _drbd_send_ack() - Sends an ack packet
2439 * @mdev: DRBD device.
2440 * @cmd: Packet command code.
2441 * @sector: sector, needs to be in big endian byte order
2442 * @blksize: size in byte, needs to be in big endian byte order
2443 * @block_id: Id, big endian byte order
2444 */
2445static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
2446 u64 sector,
2447 u32 blksize,
2448 u64 block_id)
2449{
2450 int ok;
2451 struct p_block_ack p;
2452
2453 p.sector = sector;
2454 p.block_id = block_id;
2455 p.blksize = blksize;
2456 p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
2457
2458 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
81e84650 2459 return false;
b411b363 2460 ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
0b70a13d 2461 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2462 return ok;
2463}
2464
2b2bf214
LE
2465/* dp->sector and dp->block_id already/still in network byte order,
2466 * data_size is payload size according to dp->head,
2467 * and may need to be corrected for digest size. */
b411b363 2468int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
2b2bf214 2469 struct p_data *dp, int data_size)
b411b363 2470{
2b2bf214
LE
2471 data_size -= (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
2472 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
b411b363
PR
2473 return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
2474 dp->block_id);
2475}
2476
2477int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
2478 struct p_block_req *rp)
2479{
2480 return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
2481}
2482
2483/**
2484 * drbd_send_ack() - Sends an ack packet
2485 * @mdev: DRBD device.
2486 * @cmd: Packet command code.
2487 * @e: Epoch entry.
2488 */
2489int drbd_send_ack(struct drbd_conf *mdev,
2490 enum drbd_packets cmd, struct drbd_epoch_entry *e)
2491{
2492 return _drbd_send_ack(mdev, cmd,
2493 cpu_to_be64(e->sector),
2494 cpu_to_be32(e->size),
2495 e->block_id);
2496}
2497
2498/* This function misuses the block_id field to signal if the blocks
2499 * are in sync or not. */
2500int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
2501 sector_t sector, int blksize, u64 block_id)
2502{
2503 return _drbd_send_ack(mdev, cmd,
2504 cpu_to_be64(sector),
2505 cpu_to_be32(blksize),
2506 cpu_to_be64(block_id));
2507}
2508
2509int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
2510 sector_t sector, int size, u64 block_id)
2511{
2512 int ok;
2513 struct p_block_req p;
2514
2515 p.sector = cpu_to_be64(sector);
2516 p.block_id = block_id;
2517 p.blksize = cpu_to_be32(size);
2518
2519 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
0b70a13d 2520 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2521 return ok;
2522}
2523
2524int drbd_send_drequest_csum(struct drbd_conf *mdev,
2525 sector_t sector, int size,
2526 void *digest, int digest_size,
2527 enum drbd_packets cmd)
2528{
2529 int ok;
2530 struct p_block_req p;
2531
2532 p.sector = cpu_to_be64(sector);
2533 p.block_id = BE_DRBD_MAGIC + 0xbeef;
2534 p.blksize = cpu_to_be32(size);
2535
2536 p.head.magic = BE_DRBD_MAGIC;
2537 p.head.command = cpu_to_be16(cmd);
0b70a13d 2538 p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
b411b363
PR
2539
2540 mutex_lock(&mdev->data.mutex);
2541
2542 ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
2543 ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
2544
2545 mutex_unlock(&mdev->data.mutex);
2546
2547 return ok;
2548}
2549
2550int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
2551{
2552 int ok;
2553 struct p_block_req p;
2554
2555 p.sector = cpu_to_be64(sector);
2556 p.block_id = BE_DRBD_MAGIC + 0xbabe;
2557 p.blksize = cpu_to_be32(size);
2558
2559 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
0b70a13d 2560 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2561 return ok;
2562}
2563
2564/* called on sndtimeo
81e84650
AG
2565 * returns false if we should retry,
2566 * true if we think the connection is dead
b411b363
PR
2567 */
2568static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
2569{
2570 int drop_it;
2571 /* long elapsed = (long)(jiffies - mdev->last_received); */
2572
2573 drop_it = mdev->meta.socket == sock
2574 || !mdev->asender.task
2575 || get_t_state(&mdev->asender) != Running
2576 || mdev->state.conn < C_CONNECTED;
2577
2578 if (drop_it)
81e84650 2579 return true;
b411b363
PR
2580
2581 drop_it = !--mdev->ko_count;
2582 if (!drop_it) {
2583 dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
2584 current->comm, current->pid, mdev->ko_count);
2585 request_ping(mdev);
2586 }
2587
2588 return drop_it; /* && (mdev->state == R_PRIMARY) */;
2589}
2590
2591/* The idea of sendpage seems to be to put some kind of reference
2592 * to the page into the skb, and to hand it over to the NIC. In
2593 * this process get_page() gets called.
2594 *
2595 * As soon as the page was really sent over the network put_page()
2596 * gets called by some part of the network layer. [ NIC driver? ]
2597 *
2598 * [ get_page() / put_page() increment/decrement the count. If count
2599 * reaches 0 the page will be freed. ]
2600 *
2601 * This works nicely with pages from FSs.
2602 * But this means that in protocol A we might signal IO completion too early!
2603 *
2604 * In order not to corrupt data during a resync we must make sure
2605 * that we do not reuse our own buffer pages (EEs) too early, therefore
2606 * we have the net_ee list.
2607 *
2608 * XFS still seems to have problems: it submits pages with page_count == 0!
2609 * As a workaround, we disable sendpage on pages
2610 * with page_count == 0 or PageSlab.
2611 */
2612static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
ba11ad9a 2613 int offset, size_t size, unsigned msg_flags)
b411b363 2614{
ba11ad9a 2615 int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
b411b363
PR
2616 kunmap(page);
2617 if (sent == size)
2618 mdev->send_cnt += size>>9;
2619 return sent == size;
2620}
2621
2622static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
ba11ad9a 2623 int offset, size_t size, unsigned msg_flags)
b411b363
PR
2624{
2625 mm_segment_t oldfs = get_fs();
2626 int sent, ok;
2627 int len = size;
2628
2629 /* e.g. XFS meta- & log-data is in slab pages, which have a
2630 * page_count of 0 and/or have PageSlab() set.
2631 * we cannot use send_page for those, as that does get_page();
2632 * put_page(); and would cause either a VM_BUG directly, or
2633 * __page_cache_release a page that would actually still be referenced
2634 * by someone, leading to some obscure delayed Oops somewhere else. */
2635 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
ba11ad9a 2636 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
b411b363 2637
ba11ad9a 2638 msg_flags |= MSG_NOSIGNAL;
b411b363
PR
2639 drbd_update_congested(mdev);
2640 set_fs(KERNEL_DS);
2641 do {
2642 sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
2643 offset, len,
ba11ad9a 2644 msg_flags);
b411b363
PR
2645 if (sent == -EAGAIN) {
2646 if (we_should_drop_the_connection(mdev,
2647 mdev->data.socket))
2648 break;
2649 else
2650 continue;
2651 }
2652 if (sent <= 0) {
2653 dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
2654 __func__, (int)size, len, sent);
2655 break;
2656 }
2657 len -= sent;
2658 offset += sent;
2659 } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
2660 set_fs(oldfs);
2661 clear_bit(NET_CONGESTED, &mdev->flags);
2662
2663 ok = (len == 0);
2664 if (likely(ok))
2665 mdev->send_cnt += size>>9;
2666 return ok;
2667}
2668
2669static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
2670{
2671 struct bio_vec *bvec;
2672 int i;
ba11ad9a 2673 /* hint all but last page with MSG_MORE */
b411b363
PR
2674 __bio_for_each_segment(bvec, bio, i, 0) {
2675 if (!_drbd_no_send_page(mdev, bvec->bv_page,
ba11ad9a
LE
2676 bvec->bv_offset, bvec->bv_len,
2677 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
b411b363
PR
2678 return 0;
2679 }
2680 return 1;
2681}
2682
2683static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
2684{
2685 struct bio_vec *bvec;
2686 int i;
ba11ad9a 2687 /* hint all but last page with MSG_MORE */
b411b363
PR
2688 __bio_for_each_segment(bvec, bio, i, 0) {
2689 if (!_drbd_send_page(mdev, bvec->bv_page,
ba11ad9a
LE
2690 bvec->bv_offset, bvec->bv_len,
2691 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
b411b363
PR
2692 return 0;
2693 }
b411b363
PR
2694 return 1;
2695}
2696
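/*
 * Illustrative sketch, not part of the original file: the MSG_MORE
 * hinting used by _drbd_send_bio() and _drbd_send_zc_bio() above, as a
 * plain userspace loop over an array of buffers.  Every chunk except the
 * last is sent with MSG_MORE so the network stack may coalesce them.
 */
#include <sys/types.h>
#include <sys/socket.h>

struct chunk {
	const void *base;
	size_t len;
};

static int send_chunks(int fd, const struct chunk *c, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		int flags = (i == n - 1) ? 0 : MSG_MORE;

		if (send(fd, c[i].base, c[i].len, flags) != (ssize_t)c[i].len)
			return 0;	/* short send: give up, like the bio senders */
	}
	return 1;
}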
45bb912b
LE
2697static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
2698{
2699 struct page *page = e->pages;
2700 unsigned len = e->size;
ba11ad9a 2701 /* hint all but last page with MSG_MORE */
45bb912b
LE
2702 page_chain_for_each(page) {
2703 unsigned l = min_t(unsigned, len, PAGE_SIZE);
ba11ad9a
LE
2704 if (!_drbd_send_page(mdev, page, 0, l,
2705 page_chain_next(page) ? MSG_MORE : 0))
45bb912b
LE
2706 return 0;
2707 len -= l;
2708 }
2709 return 1;
2710}
2711
76d2e7ec
PR
2712static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
2713{
2714 if (mdev->agreed_pro_version >= 95)
2715 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
76d2e7ec
PR
2716 (bi_rw & REQ_FUA ? DP_FUA : 0) |
2717 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
2718 (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
2719 else
721a9602 2720 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
76d2e7ec
PR
2721}
2722
b411b363
PR
2723/* Used to send write requests
2724 * R_PRIMARY -> Peer (P_DATA)
2725 */
2726int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2727{
2728 int ok = 1;
2729 struct p_data p;
2730 unsigned int dp_flags = 0;
2731 void *dgb;
2732 int dgs;
2733
2734 if (!drbd_get_data_sock(mdev))
2735 return 0;
2736
2737 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2738 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2739
d5373389 2740 if (req->size <= DRBD_MAX_SIZE_H80_PACKET) {
0b70a13d
PR
2741 p.head.h80.magic = BE_DRBD_MAGIC;
2742 p.head.h80.command = cpu_to_be16(P_DATA);
2743 p.head.h80.length =
2744 cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2745 } else {
2746 p.head.h95.magic = BE_DRBD_MAGIC_BIG;
2747 p.head.h95.command = cpu_to_be16(P_DATA);
2748 p.head.h95.length =
2749 cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2750 }
b411b363
PR
2751
2752 p.sector = cpu_to_be64(req->sector);
2753 p.block_id = (unsigned long)req;
2754 p.seq_num = cpu_to_be32(req->seq_num =
2755 atomic_add_return(1, &mdev->packet_seq));
b411b363 2756
76d2e7ec
PR
2757 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
2758
b411b363
PR
2759 if (mdev->state.conn >= C_SYNC_SOURCE &&
2760 mdev->state.conn <= C_PAUSED_SYNC_T)
2761 dp_flags |= DP_MAY_SET_IN_SYNC;
2762
2763 p.dp_flags = cpu_to_be32(dp_flags);
b411b363
PR
2764 set_bit(UNPLUG_REMOTE, &mdev->flags);
2765 ok = (sizeof(p) ==
ba11ad9a 2766 drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
b411b363
PR
2767 if (ok && dgs) {
2768 dgb = mdev->int_dig_out;
45bb912b 2769 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
cab2f74b 2770 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
b411b363
PR
2771 }
2772 if (ok) {
470be44a
LE
2773 /* For protocol A, we have to memcpy the payload into
2774 * socket buffers, as we may complete right away
2775 * as soon as we handed it over to tcp, at which point the data
2776 * pages may become invalid.
2777 *
2778 * For data-integrity enabled, we copy it as well, so we can be
2779 * sure that even if the bio pages may still be modified, it
2780 * won't change the data on the wire, thus if the digest checks
2781 * out ok after sending on this side, but does not fit on the
2782 * receiving side, we have surely detected corruption elsewhere.
2783 */
2784 if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs)
b411b363
PR
2785 ok = _drbd_send_bio(mdev, req->master_bio);
2786 else
2787 ok = _drbd_send_zc_bio(mdev, req->master_bio);
470be44a
LE
2788
2789 /* double check digest, sometimes buffers have been modified in flight. */
2790 if (dgs > 0 && dgs <= 64) {
24c4830c 2791 /* 64 byte, 512 bit, is the largest digest size
470be44a
LE
2792 * currently supported in kernel crypto. */
2793 unsigned char digest[64];
2794 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
2795 if (memcmp(mdev->int_dig_out, digest, dgs)) {
2796 dev_warn(DEV,
2797 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
2798 (unsigned long long)req->sector, req->size);
2799 }
2800 } /* else if (dgs > 64) {
2801 ... Be noisy about digest too large ...
2802 } */
b411b363
PR
2803 }
2804
2805 drbd_put_data_sock(mdev);
bd26bfc5 2806
b411b363
PR
2807 return ok;
2808}
2809
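/*
 * Illustrative sketch, not part of the original file: the "double check
 * digest" step in drbd_send_dblock() above, reduced to its core.  A
 * checksum is taken before the payload is handed to the transport and
 * again afterwards; a mismatch means an upper layer modified the buffer
 * while it was in flight.  zlib's crc32() stands in for the configured
 * integrity hash.
 */
#include <stdio.h>
#include <zlib.h>

static void send_with_recheck(const unsigned char *buf, unsigned int len)
{
	unsigned long before = crc32(0L, buf, len);

	/* ... hand "buf" to the transport here (drbd_send in the original) ... */

	if (crc32(0L, buf, len) != before)
		fprintf(stderr,
			"Digest mismatch, buffer modified by upper layers during write\n");
}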
2810/* answer packet, used to send data back for read requests:
2811 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
2812 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
2813 */
2814int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
2815 struct drbd_epoch_entry *e)
2816{
2817 int ok;
2818 struct p_data p;
2819 void *dgb;
2820 int dgs;
2821
2822 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2823 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2824
d5373389 2825 if (e->size <= DRBD_MAX_SIZE_H80_PACKET) {
0b70a13d
PR
2826 p.head.h80.magic = BE_DRBD_MAGIC;
2827 p.head.h80.command = cpu_to_be16(cmd);
2828 p.head.h80.length =
2829 cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2830 } else {
2831 p.head.h95.magic = BE_DRBD_MAGIC_BIG;
2832 p.head.h95.command = cpu_to_be16(cmd);
2833 p.head.h95.length =
2834 cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2835 }
b411b363
PR
2836
2837 p.sector = cpu_to_be64(e->sector);
2838 p.block_id = e->block_id;
2839 /* p.seq_num = 0; No sequence numbers here.. */
2840
2841 /* Only called by our kernel thread.
2842 * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
2843 * in response to admin command or module unload.
2844 */
2845 if (!drbd_get_data_sock(mdev))
2846 return 0;
2847
0b70a13d 2848 ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
b411b363
PR
2849 if (ok && dgs) {
2850 dgb = mdev->int_dig_out;
45bb912b 2851 drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
cab2f74b 2852 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
b411b363
PR
2853 }
2854 if (ok)
45bb912b 2855 ok = _drbd_send_zc_ee(mdev, e);
b411b363
PR
2856
2857 drbd_put_data_sock(mdev);
bd26bfc5 2858
b411b363
PR
2859 return ok;
2860}
2861
73a01a18
PR
2862int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
2863{
2864 struct p_block_desc p;
2865
2866 p.sector = cpu_to_be64(req->sector);
2867 p.blksize = cpu_to_be32(req->size);
2868
2869 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p));
2870}
2871
b411b363
PR
2872/*
2873 drbd_send distinguishes two cases:
2874
2875 Packets sent via the data socket "sock"
2876 and packets sent via the meta data socket "msock"
2877
2878 sock msock
2879 -----------------+-------------------------+------------------------------
2880 timeout conf.timeout / 2 conf.timeout / 2
2881 timeout action send a ping via msock Abort communication
2882 and close all sockets
2883*/
2884
2885/*
2886 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
2887 */
2888int drbd_send(struct drbd_conf *mdev, struct socket *sock,
2889 void *buf, size_t size, unsigned msg_flags)
2890{
2891 struct kvec iov;
2892 struct msghdr msg;
2893 int rv, sent = 0;
2894
2895 if (!sock)
2896 return -1000;
2897
2898 /* THINK if (signal_pending) return ... ? */
2899
2900 iov.iov_base = buf;
2901 iov.iov_len = size;
2902
2903 msg.msg_name = NULL;
2904 msg.msg_namelen = 0;
2905 msg.msg_control = NULL;
2906 msg.msg_controllen = 0;
2907 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
2908
2909 if (sock == mdev->data.socket) {
2910 mdev->ko_count = mdev->net_conf->ko_count;
2911 drbd_update_congested(mdev);
2912 }
2913 do {
2914 /* STRANGE
2915 * tcp_sendmsg does _not_ use its size parameter at all ?
2916 *
2917 * -EAGAIN on timeout, -EINTR on signal.
2918 */
2919/* THINK
2920 * do we need to block DRBD_SIG if sock == &meta.socket ??
2921 * otherwise wake_asender() might interrupt some send_*Ack !
2922 */
2923 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
2924 if (rv == -EAGAIN) {
2925 if (we_should_drop_the_connection(mdev, sock))
2926 break;
2927 else
2928 continue;
2929 }
2930 D_ASSERT(rv != 0);
2931 if (rv == -EINTR) {
2932 flush_signals(current);
2933 rv = 0;
2934 }
2935 if (rv < 0)
2936 break;
2937 sent += rv;
2938 iov.iov_base += rv;
2939 iov.iov_len -= rv;
2940 } while (sent < size);
2941
2942 if (sock == mdev->data.socket)
2943 clear_bit(NET_CONGESTED, &mdev->flags);
2944
2945 if (rv <= 0) {
2946 if (rv != -EAGAIN) {
2947 dev_err(DEV, "%s_sendmsg returned %d\n",
2948 sock == mdev->meta.socket ? "msock" : "sock",
2949 rv);
2950 drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
2951 } else
2952 drbd_force_state(mdev, NS(conn, C_TIMEOUT));
2953 }
2954
2955 return sent;
2956}
2957
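/*
 * Illustrative sketch, not part of the original file: the partial-send
 * loop at the heart of drbd_send(), as a minimal userspace analogue over
 * a blocking TCP socket.  The signal and timeout policy (flush_signals,
 * we_should_drop_the_connection) is reduced to the bare retry logic.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <errno.h>

static ssize_t send_all(int fd, const void *buf, size_t size)
{
	const char *p = buf;
	size_t sent = 0;

	while (sent < size) {
		ssize_t rv = send(fd, p + sent, size - sent, MSG_NOSIGNAL);

		if (rv == -1 && errno == EINTR)
			continue;	/* the original flushes signals and retries */
		if (rv <= 0)
			return rv;	/* caller decides whether to drop the connection */
		sent += rv;
	}
	return (ssize_t)sent;
}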
2958static int drbd_open(struct block_device *bdev, fmode_t mode)
2959{
2960 struct drbd_conf *mdev = bdev->bd_disk->private_data;
2961 unsigned long flags;
2962 int rv = 0;
2963
2a48fc0a 2964 mutex_lock(&drbd_main_mutex);
b411b363
PR
2965 spin_lock_irqsave(&mdev->req_lock, flags);
2966 /* to have a stable mdev->state.role
2967 * and no race with updating open_cnt */
2968
2969 if (mdev->state.role != R_PRIMARY) {
2970 if (mode & FMODE_WRITE)
2971 rv = -EROFS;
2972 else if (!allow_oos)
2973 rv = -EMEDIUMTYPE;
2974 }
2975
2976 if (!rv)
2977 mdev->open_cnt++;
2978 spin_unlock_irqrestore(&mdev->req_lock, flags);
2a48fc0a 2979 mutex_unlock(&drbd_main_mutex);
b411b363
PR
2980
2981 return rv;
2982}
2983
2984static int drbd_release(struct gendisk *gd, fmode_t mode)
2985{
2986 struct drbd_conf *mdev = gd->private_data;
2a48fc0a 2987 mutex_lock(&drbd_main_mutex);
b411b363 2988 mdev->open_cnt--;
2a48fc0a 2989 mutex_unlock(&drbd_main_mutex);
b411b363
PR
2990 return 0;
2991}
2992
b411b363
PR
2993static void drbd_set_defaults(struct drbd_conf *mdev)
2994{
85f4cc17
PR
2995 /* This way we get a compile error when sync_conf grows,
2996 and we forgot to initialize it here */
2997 mdev->sync_conf = (struct syncer_conf) {
2998 /* .rate = */ DRBD_RATE_DEF,
2999 /* .after = */ DRBD_AFTER_DEF,
3000 /* .al_extents = */ DRBD_AL_EXTENTS_DEF,
85f4cc17
PR
3001 /* .verify_alg = */ {}, 0,
3002 /* .cpu_mask = */ {}, 0,
3003 /* .csums_alg = */ {}, 0,
e756414f 3004 /* .use_rle = */ 0,
9a31d716
PR
3005 /* .on_no_data = */ DRBD_ON_NO_DATA_DEF,
3006 /* .c_plan_ahead = */ DRBD_C_PLAN_AHEAD_DEF,
3007 /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
3008 /* .c_fill_target = */ DRBD_C_FILL_TARGET_DEF,
0f0601f4
LE
3009 /* .c_max_rate = */ DRBD_C_MAX_RATE_DEF,
3010 /* .c_min_rate = */ DRBD_C_MIN_RATE_DEF
85f4cc17
PR
3011 };
3012
3013 /* Have to use that way, because the layout differs between
3014 big endian and little endian */
b411b363
PR
3015 mdev->state = (union drbd_state) {
3016 { .role = R_SECONDARY,
3017 .peer = R_UNKNOWN,
3018 .conn = C_STANDALONE,
3019 .disk = D_DISKLESS,
3020 .pdsk = D_UNKNOWN,
fb22c402
PR
3021 .susp = 0,
3022 .susp_nod = 0,
3023 .susp_fen = 0
b411b363
PR
3024 } };
3025}
3026
3027void drbd_init_set_defaults(struct drbd_conf *mdev)
3028{
3029 /* the memset(,0,) did most of this.
3030 * note: only assignments, no allocation in here */
3031
3032 drbd_set_defaults(mdev);
3033
b411b363
PR
3034 atomic_set(&mdev->ap_bio_cnt, 0);
3035 atomic_set(&mdev->ap_pending_cnt, 0);
3036 atomic_set(&mdev->rs_pending_cnt, 0);
3037 atomic_set(&mdev->unacked_cnt, 0);
3038 atomic_set(&mdev->local_cnt, 0);
3039 atomic_set(&mdev->net_cnt, 0);
3040 atomic_set(&mdev->packet_seq, 0);
3041 atomic_set(&mdev->pp_in_use, 0);
435f0740 3042 atomic_set(&mdev->pp_in_use_by_net, 0);
778f271d 3043 atomic_set(&mdev->rs_sect_in, 0);
0f0601f4 3044 atomic_set(&mdev->rs_sect_ev, 0);
759fbdfb 3045 atomic_set(&mdev->ap_in_flight, 0);
b411b363
PR
3046
3047 mutex_init(&mdev->md_io_mutex);
3048 mutex_init(&mdev->data.mutex);
3049 mutex_init(&mdev->meta.mutex);
3050 sema_init(&mdev->data.work.s, 0);
3051 sema_init(&mdev->meta.work.s, 0);
3052 mutex_init(&mdev->state_mutex);
3053
3054 spin_lock_init(&mdev->data.work.q_lock);
3055 spin_lock_init(&mdev->meta.work.q_lock);
3056
3057 spin_lock_init(&mdev->al_lock);
3058 spin_lock_init(&mdev->req_lock);
3059 spin_lock_init(&mdev->peer_seq_lock);
3060 spin_lock_init(&mdev->epoch_lock);
3061
3062 INIT_LIST_HEAD(&mdev->active_ee);
3063 INIT_LIST_HEAD(&mdev->sync_ee);
3064 INIT_LIST_HEAD(&mdev->done_ee);
3065 INIT_LIST_HEAD(&mdev->read_ee);
3066 INIT_LIST_HEAD(&mdev->net_ee);
3067 INIT_LIST_HEAD(&mdev->resync_reads);
3068 INIT_LIST_HEAD(&mdev->data.work.q);
3069 INIT_LIST_HEAD(&mdev->meta.work.q);
3070 INIT_LIST_HEAD(&mdev->resync_work.list);
3071 INIT_LIST_HEAD(&mdev->unplug_work.list);
e9e6f3ec 3072 INIT_LIST_HEAD(&mdev->go_diskless.list);
b411b363 3073 INIT_LIST_HEAD(&mdev->md_sync_work.list);
c4752ef1 3074 INIT_LIST_HEAD(&mdev->start_resync_work.list);
b411b363 3075 INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
0ced55a3 3076
794abb75 3077 mdev->resync_work.cb = w_resync_timer;
b411b363 3078 mdev->unplug_work.cb = w_send_write_hint;
e9e6f3ec 3079 mdev->go_diskless.cb = w_go_diskless;
b411b363
PR
3080 mdev->md_sync_work.cb = w_md_sync;
3081 mdev->bm_io_work.w.cb = w_bitmap_io;
370a43e7 3082 mdev->start_resync_work.cb = w_start_resync;
b411b363
PR
3083 init_timer(&mdev->resync_timer);
3084 init_timer(&mdev->md_sync_timer);
370a43e7 3085 init_timer(&mdev->start_resync_timer);
7fde2be9 3086 init_timer(&mdev->request_timer);
b411b363
PR
3087 mdev->resync_timer.function = resync_timer_fn;
3088 mdev->resync_timer.data = (unsigned long) mdev;
3089 mdev->md_sync_timer.function = md_sync_timer_fn;
3090 mdev->md_sync_timer.data = (unsigned long) mdev;
370a43e7
PR
3091 mdev->start_resync_timer.function = start_resync_timer_fn;
3092 mdev->start_resync_timer.data = (unsigned long) mdev;
7fde2be9
PR
3093 mdev->request_timer.function = request_timer_fn;
3094 mdev->request_timer.data = (unsigned long) mdev;
b411b363
PR
3095
3096 init_waitqueue_head(&mdev->misc_wait);
3097 init_waitqueue_head(&mdev->state_wait);
84dfb9f5 3098 init_waitqueue_head(&mdev->net_cnt_wait);
b411b363
PR
3099 init_waitqueue_head(&mdev->ee_wait);
3100 init_waitqueue_head(&mdev->al_wait);
3101 init_waitqueue_head(&mdev->seq_wait);
3102
3103 drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
3104 drbd_thread_init(mdev, &mdev->worker, drbd_worker);
3105 drbd_thread_init(mdev, &mdev->asender, drbd_asender);
3106
3107 mdev->agreed_pro_version = PRO_VERSION_MAX;
2451fc3b 3108 mdev->write_ordering = WO_bdev_flush;
b411b363 3109 mdev->resync_wenr = LC_FREE;
99432fcc
PR
3110 mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
3111 mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
b411b363
PR
3112}
3113
3114void drbd_mdev_cleanup(struct drbd_conf *mdev)
3115{
1d7734a0 3116 int i;
b411b363
PR
3117 if (mdev->receiver.t_state != None)
3118 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
3119 mdev->receiver.t_state);
3120
3121 /* no need to lock it, I'm the only thread alive */
3122 if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
3123 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
3124 mdev->al_writ_cnt =
3125 mdev->bm_writ_cnt =
3126 mdev->read_cnt =
3127 mdev->recv_cnt =
3128 mdev->send_cnt =
3129 mdev->writ_cnt =
3130 mdev->p_size =
3131 mdev->rs_start =
3132 mdev->rs_total =
1d7734a0
LE
3133 mdev->rs_failed = 0;
3134 mdev->rs_last_events = 0;
0f0601f4 3135 mdev->rs_last_sect_ev = 0;
1d7734a0
LE
3136 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
3137 mdev->rs_mark_left[i] = 0;
3138 mdev->rs_mark_time[i] = 0;
3139 }
b411b363
PR
3140 D_ASSERT(mdev->net_conf == NULL);
3141
3142 drbd_set_my_capacity(mdev, 0);
3143 if (mdev->bitmap) {
3144 /* maybe never allocated. */
02d9a94b 3145 drbd_bm_resize(mdev, 0, 1);
b411b363
PR
3146 drbd_bm_cleanup(mdev);
3147 }
3148
3149 drbd_free_resources(mdev);
0778286a 3150 clear_bit(AL_SUSPENDED, &mdev->flags);
b411b363
PR
3151
3152 /*
3153 * currently we drbd_init_ee only on module load, so
3154 * we may do drbd_release_ee only on module unload!
3155 */
3156 D_ASSERT(list_empty(&mdev->active_ee));
3157 D_ASSERT(list_empty(&mdev->sync_ee));
3158 D_ASSERT(list_empty(&mdev->done_ee));
3159 D_ASSERT(list_empty(&mdev->read_ee));
3160 D_ASSERT(list_empty(&mdev->net_ee));
3161 D_ASSERT(list_empty(&mdev->resync_reads));
3162 D_ASSERT(list_empty(&mdev->data.work.q));
3163 D_ASSERT(list_empty(&mdev->meta.work.q));
3164 D_ASSERT(list_empty(&mdev->resync_work.list));
3165 D_ASSERT(list_empty(&mdev->unplug_work.list));
e9e6f3ec 3166 D_ASSERT(list_empty(&mdev->go_diskless.list));
2265b473
LE
3167
3168 drbd_set_defaults(mdev);
b411b363
PR
3169}
3170
3171
3172static void drbd_destroy_mempools(void)
3173{
3174 struct page *page;
3175
3176 while (drbd_pp_pool) {
3177 page = drbd_pp_pool;
3178 drbd_pp_pool = (struct page *)page_private(page);
3179 __free_page(page);
3180 drbd_pp_vacant--;
3181 }
3182
3183 /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
3184
3185 if (drbd_ee_mempool)
3186 mempool_destroy(drbd_ee_mempool);
3187 if (drbd_request_mempool)
3188 mempool_destroy(drbd_request_mempool);
3189 if (drbd_ee_cache)
3190 kmem_cache_destroy(drbd_ee_cache);
3191 if (drbd_request_cache)
3192 kmem_cache_destroy(drbd_request_cache);
3193 if (drbd_bm_ext_cache)
3194 kmem_cache_destroy(drbd_bm_ext_cache);
3195 if (drbd_al_ext_cache)
3196 kmem_cache_destroy(drbd_al_ext_cache);
3197
3198 drbd_ee_mempool = NULL;
3199 drbd_request_mempool = NULL;
3200 drbd_ee_cache = NULL;
3201 drbd_request_cache = NULL;
3202 drbd_bm_ext_cache = NULL;
3203 drbd_al_ext_cache = NULL;
3204
3205 return;
3206}
3207
3208static int drbd_create_mempools(void)
3209{
3210 struct page *page;
1816a2b4 3211 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
b411b363
PR
3212 int i;
3213
3214 /* prepare our caches and mempools */
3215 drbd_request_mempool = NULL;
3216 drbd_ee_cache = NULL;
3217 drbd_request_cache = NULL;
3218 drbd_bm_ext_cache = NULL;
3219 drbd_al_ext_cache = NULL;
3220 drbd_pp_pool = NULL;
3221
3222 /* caches */
3223 drbd_request_cache = kmem_cache_create(
3224 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
3225 if (drbd_request_cache == NULL)
3226 goto Enomem;
3227
3228 drbd_ee_cache = kmem_cache_create(
3229 "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
3230 if (drbd_ee_cache == NULL)
3231 goto Enomem;
3232
3233 drbd_bm_ext_cache = kmem_cache_create(
3234 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
3235 if (drbd_bm_ext_cache == NULL)
3236 goto Enomem;
3237
3238 drbd_al_ext_cache = kmem_cache_create(
3239 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
3240 if (drbd_al_ext_cache == NULL)
3241 goto Enomem;
3242
3243 /* mempools */
3244 drbd_request_mempool = mempool_create(number,
3245 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
3246 if (drbd_request_mempool == NULL)
3247 goto Enomem;
3248
3249 drbd_ee_mempool = mempool_create(number,
3250 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
2027ae1f 3251 if (drbd_ee_mempool == NULL)
b411b363
PR
3252 goto Enomem;
3253
3254 /* drbd's page pool */
3255 spin_lock_init(&drbd_pp_lock);
3256
3257 for (i = 0; i < number; i++) {
3258 page = alloc_page(GFP_HIGHUSER);
3259 if (!page)
3260 goto Enomem;
3261 set_page_private(page, (unsigned long)drbd_pp_pool);
3262 drbd_pp_pool = page;
3263 }
3264 drbd_pp_vacant = number;
3265
3266 return 0;
3267
3268Enomem:
3269 drbd_destroy_mempools(); /* in case we allocated some */
3270 return -ENOMEM;
3271}
3272
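/*
 * Illustrative sketch, not part of the original file: the drbd_pp_pool
 * built above is a LIFO free list threaded through page_private() of
 * each page.  The same pattern with an ordinary struct, in userspace:
 */
struct pp_node {
	struct pp_node *next;	/* plays the role of page_private() */
	char payload[4096];	/* stands in for the page itself */
};

static struct pp_node *pp_pool;	/* head of the free list */

static void pp_push(struct pp_node *n)
{
	n->next = pp_pool;
	pp_pool = n;
}

static struct pp_node *pp_pop(void)
{
	struct pp_node *n = pp_pool;

	if (n)
		pp_pool = n->next;
	return n;
}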
3273static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
3274 void *unused)
3275{
3276 /* just so we have it. you never know what interesting things we
3277 * might want to do here some day...
3278 */
3279
3280 return NOTIFY_DONE;
3281}
3282
3283static struct notifier_block drbd_notifier = {
3284 .notifier_call = drbd_notify_sys,
3285};
3286
3287static void drbd_release_ee_lists(struct drbd_conf *mdev)
3288{
3289 int rr;
3290
3291 rr = drbd_release_ee(mdev, &mdev->active_ee);
3292 if (rr)
3293 dev_err(DEV, "%d EEs in active list found!\n", rr);
3294
3295 rr = drbd_release_ee(mdev, &mdev->sync_ee);
3296 if (rr)
3297 dev_err(DEV, "%d EEs in sync list found!\n", rr);
3298
3299 rr = drbd_release_ee(mdev, &mdev->read_ee);
3300 if (rr)
3301 dev_err(DEV, "%d EEs in read list found!\n", rr);
3302
3303 rr = drbd_release_ee(mdev, &mdev->done_ee);
3304 if (rr)
3305 dev_err(DEV, "%d EEs in done list found!\n", rr);
3306
3307 rr = drbd_release_ee(mdev, &mdev->net_ee);
3308 if (rr)
3309 dev_err(DEV, "%d EEs in net list found!\n", rr);
3310}
3311
3312/* caution. no locking.
3313 * currently only used from module cleanup code. */
3314static void drbd_delete_device(unsigned int minor)
3315{
3316 struct drbd_conf *mdev = minor_to_mdev(minor);
3317
3318 if (!mdev)
3319 return;
3320
3321 /* paranoia asserts */
3322 if (mdev->open_cnt != 0)
3323 dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
3324 __FILE__ , __LINE__);
3325
3326 ERR_IF (!list_empty(&mdev->data.work.q)) {
3327 struct list_head *lp;
3328 list_for_each(lp, &mdev->data.work.q) {
3329 dev_err(DEV, "lp = %p\n", lp);
3330 }
3331 };
3332 /* end paranoia asserts */
3333
3334 del_gendisk(mdev->vdisk);
3335
3336 /* cleanup stuff that may have been allocated during
3337 * device (re-)configuration or state changes */
3338
3339 if (mdev->this_bdev)
3340 bdput(mdev->this_bdev);
3341
3342 drbd_free_resources(mdev);
3343
3344 drbd_release_ee_lists(mdev);
3345
24c4830c 3346 /* should be freed on disconnect? */
b411b363
PR
3347 kfree(mdev->ee_hash);
3348 /*
3349 mdev->ee_hash_s = 0;
3350 mdev->ee_hash = NULL;
3351 */
3352
3353 lc_destroy(mdev->act_log);
3354 lc_destroy(mdev->resync);
3355
3356 kfree(mdev->p_uuid);
3357 /* mdev->p_uuid = NULL; */
3358
3359 kfree(mdev->int_dig_out);
3360 kfree(mdev->int_dig_in);
3361 kfree(mdev->int_dig_vv);
3362
3363 /* cleanup the rest that has been
3364 * allocated from drbd_new_device
3365 * and actually free the mdev itself */
3366 drbd_free_mdev(mdev);
3367}
3368
3369static void drbd_cleanup(void)
3370{
3371 unsigned int i;
3372
3373 unregister_reboot_notifier(&drbd_notifier);
3374
17a93f30
LE
3375 /* first remove proc,
3376 * drbdsetup uses its presence to detect
3377 * whether DRBD is loaded.
3378 * If we would get stuck in proc removal,
3379 * but have netlink already deregistered,
3380 * some drbdsetup commands may wait forever
3381 * for an answer.
3382 */
3383 if (drbd_proc)
3384 remove_proc_entry("drbd", NULL);
3385
b411b363
PR
3386 drbd_nl_cleanup();
3387
3388 if (minor_table) {
b411b363
PR
3389 i = minor_count;
3390 while (i--)
3391 drbd_delete_device(i);
3392 drbd_destroy_mempools();
3393 }
3394
3395 kfree(minor_table);
3396
3397 unregister_blkdev(DRBD_MAJOR, "drbd");
3398
3399 printk(KERN_INFO "drbd: module cleanup done.\n");
3400}
3401
3402/**
3403 * drbd_congested() - Callback for pdflush
3404 * @congested_data: User data
3405 * @bdi_bits: Bits pdflush is currently interested in
3406 *
3407 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
3408 */
3409static int drbd_congested(void *congested_data, int bdi_bits)
3410{
3411 struct drbd_conf *mdev = congested_data;
3412 struct request_queue *q;
3413 char reason = '-';
3414 int r = 0;
3415
1b881ef7 3416 if (!may_inc_ap_bio(mdev)) {
b411b363
PR
3417 /* DRBD has frozen IO */
3418 r = bdi_bits;
3419 reason = 'd';
3420 goto out;
3421 }
3422
3423 if (get_ldev(mdev)) {
3424 q = bdev_get_queue(mdev->ldev->backing_bdev);
3425 r = bdi_congested(&q->backing_dev_info, bdi_bits);
3426 put_ldev(mdev);
3427 if (r)
3428 reason = 'b';
3429 }
3430
3431 if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
3432 r |= (1 << BDI_async_congested);
3433 reason = reason == 'b' ? 'a' : 'n';
3434 }
3435
3436out:
3437 mdev->congestion_reason = reason;
3438 return r;
3439}
3440
3441struct drbd_conf *drbd_new_device(unsigned int minor)
3442{
3443 struct drbd_conf *mdev;
3444 struct gendisk *disk;
3445 struct request_queue *q;
3446
3447 /* GFP_KERNEL, we are outside of all write-out paths */
3448 mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
3449 if (!mdev)
3450 return NULL;
3451 if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
3452 goto out_no_cpumask;
3453
3454 mdev->minor = minor;
3455
3456 drbd_init_set_defaults(mdev);
3457
3458 q = blk_alloc_queue(GFP_KERNEL);
3459 if (!q)
3460 goto out_no_q;
3461 mdev->rq_queue = q;
3462 q->queuedata = mdev;
b411b363
PR
3463
3464 disk = alloc_disk(1);
3465 if (!disk)
3466 goto out_no_disk;
3467 mdev->vdisk = disk;
3468
81e84650 3469 set_disk_ro(disk, true);
b411b363
PR
3470
3471 disk->queue = q;
3472 disk->major = DRBD_MAJOR;
3473 disk->first_minor = minor;
3474 disk->fops = &drbd_ops;
3475 sprintf(disk->disk_name, "drbd%d", minor);
3476 disk->private_data = mdev;
3477
3478 mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
3479 /* we have no partitions. we contain only ourselves. */
3480 mdev->this_bdev->bd_contains = mdev->this_bdev;
3481
3482 q->backing_dev_info.congested_fn = drbd_congested;
3483 q->backing_dev_info.congested_data = mdev;
3484
2f58dcfc 3485 blk_queue_make_request(q, drbd_make_request);
99432fcc
PR
3486 /* Setting max_hw_sectors to an odd value of 8 KiB here;
3487 this triggers a max_bio_size message upon first attach or connect */
3488 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
b411b363
PR
3489 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
3490 blk_queue_merge_bvec(q, drbd_merge_bvec);
7eaceacc 3491 q->queue_lock = &mdev->req_lock;
b411b363
PR
3492
3493 mdev->md_io_page = alloc_page(GFP_KERNEL);
3494 if (!mdev->md_io_page)
3495 goto out_no_io_page;
3496
3497 if (drbd_bm_init(mdev))
3498 goto out_no_bitmap;
3499 /* no need to lock access, we are still initializing this minor device. */
3500 if (!tl_init(mdev))
3501 goto out_no_tl;
3502
3503 mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
3504 if (!mdev->app_reads_hash)
3505 goto out_no_app_reads;
3506
3507 mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
3508 if (!mdev->current_epoch)
3509 goto out_no_epoch;
3510
3511 INIT_LIST_HEAD(&mdev->current_epoch->list);
3512 mdev->epochs = 1;
3513
3514 return mdev;
3515
3516/* out_whatever_else:
3517 kfree(mdev->current_epoch); */
3518out_no_epoch:
3519 kfree(mdev->app_reads_hash);
3520out_no_app_reads:
3521 tl_cleanup(mdev);
3522out_no_tl:
3523 drbd_bm_cleanup(mdev);
3524out_no_bitmap:
3525 __free_page(mdev->md_io_page);
3526out_no_io_page:
3527 put_disk(disk);
3528out_no_disk:
3529 blk_cleanup_queue(q);
3530out_no_q:
3531 free_cpumask_var(mdev->cpu_mask);
3532out_no_cpumask:
3533 kfree(mdev);
3534 return NULL;
3535}
3536
3537/* counterpart of drbd_new_device.
3538 * last part of drbd_delete_device. */
3539void drbd_free_mdev(struct drbd_conf *mdev)
3540{
3541 kfree(mdev->current_epoch);
3542 kfree(mdev->app_reads_hash);
3543 tl_cleanup(mdev);
3544 if (mdev->bitmap) /* should no longer be there. */
3545 drbd_bm_cleanup(mdev);
3546 __free_page(mdev->md_io_page);
3547 put_disk(mdev->vdisk);
3548 blk_cleanup_queue(mdev->rq_queue);
3549 free_cpumask_var(mdev->cpu_mask);
3719094e 3550 drbd_free_tl_hash(mdev);
b411b363
PR
3551 kfree(mdev);
3552}
3553
3554
3555int __init drbd_init(void)
3556{
3557 int err;
3558
3559 if (sizeof(struct p_handshake) != 80) {
3560 printk(KERN_ERR
3561 "drbd: never change the size or layout "
3562 "of the HandShake packet.\n");
3563 return -EINVAL;
3564 }
3565
2b8a90b5 3566 if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
b411b363
PR
3567 printk(KERN_ERR
3568 "drbd: invalid minor_count (%d)\n", minor_count);
3569#ifdef MODULE
3570 return -EINVAL;
3571#else
3572 minor_count = 8;
3573#endif
3574 }
3575
3576 err = drbd_nl_init();
3577 if (err)
3578 return err;
3579
3580 err = register_blkdev(DRBD_MAJOR, "drbd");
3581 if (err) {
3582 printk(KERN_ERR
3583 "drbd: unable to register block device major %d\n",
3584 DRBD_MAJOR);
3585 return err;
3586 }
3587
3588 register_reboot_notifier(&drbd_notifier);
3589
3590 /*
3591 * allocate all necessary structs
3592 */
3593 err = -ENOMEM;
3594
3595 init_waitqueue_head(&drbd_pp_wait);
3596
3597 drbd_proc = NULL; /* play safe for drbd_cleanup */
3598 minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
3599 GFP_KERNEL);
3600 if (!minor_table)
3601 goto Enomem;
3602
3603 err = drbd_create_mempools();
3604 if (err)
3605 goto Enomem;
3606
8c484ee4 3607 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
b411b363
PR
3608 if (!drbd_proc) {
3609 printk(KERN_ERR "drbd: unable to register proc file\n");
3610 goto Enomem;
3611 }
3612
3613 rwlock_init(&global_state_lock);
3614
3615 printk(KERN_INFO "drbd: initialized. "
3616 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3617 API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3618 printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
3619 printk(KERN_INFO "drbd: registered as block device major %d\n",
3620 DRBD_MAJOR);
3621 printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);
3622
3623 return 0; /* Success! */
3624
3625Enomem:
3626 drbd_cleanup();
3627 if (err == -ENOMEM)
3628 /* currently always the case */
3629 printk(KERN_ERR "drbd: ran out of memory\n");
3630 else
3631 printk(KERN_ERR "drbd: initialization failure\n");
3632 return err;
3633}
3634
3635void drbd_free_bc(struct drbd_backing_dev *ldev)
3636{
3637 if (ldev == NULL)
3638 return;
3639
e525fd89
TH
3640 blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3641 blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
b411b363
PR
3642
3643 kfree(ldev);
3644}
3645
3646void drbd_free_sock(struct drbd_conf *mdev)
3647{
3648 if (mdev->data.socket) {
4589d7f8 3649 mutex_lock(&mdev->data.mutex);
b411b363
PR
3650 kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
3651 sock_release(mdev->data.socket);
3652 mdev->data.socket = NULL;
4589d7f8 3653 mutex_unlock(&mdev->data.mutex);
b411b363
PR
3654 }
3655 if (mdev->meta.socket) {
4589d7f8 3656 mutex_lock(&mdev->meta.mutex);
b411b363
PR
3657 kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
3658 sock_release(mdev->meta.socket);
3659 mdev->meta.socket = NULL;
4589d7f8 3660 mutex_unlock(&mdev->meta.mutex);
b411b363
PR
3661 }
3662}
3663
3664
3665void drbd_free_resources(struct drbd_conf *mdev)
3666{
3667 crypto_free_hash(mdev->csums_tfm);
3668 mdev->csums_tfm = NULL;
3669 crypto_free_hash(mdev->verify_tfm);
3670 mdev->verify_tfm = NULL;
3671 crypto_free_hash(mdev->cram_hmac_tfm);
3672 mdev->cram_hmac_tfm = NULL;
3673 crypto_free_hash(mdev->integrity_w_tfm);
3674 mdev->integrity_w_tfm = NULL;
3675 crypto_free_hash(mdev->integrity_r_tfm);
3676 mdev->integrity_r_tfm = NULL;
3677
3678 drbd_free_sock(mdev);
3679
3680 __no_warn(local,
3681 drbd_free_bc(mdev->ldev);
3682 mdev->ldev = NULL;);
3683}
3684
3685/* meta data management */
3686
3687struct meta_data_on_disk {
3688 u64 la_size; /* last agreed size. */
3689 u64 uuid[UI_SIZE]; /* UUIDs. */
3690 u64 device_uuid;
3691 u64 reserved_u64_1;
3692 u32 flags; /* MDF */
3693 u32 magic;
3694 u32 md_size_sect;
3695 u32 al_offset; /* offset to this block */
3696 u32 al_nr_extents; /* important for restoring the AL */
3697 /* `-- act_log->nr_elements <-- sync_conf.al_extents */
3698 u32 bm_offset; /* offset to the bitmap, from here */
3699 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
99432fcc
PR
3700 u32 la_peer_max_bio_size; /* last peer max_bio_size */
3701 u32 reserved_u32[3];
b411b363
PR
3702
3703} __packed;
3704
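/*
 * Editorial sketch, not part of the driver: drbd_md_sync() below serializes
 * this structure into a single 512-byte sector (memset(buffer, 0, 512)
 * followed by one drbd_md_sync_page_io()).  A compile-time check along these
 * lines could document that invariant; the helper name is hypothetical and
 * the 512 is an assumption taken from the memset, not from a named constant.
 */
static inline void meta_data_on_disk_layout_check(void)
{
	/* the on-disk super block must fit into the one sector we write */
	BUILD_BUG_ON(sizeof(struct meta_data_on_disk) > 512);
}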
3705/**
3706 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3707 * @mdev: DRBD device.
3708 */
3709void drbd_md_sync(struct drbd_conf *mdev)
3710{
3711 struct meta_data_on_disk *buffer;
3712 sector_t sector;
3713 int i;
3714
ee15b038
LE
3715 del_timer(&mdev->md_sync_timer);
3716 /* timer may be rearmed by drbd_md_mark_dirty() now. */
b411b363
PR
3717 if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
3718 return;
b411b363
PR
3719
3720 /* We use D_FAILED here, and not D_ATTACHING, because we try to write
3721 * metadata even if we detach due to a disk failure! */
3722 if (!get_ldev_if_state(mdev, D_FAILED))
3723 return;
3724
b411b363
PR
3725 mutex_lock(&mdev->md_io_mutex);
3726 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3727 memset(buffer, 0, 512);
3728
3729 buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
3730 for (i = UI_CURRENT; i < UI_SIZE; i++)
3731 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
3732 buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
3733 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
3734
3735 buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
3736 buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
3737 buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
3738 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3739 buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
3740
3741 buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
99432fcc 3742 buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);
b411b363
PR
3743
3744 D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
3745 sector = mdev->ldev->md.md_offset;
3746
3f3a9b84 3747 if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
b411b363
PR
3748 /* this was a best-effort attempt anyway ... */
3749 dev_err(DEV, "meta data update failed!\n");
81e84650 3750 drbd_chk_io_error(mdev, 1, true);
b411b363
PR
3751 }
3752
3753 /* Update the in-memory mdev->ldev->md.la_size_sect,
3754 * since we just wrote it to the on-disk meta data. */
3755 mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
3756
3757 mutex_unlock(&mdev->md_io_mutex);
3758 put_ldev(mdev);
3759}
3760
3761/**
3762 * drbd_md_read() - Reads in the meta data super block
3763 * @mdev: DRBD device.
3764 * @bdev: Device from which the meta data should be read in.
3765 *
116676ca 3766 * Returns 0 (NO_ERROR) on success, or an enum drbd_ret_code in case
b411b363
PR
3767 * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
3768 */
3769int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3770{
3771 struct meta_data_on_disk *buffer;
3772 int i, rv = NO_ERROR;
3773
3774 if (!get_ldev_if_state(mdev, D_ATTACHING))
3775 return ERR_IO_MD_DISK;
3776
b411b363
PR
3777 mutex_lock(&mdev->md_io_mutex);
3778 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3779
3780 if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
25985edc 3781 /* NOTE: can't do normal error processing here as this is
b411b363
PR
3782 called BEFORE disk is attached */
3783 dev_err(DEV, "Error while reading metadata.\n");
3784 rv = ERR_IO_MD_DISK;
3785 goto err;
3786 }
3787
3788 if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
3789 dev_err(DEV, "Error while reading metadata, magic not found.\n");
3790 rv = ERR_MD_INVALID;
3791 goto err;
3792 }
3793 if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
3794 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
3795 be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
3796 rv = ERR_MD_INVALID;
3797 goto err;
3798 }
3799 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3800 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3801 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3802 rv = ERR_MD_INVALID;
3803 goto err;
3804 }
3805 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3806 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3807 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3808 rv = ERR_MD_INVALID;
3809 goto err;
3810 }
3811
3812 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3813 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3814 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3815 rv = ERR_MD_INVALID;
3816 goto err;
3817 }
3818
3819 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
3820 for (i = UI_CURRENT; i < UI_SIZE; i++)
3821 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3822 bdev->md.flags = be32_to_cpu(buffer->flags);
3823 mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
3824 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3825
99432fcc
PR
3826 spin_lock_irq(&mdev->req_lock);
3827 if (mdev->state.conn < C_CONNECTED) {
3828 int peer;
3829 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
3830 peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
3831 mdev->peer_max_bio_size = peer;
3832 }
3833 spin_unlock_irq(&mdev->req_lock);
3834
b411b363
PR
3835 if (mdev->sync_conf.al_extents < 7)
3836 mdev->sync_conf.al_extents = 127;
3837
3838 err:
3839 mutex_unlock(&mdev->md_io_mutex);
3840 put_ldev(mdev);
3841
3842 return rv;
3843}
3844
3845/**
3846 * drbd_md_mark_dirty() - Mark meta data super block as dirty
3847 * @mdev: DRBD device.
3848 *
3849 * Call this function if you change anything that should be written to
3850 * the meta-data super block. This function sets MD_DIRTY, and arms a
3851 * timer that ensures drbd_md_sync() runs within five seconds at the latest.
3852 */
ca0e6098 3853#ifdef DEBUG
ee15b038
LE
3854void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
3855{
3856 if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
3857 mod_timer(&mdev->md_sync_timer, jiffies + HZ);
3858 mdev->last_md_mark_dirty.line = line;
3859 mdev->last_md_mark_dirty.func = func;
3860 }
3861}
3862#else
b411b363
PR
3863void drbd_md_mark_dirty(struct drbd_conf *mdev)
3864{
ee15b038 3865 if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
ca0e6098 3866 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
b411b363 3867}
ee15b038 3868#endif
b411b363
PR
3869
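/*
 * Editorial sketch (hypothetical caller, not part of the driver): the lazy
 * meta-data flush pattern used throughout this file.  Changing on-disk state
 * only marks the super block dirty; the write happens either explicitly via
 * drbd_md_sync(), or at the latest when md_sync_timer_fn() queues
 * md_sync_work and w_md_sync() runs in the worker.  The flag chosen here is
 * only for illustration.
 */
static void example_update_md_flag(struct drbd_conf *mdev)
{
	if (get_ldev_if_state(mdev, D_FAILED)) {
		/* sets MD_DIRTY via drbd_md_mark_dirty() and arms the timer */
		drbd_md_set_flag(mdev, MDF_FULL_SYNC);
		put_ldev(mdev);
	}
	/* optional: write the super block out now instead of waiting */
	drbd_md_sync(mdev);
}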
3870static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3871{
3872 int i;
3873
62b0da3a 3874 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
b411b363 3875 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
b411b363
PR
3876}
3877
3878void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3879{
3880 if (idx == UI_CURRENT) {
3881 if (mdev->state.role == R_PRIMARY)
3882 val |= 1;
3883 else
3884 val &= ~((u64)1);
3885
3886 drbd_set_ed_uuid(mdev, val);
3887 }
3888
3889 mdev->ldev->md.uuid[idx] = val;
b411b363
PR
3890 drbd_md_mark_dirty(mdev);
3891}
3892
3893
3894void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3895{
3896 if (mdev->ldev->md.uuid[idx]) {
3897 drbd_uuid_move_history(mdev);
3898 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
b411b363
PR
3899 }
3900 _drbd_uuid_set(mdev, idx, val);
3901}
3902
3903/**
3904 * drbd_uuid_new_current() - Creates a new current UUID
3905 * @mdev: DRBD device.
3906 *
3907 * Creates a new current UUID, and rotates the old current UUID into
3908 * the bitmap slot. Causes an incremental resync upon next connect.
3909 */
3910void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3911{
3912 u64 val;
62b0da3a
LE
3913 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
3914
3915 if (bm_uuid)
3916 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
b411b363 3917
b411b363 3918 mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
b411b363
PR
3919
3920 get_random_bytes(&val, sizeof(u64));
3921 _drbd_uuid_set(mdev, UI_CURRENT, val);
62b0da3a 3922 drbd_print_uuids(mdev, "new current UUID");
aaa8e2b3
LE
3923 /* get it to stable storage _now_ */
3924 drbd_md_sync(mdev);
b411b363
PR
3925}
3926
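/*
 * Editorial illustration (hypothetical caller, not part of the driver):
 * effect of drbd_uuid_new_current() above, with R a fresh random value:
 *
 *   CURRENT <- R,  BITMAP <- old CURRENT   (history UUIDs untouched)
 *
 * which, as the kernel-doc says, causes an incremental, bitmap-based resync
 * upon the next connect.
 */
static void example_rotate_current_uuid(struct drbd_conf *mdev)
{
	/* drbd_uuid_new_current() is __must_hold(local), so take a
	 * local-disk reference around the call */
	if (get_ldev(mdev)) {
		drbd_uuid_new_current(mdev);
		put_ldev(mdev);
	}
}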
3927void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3928{
3929 if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3930 return;
3931
3932 if (val == 0) {
3933 drbd_uuid_move_history(mdev);
3934 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
3935 mdev->ldev->md.uuid[UI_BITMAP] = 0;
b411b363 3936 } else {
62b0da3a
LE
3937 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
3938 if (bm_uuid)
3939 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
b411b363 3940
62b0da3a 3941 mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
b411b363
PR
3942 }
3943 drbd_md_mark_dirty(mdev);
3944}
3945
3946/**
3947 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3948 * @mdev: DRBD device.
3949 *
3950 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3951 */
3952int drbd_bmio_set_n_write(struct drbd_conf *mdev)
3953{
3954 int rv = -EIO;
3955
3956 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3957 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
3958 drbd_md_sync(mdev);
3959 drbd_bm_set_all(mdev);
3960
3961 rv = drbd_bm_write(mdev);
3962
3963 if (!rv) {
3964 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
3965 drbd_md_sync(mdev);
3966 }
3967
3968 put_ldev(mdev);
3969 }
3970
3971 return rv;
3972}
3973
3974/**
3975 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3976 * @mdev: DRBD device.
3977 *
3978 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3979 */
3980int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
3981{
3982 int rv = -EIO;
3983
0778286a 3984 drbd_resume_al(mdev);
b411b363
PR
3985 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3986 drbd_bm_clear_all(mdev);
3987 rv = drbd_bm_write(mdev);
3988 put_ldev(mdev);
3989 }
3990
3991 return rv;
3992}
3993
3994static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3995{
3996 struct bm_io_work *work = container_of(w, struct bm_io_work, w);
02851e9f 3997 int rv = -EIO;
b411b363
PR
3998
3999 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
4000
02851e9f 4001 if (get_ldev(mdev)) {
20ceb2b2 4002 drbd_bm_lock(mdev, work->why, work->flags);
02851e9f
LE
4003 rv = work->io_fn(mdev);
4004 drbd_bm_unlock(mdev);
4005 put_ldev(mdev);
4006 }
b411b363
PR
4007
4008 clear_bit(BITMAP_IO, &mdev->flags);
127b3178 4009 smp_mb__after_clear_bit();
b411b363
PR
4010 wake_up(&mdev->misc_wait);
4011
4012 if (work->done)
4013 work->done(mdev, rv);
4014
4015 clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
4016 work->why = NULL;
20ceb2b2 4017 work->flags = 0;
b411b363
PR
4018
4019 return 1;
4020}
4021
82f59cc6
LE
4022void drbd_ldev_destroy(struct drbd_conf *mdev)
4023{
4024 lc_destroy(mdev->resync);
4025 mdev->resync = NULL;
4026 lc_destroy(mdev->act_log);
4027 mdev->act_log = NULL;
4028 __no_warn(local,
4029 drbd_free_bc(mdev->ldev);
4030 mdev->ldev = NULL;);
4031
4032 if (mdev->md_io_tmpp) {
4033 __free_page(mdev->md_io_tmpp);
4034 mdev->md_io_tmpp = NULL;
4035 }
4036 clear_bit(GO_DISKLESS, &mdev->flags);
4037}
4038
e9e6f3ec
LE
4039static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4040{
4041 D_ASSERT(mdev->state.disk == D_FAILED);
9d282875
LE
4042 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
4043 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
82f59cc6
LE
4044 * the protected members anymore, though, so once put_ldev reaches zero
4045 * again, it will be safe to free them. */
e9e6f3ec 4046 drbd_force_state(mdev, NS(disk, D_DISKLESS));
e9e6f3ec
LE
4047 return 1;
4048}
4049
4050void drbd_go_diskless(struct drbd_conf *mdev)
4051{
4052 D_ASSERT(mdev->state.disk == D_FAILED);
4053 if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
9d282875 4054 drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
e9e6f3ec
LE
4055}
4056
b411b363
PR
4057/**
4058 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
4059 * @mdev: DRBD device.
4060 * @io_fn: IO callback to be called when bitmap IO is possible
4061 * @done: callback to be called after the bitmap IO was performed
4062 * @why: Descriptive text of the reason for doing the IO
4063 *
4064 * While IO on the bitmap is in progress, application IO is frozen; this
4065 * ensures that drbd_set_out_of_sync() cannot be called. This function MAY
4066 * ONLY be called from worker context. It MUST NOT be used while a previous
4067 * such work is still pending!
4068 */
4069void drbd_queue_bitmap_io(struct drbd_conf *mdev,
4070 int (*io_fn)(struct drbd_conf *),
4071 void (*done)(struct drbd_conf *, int),
20ceb2b2 4072 char *why, enum bm_flag flags)
b411b363
PR
4073{
4074 D_ASSERT(current == mdev->worker.task);
4075
4076 D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
4077 D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
4078 D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
4079 if (mdev->bm_io_work.why)
4080 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
4081 why, mdev->bm_io_work.why);
4082
4083 mdev->bm_io_work.io_fn = io_fn;
4084 mdev->bm_io_work.done = done;
4085 mdev->bm_io_work.why = why;
20ceb2b2 4086 mdev->bm_io_work.flags = flags;
b411b363 4087
22afd7ee 4088 spin_lock_irq(&mdev->req_lock);
b411b363
PR
4089 set_bit(BITMAP_IO, &mdev->flags);
4090 if (atomic_read(&mdev->ap_bio_cnt) == 0) {
127b3178 4091 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
b411b363 4092 drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
b411b363 4093 }
22afd7ee 4094 spin_unlock_irq(&mdev->req_lock);
b411b363
PR
4095}
4096
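/*
 * Editorial sketch (hypothetical caller, not part of the driver): queueing a
 * full bitmap write from worker context, using the io_fn defined above.  The
 * done() callback, the why text and the flags choice are illustrative only.
 */
static void example_bm_write_done(struct drbd_conf *mdev, int rv)
{
	if (rv)
		dev_err(DEV, "example bitmap write failed (%d)\n", rv);
}

static void example_queue_full_bitmap_write(struct drbd_conf *mdev)
{
	/* worker context only, see the D_ASSERTs in drbd_queue_bitmap_io() */
	drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write,
			     example_bm_write_done,
			     "example: set all bits and write bitmap",
			     BM_LOCKED_SET_ALLOWED);
}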
4097/**
4098 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
4099 * @mdev: DRBD device.
4100 * @io_fn: IO callback to be called when bitmap IO is possible
4101 * @why: Descriptive text of the reason for doing the IO
4102 *
4103 * Freezes application IO while the actual IO operation runs. This
4104 * function MAY NOT be called from worker context.
4105 */
20ceb2b2
LE
4106int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
4107 char *why, enum bm_flag flags)
b411b363
PR
4108{
4109 int rv;
4110
4111 D_ASSERT(current != mdev->worker.task);
4112
20ceb2b2
LE
4113 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
4114 drbd_suspend_io(mdev);
b411b363 4115
20ceb2b2 4116 drbd_bm_lock(mdev, why, flags);
b411b363
PR
4117 rv = io_fn(mdev);
4118 drbd_bm_unlock(mdev);
4119
20ceb2b2
LE
4120 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
4121 drbd_resume_io(mdev);
b411b363
PR
4122
4123 return rv;
4124}
4125
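/*
 * Editorial sketch (hypothetical caller, not part of the driver): the same
 * io_fn's can also be run synchronously, from non-worker context only, via
 * drbd_bitmap_io() above.  Unless BM_LOCKED_SET_ALLOWED is part of flags,
 * application IO is suspended for the duration of the call.  The why text is
 * illustrative, and BM_LOCKED_MASK is assumed to be the strictest enum
 * bm_flag value from drbd_int.h.
 */
static int example_clear_bitmap_now(struct drbd_conf *mdev)
{
	return drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			      "example: clear bitmap", BM_LOCKED_MASK);
}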
4126void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4127{
4128 if ((mdev->ldev->md.flags & flag) != flag) {
4129 drbd_md_mark_dirty(mdev);
4130 mdev->ldev->md.flags |= flag;
4131 }
4132}
4133
4134void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4135{
4136 if ((mdev->ldev->md.flags & flag) != 0) {
4137 drbd_md_mark_dirty(mdev);
4138 mdev->ldev->md.flags &= ~flag;
4139 }
4140}
4141int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
4142{
4143 return (bdev->md.flags & flag) != 0;
4144}
4145
4146static void md_sync_timer_fn(unsigned long data)
4147{
4148 struct drbd_conf *mdev = (struct drbd_conf *) data;
4149
4150 drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
4151}
4152
4153static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4154{
4155 dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
ee15b038
LE
4156#ifdef DEBUG
4157 dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
4158 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
4159#endif
b411b363 4160 drbd_md_sync(mdev);
b411b363
PR
4161 return 1;
4162}
4163
4164#ifdef CONFIG_DRBD_FAULT_INJECTION
4165/* Fault insertion support including random number generator shamelessly
4166 * stolen from kernel/rcutorture.c */
4167struct fault_random_state {
4168 unsigned long state;
4169 unsigned long count;
4170};
4171
4172#define FAULT_RANDOM_MULT 39916801 /* prime */
4173#define FAULT_RANDOM_ADD 479001701 /* prime */
4174#define FAULT_RANDOM_REFRESH 10000
4175
4176/*
4177 * Crude but fast random-number generator. Uses a linear congruential
4178 * generator, with occasional help from get_random_bytes().
4179 */
4180static unsigned long
4181_drbd_fault_random(struct fault_random_state *rsp)
4182{
4183 long refresh;
4184
49829ea7 4185 if (!rsp->count--) {
b411b363
PR
4186 get_random_bytes(&refresh, sizeof(refresh));
4187 rsp->state += refresh;
4188 rsp->count = FAULT_RANDOM_REFRESH;
4189 }
4190 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
4191 return swahw32(rsp->state);
4192}
4193
4194static char *
4195_drbd_fault_str(unsigned int type) {
4196 static char *_faults[] = {
4197 [DRBD_FAULT_MD_WR] = "Meta-data write",
4198 [DRBD_FAULT_MD_RD] = "Meta-data read",
4199 [DRBD_FAULT_RS_WR] = "Resync write",
4200 [DRBD_FAULT_RS_RD] = "Resync read",
4201 [DRBD_FAULT_DT_WR] = "Data write",
4202 [DRBD_FAULT_DT_RD] = "Data read",
4203 [DRBD_FAULT_DT_RA] = "Data read ahead",
4204 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
6b4388ac
PR
4205 [DRBD_FAULT_AL_EE] = "EE allocation",
4206 [DRBD_FAULT_RECEIVE] = "receive data corruption",
b411b363
PR
4207 };
4208
4209 return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
4210}
4211
4212unsigned int
4213_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
4214{
4215 static struct fault_random_state rrs = {0, 0};
4216
4217 unsigned int ret = (
4218 (fault_devs == 0 ||
4219 ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
4220 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
4221
4222 if (ret) {
4223 fault_count++;
4224
7383506c 4225 if (__ratelimit(&drbd_ratelimit_state))
b411b363
PR
4226 dev_warn(DEV, "***Simulating %s failure\n",
4227 _drbd_fault_str(type));
4228 }
4229
4230 return ret;
4231}
4232#endif
4233
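/*
 * Editorial sketch (hypothetical, not part of the driver): how an IO path
 * could consult the fault injection support above.  With
 * CONFIG_DRBD_FAULT_INJECTION enabled, _drbd_insert_fault() fires with
 * roughly fault_rate percent probability on the minors selected by
 * fault_devs (all minors if fault_devs is 0).  The wrapper name is
 * hypothetical.
 */
#ifdef CONFIG_DRBD_FAULT_INJECTION
static bool example_should_fail_md_write(struct drbd_conf *mdev)
{
	return _drbd_insert_fault(mdev, DRBD_FAULT_MD_WR) != 0;
}
#endif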
4234const char *drbd_buildtag(void)
4235{
4236 /* When DRBD is built from external sources, this holds a reference
4237 to the git hash of the source code. */
4238
4239 static char buildtag[38] = "\0uilt-in";
4240
4241 if (buildtag[0] == 0) {
4242#ifdef CONFIG_MODULES
4243 if (THIS_MODULE != NULL)
4244 sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
4245 else
4246#endif
4247 buildtag[0] = 'b';
4248 }
4249
4250 return buildtag;
4251}
4252
4253module_init(drbd_init)
4254module_exit(drbd_cleanup)
4255
b411b363
PR
4256EXPORT_SYMBOL(drbd_conn_str);
4257EXPORT_SYMBOL(drbd_role_str);
4258EXPORT_SYMBOL(drbd_disk_str);
4259EXPORT_SYMBOL(drbd_set_st_err_str);