/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/connector.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_tag_magic.h>
#include <linux/drbd_limits.h>
#include <linux/compiler.h>
#include <linux/kthread.h>

static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int);
static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *);
static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *);

/* see get_sb_bdev and bd_claim */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Generate the tag_list to struct functions */
#define NL_PACKET(name, number, fields) \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) __attribute__ ((unused)); \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) \
{ \
	int tag; \
	int dlen; \
	\
	while ((tag = get_unaligned(tags++)) != TT_END) { \
		dlen = get_unaligned(tags++); \
		switch (tag_number(tag)) { \
		fields \
		default: \
			if (tag & T_MANDATORY) { \
				dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \
				return 0; \
			} \
		} \
		tags = (unsigned short *)((char *)tags + dlen); \
	} \
	return 1; \
}
#define NL_INTEGER(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \
		arg->member = get_unaligned((int *)(tags)); \
		break;
#define NL_INT64(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \
		arg->member = get_unaligned((u64 *)(tags)); \
		break;
#define NL_BIT(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \
		arg->member = *(char *)(tags) ? 1 : 0; \
		break;
#define NL_STRING(pn, pr, member, len) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \
		if (dlen > len) { \
			dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \
				#member, dlen, (unsigned int)len); \
			return 0; \
		} \
		arg->member ## _len = dlen; \
		memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
		break;
#include "linux/drbd_nl.h"
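
/*
 * For illustration: given a declaration in drbd_nl.h such as
 * NL_PACKET(primary, 1, NL_BIT(1, T_MAY_IGNORE, primary_force)), the
 * include above expands to roughly the following parser (the tag number
 * used here is made up; the real ones come from drbd_nl.h):
 *
 *	static int primary_from_tags(struct drbd_conf *mdev,
 *		unsigned short *tags, struct primary *arg)
 *	{
 *		int tag, dlen;
 *
 *		while ((tag = get_unaligned(tags++)) != TT_END) {
 *			dlen = get_unaligned(tags++);
 *			switch (tag_number(tag)) {
 *			case 1: // primary_force
 *				arg->primary_force = *(char *)(tags) ? 1 : 0;
 *				break;
 *			default:
 *				...
 *			}
 *			tags = (unsigned short *)((char *)tags + dlen);
 *		}
 *		return 1;
 *	}
 */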

/* Generate the struct to tag_list functions */
#define NL_PACKET(name, number, fields) \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) __attribute__ ((unused)); \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) \
{ \
	fields \
	return tags; \
}

#define NL_INTEGER(pn, pr, member) \
	put_unaligned(pn | pr | TT_INTEGER, tags++); \
	put_unaligned(sizeof(int), tags++); \
	put_unaligned(arg->member, (int *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(int));
#define NL_INT64(pn, pr, member) \
	put_unaligned(pn | pr | TT_INT64, tags++); \
	put_unaligned(sizeof(u64), tags++); \
	put_unaligned(arg->member, (u64 *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(u64));
#define NL_BIT(pn, pr, member) \
	put_unaligned(pn | pr | TT_BIT, tags++); \
	put_unaligned(sizeof(char), tags++); \
	*(char *)tags = arg->member; \
	tags = (unsigned short *)((char *)tags+sizeof(char));
#define NL_STRING(pn, pr, member, len) \
	put_unaligned(pn | pr | TT_STRING, tags++); \
	put_unaligned(arg->member ## _len, tags++); \
	memcpy(tags, arg->member, arg->member ## _len); \
	tags = (unsigned short *)((char *)tags + arg->member ## _len);
#include "linux/drbd_nl.h"
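
/*
 * Both generated directions share one simple wire format: a flat stream
 * of 16-bit words,
 *
 *	[tag = number | T_* flags | TT_* type] [dlen] [dlen bytes payload] ...
 *
 * terminated by a TT_END tag.  Because NL_STRING payloads may have odd
 * lengths, anything following a string is potentially unaligned, which
 * is why both the parsers and the generators use
 * get_unaligned()/put_unaligned() throughout.
 */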

void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name);
void drbd_nl_send_reply(struct cn_msg *, int);

int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			NULL, /* Will be set to address family */
			NULL, /* Will be set to address */
			NULL };

	char mb[12], af[20], ad[60], *afs;
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));

	if (get_net_conf(mdev->tconn)) {
		switch (((struct sockaddr *)mdev->tconn->net_conf->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)mdev->tconn->net_conf->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->tconn->net_conf->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->tconn->net_conf->peer_addr)->sin_addr);
		}
		snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
		envp[3]=af;
		envp[4]=ad;
		put_net_conf(mdev->tconn);
	}

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);

	drbd_bcast_ev_helper(mdev, cmd);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
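
/*
 * Example (made-up addresses, usermode_helper left at its default): a
 * "fence-peer" request for minor 0 against an IPv4 peer execs roughly
 *
 *	DRBD_PEER_AF=ipv4 DRBD_PEER_ADDRESS=192.168.1.2 \
 *		/sbin/drbdadm fence-peer minor-0
 *
 * call_usermodehelper() is invoked with wait != 0, so the helper's exit
 * code ends up in bits 8..15 of ret; that is what the callers below
 * decode with (r >> 8) & 0xff.
 */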

enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
{
	char *ex_to_string;
	int r;
	enum drbd_disk_state nps;
	enum drbd_fencing_p fp;

	D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

	if (get_ldev_if_state(mdev, D_CONSISTENT)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	} else {
		dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
		nps = mdev->state.pdsk;
		goto out;
	}

	r = drbd_khelper(mdev, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		nps = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		nps = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (mdev->state.disk == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			nps = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
			nps = mdev->state.pdsk;
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		dev_warn(DEV, "Peer is primary, outdating myself.\n");
		nps = D_UNKNOWN;
		_drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE);
		break;
	case 7:
		if (fp != FP_STONITH)
			dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		nps = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		nps = D_UNKNOWN;
		dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return nps;
	}

	dev_info(DEV, "fence-peer helper returned %d (%s)\n",
			(r>>8) & 0xff, ex_to_string);

out:
	if (mdev->state.susp_fen && nps >= D_UNKNOWN) {
		/* The handler was not successful... unfreeze here, the
		   state engine can not unfreeze... */
		_drbd_request_state(mdev, NS(susp_fen, 0), CS_VERBOSE);
	}

	return nps;
}

static int _try_outdate_peer_async(void *data)
{
	struct drbd_conf *mdev = (struct drbd_conf *)data;
	enum drbd_disk_state nps;
	union drbd_state ns;

	nps = drbd_try_outdate_peer(mdev);

	/* Not using
	   drbd_request_state(mdev, NS(pdsk, nps));
	   here, because we might have been able to re-establish the
	   connection in the meantime.  This can only partially be solved
	   by the state engine's is_valid_state() and
	   is_valid_state_transition() functions.

	   nps can be D_INCONSISTENT, D_OUTDATED or D_UNKNOWN.
	   pdsk == D_INCONSISTENT while conn >= C_CONNECTED is valid,
	   therefore we have to have the pre state change check here.
	*/
	spin_lock_irq(&mdev->req_lock);
	ns = mdev->state;
	if (ns.conn < C_WF_REPORT_PARAMS) {
		ns.pdsk = nps;
		_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	}
	spin_unlock_irq(&mdev->req_lock);

	return 0;
}

void drbd_try_outdate_peer_async(struct drbd_conf *mdev)
{
	struct task_struct *opa;

	opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev));
	if (IS_ERR(opa))
		dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n");
}

enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;
	enum drbd_disk_state nps;

	if (new_role == R_PRIMARY)
		request_ping(mdev); /* Detect a dead peer ASAP */

	mutex_lock(&mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
			nps = drbd_try_outdate_peer(mdev);

			if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}

			val.pdsk = nps;
			mask.pdsk = D_MASK;

			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto fail;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			nps = drbd_try_outdate_peer(mdev);

			if (force && nps > D_OUTDATED) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				nps = D_OUTDATED;
			}

			mask.pdsk = D_MASK;
			val.pdsk  = nps;

			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			schedule_timeout_interruptible((mdev->tconn->net_conf->ping_timeo+1)*HZ/10);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto fail;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto fail;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		if (get_net_conf(mdev->tconn)) {
			mdev->tconn->net_conf->want_lose = 0;
			put_net_conf(mdev->tconn);
		}
		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
 fail:
	mutex_unlock(&mdev->state_mutex);
	return rv;
}

static struct drbd_conf *ensure_mdev(int minor, int create)
{
	struct drbd_conf *mdev;

	if (minor >= minor_count)
		return NULL;

	mdev = minor_to_mdev(minor);

	if (!mdev && create) {
		struct gendisk *disk = NULL;
		mdev = drbd_new_device(minor);

		spin_lock_irq(&drbd_pp_lock);
		if (minor_table[minor] == NULL) {
			minor_table[minor] = mdev;
			disk = mdev->vdisk;
			mdev = NULL;
		} /* else: we lost the race */
		spin_unlock_irq(&drbd_pp_lock);

		if (disk) /* we won the race above */
			/* in case we ever add a drbd_delete_device(),
			 * don't forget the del_gendisk! */
			add_disk(disk);
		else /* we lost the race above */
			drbd_free_mdev(mdev);

		mdev = minor_to_mdev(minor);
	}

	return mdev;
}

static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	struct primary primary_args;

	memset(&primary_args, 0, sizeof(struct primary));
	if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	reply->ret_code =
		drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force);

	return 0;
}

static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0);

	return 0;
}

/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	switch (bdev->dc.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_MAX_SIZE;
		/* we need (slightly less than) ~ this many bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
}
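
/*
 * Resulting on-disk picture for the internal/flex-internal case (sketch,
 * not to scale; al_offset and bm_offset are 512-byte sector offsets
 * relative to md_offset, which drbd_md_ss__() places in the last
 * 4k-aligned 4k block of the device):
 *
 *	| usable data ... | bitmap         | activity log | md super |
 *	                  ^ bm_offset < 0  ^ al_offset < 0 ^ md_offset
 *
 * md_size_sect computed above therefore covers bitmap + AL + super block.
 */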

/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}

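/*
 * A few worked examples of the rounding above (remember: input is KB):
 *	ppsize(buf, 1024)      ==> "1024 KB"   (below 10000, no scaling)
 *	ppsize(buf, 1048576)   ==> "1024 MB"   (one shift by 10 bits)
 *	ppsize(buf, -1ULL)     ==> "16384 EB"  (the documented worst case)
 */
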
/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (is_susp(mdev->state))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}

/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns dev_size_error (< 0) on error, and unchanged, shrunk or grew
 * otherwise.  You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
					"Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
					"Leaving size unchanged at size = %lu KB\n",
					(unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
			(unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		int err;

		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		err = drbd_bitmap_io(mdev, &drbd_bm_write,
				"size changed", BM_LOCKED_MASK);
		if (err) {
			rv = dev_size_error;
			goto out;
		}
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}

sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}

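/*
 * The decision above, summarized (u_size, if set, only ever shrinks the
 * result further):
 *
 *	p_size	m_size	la_size		resulting size
 *	 > 0	 > 0	  any		min(p_size, m_size)
 *	  0	 > 0	  > 0		la_size, clamped to m_size
 *	  0	 > 0	   0		m_size (never been connected)
 *	  0	  0	  any		0, "Both nodes diskless!"
 */
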
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if the current al lru is still used, -ENOMEM when
 * allocation failed, and 0 on success.  You should call drbd_md_sync()
 * after calling this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (!expect(mdev->sync_conf.al_extents >= 7))
		mdev->sync_conf.al_extents = 127;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache,
		mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}

static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	int max_hw_sectors = max_bio_size >> 9;
	int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		max_segments = mdev->ldev->dc.max_bio_bvecs;
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}

void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
	int now, new, local, peer;

	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
	local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		mdev->local_max_bio_size = local;
		put_ldev(mdev);
	}

	/* We may ignore peer limits if the peer is modern enough:
	   from 8.3.8 onwards the peer can use multiple BIOs for a single
	   peer_request. */
	if (mdev->state.conn >= C_CONNECTED) {
		if (mdev->agreed_pro_version < 94)
			peer = mdev->peer_max_bio_size;
		else if (mdev->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else /* drbd 8.3.8 onwards */
			peer = DRBD_MAX_BIO_SIZE;
	}

	new = min_t(int, local, peer);

	if (mdev->state.role == R_PRIMARY && new < now)
		dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);

	if (new != now)
		dev_info(DEV, "max BIO size = %u\n", new);

	drbd_setup_queue_param(mdev, new);
}
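
/*
 * Illustration with made-up numbers: a backing queue limited to 128 KiB
 * talking to a protocol 94 peer yields
 * new = min(128 KiB, DRBD_MAX_SIZE_H80_PACKET), i.e. the old peer's
 * one-bio-per-packet limit wins; the same disk behind a post-8.3.8 peer
 * gets min(128 KiB, DRBD_MAX_BIO_SIZE) and is typically limited only by
 * the local queue.
 */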

/* serialize deconfig (worker exiting, doing cleanup)
 * and reconfig (drbdsetup disk, drbdsetup net)
 *
 * Wait for a potentially exiting worker, then restart it,
 * or start a new one.  Flush any pending work, there may still be an
 * after_state_change queued.
 */
static void drbd_reconfig_start(struct drbd_conf *mdev)
{
	wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
	wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
	drbd_thread_start(&mdev->tconn->worker);
	drbd_flush_workqueue(mdev);
}

/* if still unconfigured, stops worker again.
 * if configured now, clears CONFIG_PENDING.
 * wakes potential waiters */
static void drbd_reconfig_done(struct drbd_conf *mdev)
{
	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.disk == D_DISKLESS &&
	    mdev->state.conn == C_STANDALONE &&
	    mdev->state.role == R_SECONDARY) {
		set_bit(DEVICE_DYING, &mdev->flags);
		drbd_thread_stop_nowait(&mdev->tconn->worker);
	} else
		clear_bit(CONFIG_PENDING, &mdev->flags);
	spin_unlock_irq(&mdev->req_lock);
	wake_up(&mdev->state_wait);
}

/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (lc_try_lock(mdev->act_log)) {
		drbd_al_shrink(mdev);
		lc_unlock(mdev->act_log);
	} else {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);

	spin_unlock_irq(&mdev->req_lock);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}

/* always returns 0;
 * the interesting return code is in reply->ret_code */
static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct block_device *bdev;
	struct lru_cache *resync_lru = NULL;
	union drbd_state ns, os;
	enum drbd_state_rv rv;
	int cp_discovered = 0;
	int logical_block_size;

	drbd_reconfig_start(mdev);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}
	/* It may just now have detached because of IO error.  Make sure
	 * drbd_ldev_destroy is done already, we may end up here very fast,
	 * e.g. if someone calls attach from the on-io-error handler,
	 * to realize a "hot spare" feature (not that I'd recommend that) */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));

	/* allocation not in the IO path, cqueue thread context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	nbc->dc.disk_size     = DRBD_DISK_SIZE_SECT_DEF;
	nbc->dc.on_io_error   = DRBD_ON_IO_ERROR_DEF;
	nbc->dc.fencing       = DRBD_FENCING_DEF;
	nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;

	if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	if (get_net_conf(mdev->tconn)) {
		int prot = mdev->tconn->net_conf->wire_protocol;
		put_net_conf(mdev->tconn);
		if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}

	bdev = blkdev_get_by_path(nbc->dc.backing_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_DISK;
		goto fail;
	}
	nbc->backing_bdev = bdev;

	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device.  TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	bdev = blkdev_get_by_path(nbc->dc.meta_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  (nbc->dc.meta_dev_idx < 0) ?
				  (void *)mdev : (void *)drbd_m_holder);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}
	nbc->md_bdev = bdev;

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			61, sizeof(struct bm_extent),
			offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) nbc->dc.disk_size);
		retcode = ERR_DISK_TO_SMALL;
		goto fail;
	}

	if (nbc->dc.meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
			 "at least %llu sectors needed for this meta-disk type\n",
			 (unsigned long long) min_md_device_sectors);
		goto fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TO_SMALL;
		goto fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			"to currently maximum possible %llu sectors <==\n",
			(unsigned long long) max_possible_sectors);
		if (nbc->dc.meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				"meta data may help <<==\n");
	}

	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || is_susp(mdev->state));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	retcode = rv;  /* FIXME: Type mismatch. */
	drbd_resume_io(mdev);
	if (rv < SS_SUCCESS)
		goto fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	/* allocate a second IO page if logical_block_size != 512 */
	logical_block_size = bdev_logical_block_size(nbc->md_bdev);
	if (logical_block_size == 0)
		logical_block_size = MD_SECTOR_SIZE;

	if (logical_block_size != MD_SECTOR_SIZE) {
		if (!mdev->md_io_tmpp) {
			struct page *page = alloc_page(GFP_NOIO);
			if (!page)
				goto force_diskless_dec;

			dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n",
				 logical_block_size, MD_SECTOR_SIZE);
			dev_warn(DEV, "Workaround engaged (has performance impact).\n");

			mdev->md_io_tmpp = page;
		}
	}

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TO_SMALL;
		goto force_diskless_dec;
	}

	if (!drbd_al_read_log(mdev, nbc)) {
		retcode = ERR_IO_MD_DISK;
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (nbc->dc.no_md_flush)
		set_bit(MD_NO_FUA, &mdev->flags);
	else
		clear_bit(MD_NO_FUA, &mdev->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere.  */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	nbc = NULL;
	resync_lru = NULL;

	mdev->write_ordering = WO_bdev_flush;
	drbd_bump_write_ordering(mdev, WO_bdev_flush);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
		set_bit(CRASHED_PRIMARY, &mdev->flags);
		cp_discovered = 1;
	}

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;
	mdev->read_cnt = 0;
	mdev->writ_cnt = 0;

	drbd_reconsider_max_bio_size(mdev);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	if (mdev->state.role != R_PRIMARY &&
	     drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &mdev->flags);

	dd = drbd_determine_dev_size(mdev, 0);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == grew)
		set_bit(RESYNC_AFTER_NEG, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
		     "(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
			"set_n_write from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read,
			"read from attaching", BM_LOCKED_MASK) < 0) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (cp_discovered) {
		drbd_al_apply_to_bm(mdev);
		if (drbd_bitmap_io(mdev, &drbd_bm_write,
			"crashed primary apply AL", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
		drbd_suspend_al(mdev); /* IO is still suspended here... */

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;
	ns.i = os.i;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WasUpToDate...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	if ( ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		   To avoid a race in receive_state, free p_uuid while
		   holding req_lock. I.e. atomic with the state change */
		kfree(mdev->p_uuid);
		mdev->p_uuid = NULL;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

 force_diskless_dec:
	put_ldev(mdev);
 force_diskless:
	drbd_force_state(mdev, NS(disk, D_FAILED));
	drbd_md_sync(mdev);
 fail:
	if (nbc) {
		if (nbc->backing_bdev)
			blkdev_put(nbc->backing_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		if (nbc->md_bdev)
			blkdev_put(nbc->md_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		kfree(nbc);
	}
	lc_destroy(resync_lru);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}

/* Detaching the disk is a process in multiple stages.  First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then we have finally detached. */
static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	enum drbd_ret_code retcode;
	int ret;
	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
	retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
	/* D_FAILED will transition to DISKLESS. */
	ret = wait_event_interruptible(mdev->misc_wait,
			mdev->state.disk != D_FAILED);
	drbd_resume_io(mdev);
	if ((int)retcode == (int)SS_IS_DISKLESS)
		retcode = SS_NOTHING_TO_DO;
	if (ret)
		retcode = ERR_INTR;
	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			    struct drbd_nl_cfg_reply *reply)
{
	int i;
	enum drbd_ret_code retcode;
	struct net_conf *new_conf = NULL;
	struct crypto_hash *tfm = NULL;
	struct crypto_hash *integrity_w_tfm = NULL;
	struct crypto_hash *integrity_r_tfm = NULL;
	struct drbd_conf *odev;
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	void *int_dig_out = NULL;
	void *int_dig_in = NULL;
	void *int_dig_vv = NULL;
	struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;

	drbd_reconfig_start(mdev);

	if (mdev->state.conn > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	new_conf->timeout	   = DRBD_TIMEOUT_DEF;
	new_conf->try_connect_int  = DRBD_CONNECT_INT_DEF;
	new_conf->ping_int	   = DRBD_PING_INT_DEF;
	new_conf->max_epoch_size   = DRBD_MAX_EPOCH_SIZE_DEF;
	new_conf->max_buffers	   = DRBD_MAX_BUFFERS_DEF;
	new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
	new_conf->sndbuf_size	   = DRBD_SNDBUF_SIZE_DEF;
	new_conf->rcvbuf_size	   = DRBD_RCVBUF_SIZE_DEF;
	new_conf->ko_count	   = DRBD_KO_COUNT_DEF;
	new_conf->after_sb_0p	   = DRBD_AFTER_SB_0P_DEF;
	new_conf->after_sb_1p	   = DRBD_AFTER_SB_1P_DEF;
	new_conf->after_sb_2p	   = DRBD_AFTER_SB_2P_DEF;
	new_conf->want_lose	   = 0;
	new_conf->two_primaries    = 0;
	new_conf->wire_protocol    = DRBD_PROT_C;
	new_conf->ping_timeo	   = DRBD_PING_TIMEO_DEF;
	new_conf->rr_conflict	   = DRBD_RR_CONFLICT_DEF;
	new_conf->on_congestion    = DRBD_ON_CONGESTION_DEF;
	new_conf->cong_extents	   = DRBD_CONG_EXTENTS_DEF;

	if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (new_conf->two_primaries
	    && (new_conf->wire_protocol != DRBD_PROT_C)) {
		retcode = ERR_NOT_PROTO_C;
		goto fail;
	}

	if (get_ldev(mdev)) {
		enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
		if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}

	if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) {
		retcode = ERR_CONG_NOT_PROTO_A;
		goto fail;
	}

	if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
		retcode = ERR_DISCARD;
		goto fail;
	}

	retcode = NO_ERROR;

	new_my_addr = (struct sockaddr *)&new_conf->my_addr;
	new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev || odev == mdev)
			continue;
		if (get_net_conf(odev->tconn)) {
			taken_addr = (struct sockaddr *)&odev->tconn->net_conf->my_addr;
			if (new_conf->my_addr_len == odev->tconn->net_conf->my_addr_len &&
			    !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
				retcode = ERR_LOCAL_ADDR;

			taken_addr = (struct sockaddr *)&odev->tconn->net_conf->peer_addr;
			if (new_conf->peer_addr_len == odev->tconn->net_conf->peer_addr_len &&
			    !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
				retcode = ERR_PEER_ADDR;

			put_net_conf(odev->tconn);
			if (retcode != NO_ERROR)
				goto fail;
		}
	}

	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			new_conf->cram_hmac_alg);
		tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm)) {
			tfm = NULL;
			retcode = ERR_AUTH_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
			retcode = ERR_AUTH_ALG_ND;
			goto fail;
		}
	}

	if (new_conf->integrity_alg[0]) {
		integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_w_tfm)) {
			integrity_w_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
			retcode = ERR_INTEGRITY_ALG_ND;
			goto fail;
		}

		integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_r_tfm)) {
			integrity_r_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}
	}

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	if (integrity_w_tfm) {
		i = crypto_hash_digestsize(integrity_w_tfm);
		int_dig_out = kmalloc(i, GFP_KERNEL);
		if (!int_dig_out) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_in = kmalloc(i, GFP_KERNEL);
		if (!int_dig_in) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_vv = kmalloc(i, GFP_KERNEL);
		if (!int_dig_vv) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	drbd_flush_workqueue(mdev);
	spin_lock_irq(&mdev->req_lock);
	if (mdev->tconn->net_conf != NULL) {
		retcode = ERR_NET_CONFIGURED;
		spin_unlock_irq(&mdev->req_lock);
		goto fail;
	}
	mdev->tconn->net_conf = new_conf;

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;

	crypto_free_hash(mdev->cram_hmac_tfm);
	mdev->cram_hmac_tfm = tfm;

	crypto_free_hash(mdev->integrity_w_tfm);
	mdev->integrity_w_tfm = integrity_w_tfm;

	crypto_free_hash(mdev->integrity_r_tfm);
	mdev->integrity_r_tfm = integrity_r_tfm;

	kfree(mdev->int_dig_out);
	kfree(mdev->int_dig_in);
	kfree(mdev->int_dig_vv);
	mdev->int_dig_out = int_dig_out;
	mdev->int_dig_in = int_dig_in;
	mdev->int_dig_vv = int_dig_vv;
	retcode = _drbd_set_state(_NS(mdev, conn, C_UNCONNECTED), CS_VERBOSE, NULL);
	spin_unlock_irq(&mdev->req_lock);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

fail:
	kfree(int_dig_out);
	kfree(int_dig_in);
	kfree(int_dig_vv);
	crypto_free_hash(tfm);
	crypto_free_hash(integrity_w_tfm);
	crypto_free_hash(integrity_r_tfm);
	kfree(new_conf);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}

static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;
	struct disconnect dc;

	memset(&dc, 0, sizeof(struct disconnect));
	if (!disconnect_from_tags(mdev, nlp->tag_list, &dc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (dc.force) {
		spin_lock_irq(&mdev->req_lock);
		if (mdev->state.conn >= C_WF_CONNECTION)
			_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), CS_HARD, NULL);
		spin_unlock_irq(&mdev->req_lock);
		goto done;
	}

	retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED);

	if (retcode == SS_NOTHING_TO_DO)
		goto done;
	else if (retcode == SS_ALREADY_STANDALONE)
		goto done;
	else if (retcode == SS_PRIMARY_NOP) {
		/* Our state checking code wants to see the peer outdated. */
		retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
						      pdsk, D_OUTDATED));
	} else if (retcode == SS_CW_FAILED_BY_PEER) {
		/* The peer probably wants to see us outdated. */
		retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
							disk, D_OUTDATED),
					      CS_ORDERED);
		if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			retcode = SS_SUCCESS;
		}
	}

	if (retcode < SS_SUCCESS)
		goto fail;

	if (wait_event_interruptible(mdev->state_wait,
				     mdev->state.conn != C_DISCONNECTING)) {
		/* Do not test for mdev->state.conn == C_STANDALONE, since
		   someone else might connect us in the meantime! */
		retcode = ERR_INTR;
		goto fail;
	}

 done:
	retcode = NO_ERROR;
 fail:
	drbd_md_sync(mdev);
	reply->ret_code = retcode;
	return 0;
}

void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
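
/*
 * Note on the tie break above (sketch): after an online grow both nodes
 * must agree on a single sync source for the new area.  A Primary always
 * wins; between two equal roles the DISCARD_CONCURRENT flag, which the
 * handshake arranges to be set on only one side of a connection, decides
 * who becomes C_SYNC_SOURCE, so both peers cannot pick the same answer.
 */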

static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	struct resize rs;
	int retcode = NO_ERROR;
	enum determine_dev_size dd;
	enum dds_flags ddsf;

	memset(&rs, 0, sizeof(struct resize));
	if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		goto fail_ldev;
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determine_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

 fail:
	reply->ret_code = retcode;
	return 0;

 fail_ldev:
	put_ldev(mdev); /* balance the successful get_ldev() above */
	goto fail;
}

static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	struct syncer_conf sc;
	cpumask_var_t new_cpu_mask;
	int *rs_plan_s = NULL;
	int fifo_size;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	if (nlp->flags & DRBD_NL_SET_DEFAULTS) {
		memset(&sc, 0, sizeof(struct syncer_conf));
		sc.rate       = DRBD_RATE_DEF;
		sc.after      = DRBD_AFTER_DEF;
		sc.al_extents = DRBD_AL_EXTENTS_DEF;
		sc.on_no_data = DRBD_ON_NO_DATA_DEF;
		sc.c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF;
		sc.c_delay_target = DRBD_C_DELAY_TARGET_DEF;
		sc.c_fill_target = DRBD_C_FILL_TARGET_DEF;
		sc.c_max_rate = DRBD_C_MAX_RATE_DEF;
		sc.c_min_rate = DRBD_C_MIN_RATE_DEF;
	} else
		memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));

	if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	/* re-sync running */
	rsr = (mdev->state.conn == C_SYNC_SOURCE ||
	       mdev->state.conn == C_SYNC_TARGET ||
	       mdev->state.conn == C_PAUSED_SYNC_S ||
	       mdev->state.conn == C_PAUSED_SYNC_T);

	if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	if (!rsr && sc.csums_alg[0]) {
		csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(csums_tfm)) {
			csums_tfm = NULL;
			retcode = ERR_CSUMS_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
			retcode = ERR_CSUMS_ALG_ND;
			goto fail;
		}
	}

	/* online verify running */
	ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);

	if (ovr) {
		if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
			retcode = ERR_VERIFY_RUNNING;
			goto fail;
		}
	}

	if (!ovr && sc.verify_alg[0]) {
		verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(verify_tfm)) {
			verify_tfm = NULL;
			retcode = ERR_VERIFY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
			retcode = ERR_VERIFY_ALG_ND;
			goto fail;
		}
	}

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
		err = __bitmap_parse(sc.cpu_mask, 32, 0,
				     cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
			retcode = ERR_CPU_MASK_PARSE;
			goto fail;
		}
	}
1806
841ce241
AG
1807 if (!expect(sc.rate >= 1))
1808 sc.rate = 1;
1809 if (!expect(sc.al_extents >= 7))
1810 sc.al_extents = 127; /* arbitrary fallback above the minimum */
b411b363
PR
1811#define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT)
1812 if (sc.al_extents > AL_MAX) {
1813 dev_err(DEV, "sc.al_extents > %d\n", AL_MAX);
1814 sc.al_extents = AL_MAX;
1815 }
1816#undef AL_MAX
1817
ef50a3e3
LE
1818 /* to avoid spurious errors when a minor is configured before the
1819 * minor it depends on: if necessary, create the minor we depend on
1820 * first */
1821 if (sc.after >= 0)
1822 ensure_mdev(sc.after, 1);
1823
b411b363
PR
1824 /* most sanity checks done, try to assign the new sync-after
1825 * dependency. need to hold the global lock in there,
1826 * to avoid a race in the dependency loop check. */
1827 retcode = drbd_alter_sa(mdev, sc.after);
1828 if (retcode != NO_ERROR)
1829 goto fail;
1830
778f271d
PR
1831 fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
1832 if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
1833 rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
1834 if (!rs_plan_s) {
1835 dev_err(DEV, "kzalloc of fifo_buffer failed\n");
1836 retcode = ERR_NOMEM;
1837 goto fail;
1838 }
1839 }
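	/*
	 * Editor's note: assuming SLEEP_TIME is defined as HZ/10 (as in
	 * drbd_int.h) and c_plan_ahead is given in 0.1 second units, the
	 * expression above reduces to
	 *   fifo_size = (c_plan_ahead * 10 * (HZ/10)) / HZ = c_plan_ahead,
	 * i.e. one plan slot per 0.1s tick: c_plan_ahead = 20 (two
	 * seconds of planning horizon) yields a 20-entry FIFO.
	 */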
1840
b411b363
PR
1841 /* ok, assign the rest of it as well.
1842 * lock against receive_SyncParam() */
1843 spin_lock(&mdev->peer_seq_lock);
1844 mdev->sync_conf = sc;
1845
1846 if (!rsr) {
1847 crypto_free_hash(mdev->csums_tfm);
1848 mdev->csums_tfm = csums_tfm;
1849 csums_tfm = NULL;
1850 }
1851
1852 if (!ovr) {
1853 crypto_free_hash(mdev->verify_tfm);
1854 mdev->verify_tfm = verify_tfm;
1855 verify_tfm = NULL;
1856 }
778f271d
PR
1857
1858 if (fifo_size != mdev->rs_plan_s.size) {
1859 kfree(mdev->rs_plan_s.values);
1860 mdev->rs_plan_s.values = rs_plan_s;
1861 mdev->rs_plan_s.size = fifo_size;
1862 mdev->rs_planed = 0;
1863 rs_plan_s = NULL;
1864 }
1865
b411b363
PR
1866 spin_unlock(&mdev->peer_seq_lock);
1867
1868 if (get_ldev(mdev)) {
1869 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
1870 drbd_al_shrink(mdev);
1871 err = drbd_check_al_size(mdev);
1872 lc_unlock(mdev->act_log);
1873 wake_up(&mdev->al_wait);
1874
1875 put_ldev(mdev);
1876 drbd_md_sync(mdev);
1877
1878 if (err) {
1879 retcode = ERR_NOMEM;
1880 goto fail;
1881 }
1882 }
1883
1884 if (mdev->state.conn >= C_CONNECTED)
1885 drbd_send_sync_param(mdev, &sc);
1886
1887 if (!cpumask_equal(mdev->cpu_mask, new_cpu_mask)) {
1888 cpumask_copy(mdev->cpu_mask, new_cpu_mask);
1889 drbd_calc_cpu_mask(mdev);
e6b3ea83
PR
1890 mdev->tconn->receiver.reset_cpu_mask = 1;
1891 mdev->tconn->asender.reset_cpu_mask = 1;
1892 mdev->tconn->worker.reset_cpu_mask = 1;
b411b363
PR
1893 }
1894
1895 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1896fail:
778f271d 1897 kfree(rs_plan_s);
b411b363
PR
1898 free_cpumask_var(new_cpu_mask);
1899 crypto_free_hash(csums_tfm);
1900 crypto_free_hash(verify_tfm);
1901 reply->ret_code = retcode;
1902 return 0;
1903}
1904
1905static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1906 struct drbd_nl_cfg_reply *reply)
1907{
1908 int retcode;
1909
194bfb32
LE
1910 /* If there is still bitmap IO pending, probably because a previous
1911 * resync just finished, wait for it before requesting a new resync. */
1912 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
1913
b411b363
PR
1914 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
1915
1916 if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
1917 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
1918
1919 while (retcode == SS_NEED_CONNECTION) {
1920 spin_lock_irq(&mdev->req_lock);
1921 if (mdev->state.conn < C_CONNECTED)
1922 retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
1923 spin_unlock_irq(&mdev->req_lock);
1924
1925 if (retcode != SS_NEED_CONNECTION)
1926 break;
1927
1928 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
1929 }
1930
1931 reply->ret_code = retcode;
1932 return 0;
1933}
1934
0778286a
PR
1935static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
1936{
1937 int rv;
1938
1939 rv = drbd_bmio_set_n_write(mdev);
1940 drbd_suspend_al(mdev);
1941 return rv;
1942}
1943
b411b363
PR
1944static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1945 struct drbd_nl_cfg_reply *reply)
1946{
0778286a 1947 int retcode;
b411b363 1948
194bfb32
LE
1949 /* If there is still bitmap IO pending, probably because a previous
1950 * resync just finished, wait for it before requesting a new resync. */
1951 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
1952
0778286a
PR
1953 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
1954
1955 if (retcode < SS_SUCCESS) {
1956 if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
1957 /* The peer will get a resync upon connect anyway. Just make that
1958 into a full resync. */
1959 retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
1960 if (retcode >= SS_SUCCESS) {
0778286a 1961 if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
20ceb2b2
LE
1962 "set_n_write from invalidate_peer",
1963 BM_LOCKED_SET_ALLOWED))
0778286a
PR
1964 retcode = ERR_IO_MD_DISK;
1965 }
1966 } else
1967 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
1968 }
b411b363 1969
0778286a 1970 reply->ret_code = retcode;
b411b363
PR
1971 return 0;
1972}
1973
1974static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1975 struct drbd_nl_cfg_reply *reply)
1976{
1977 int retcode = NO_ERROR;
1978
1979 if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
1980 retcode = ERR_PAUSE_IS_SET;
1981
1982 reply->ret_code = retcode;
1983 return 0;
1984}
1985
1986static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1987 struct drbd_nl_cfg_reply *reply)
1988{
1989 int retcode = NO_ERROR;
cd88d030 1990 union drbd_state s;
b411b363 1991
cd88d030
PR
1992 if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
1993 s = mdev->state;
1994 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
1995 retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
1996 s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
1997 } else {
1998 retcode = ERR_PAUSE_IS_CLEAR;
1999 }
2000 }
b411b363
PR
2001
2002 reply->ret_code = retcode;
2003 return 0;
2004}
2005
2006static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
2007 struct drbd_nl_cfg_reply *reply)
2008{
2009 reply->ret_code = drbd_request_state(mdev, NS(susp, 1));
2010
2011 return 0;
2012}
2013
2014static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
2015 struct drbd_nl_cfg_reply *reply)
2016{
43a5182c
PR
2017 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
2018 drbd_uuid_new_current(mdev);
2019 clear_bit(NEW_CUR_UUID, &mdev->flags);
43a5182c 2020 }
265be2d0 2021 drbd_suspend_io(mdev);
fb22c402 2022 reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
265be2d0
PR
2023 if (reply->ret_code == SS_SUCCESS) {
2024 if (mdev->state.conn < C_CONNECTED)
2025 tl_clear(mdev);
2026 if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
8554df1c 2027 tl_restart(mdev, FAIL_FROZEN_DISK_IO);
265be2d0
PR
2028 }
2029 drbd_resume_io(mdev);
2030
b411b363
PR
2031 return 0;
2032}
2033
2034static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
2035 struct drbd_nl_cfg_reply *reply)
2036{
2037 reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED));
2038 return 0;
2039}
2040
2041static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
2042 struct drbd_nl_cfg_reply *reply)
2043{
2044 unsigned short *tl;
2045
2046 tl = reply->tag_list;
2047
2048 if (get_ldev(mdev)) {
2049 tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl);
2050 put_ldev(mdev);
2051 }
2052
b2fb6dbe 2053 if (get_net_conf(mdev->tconn)) {
89e58e75 2054 tl = net_conf_to_tags(mdev, mdev->tconn->net_conf, tl);
b2fb6dbe 2055 put_net_conf(mdev->tconn);
b411b363
PR
2056 }
2057 tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl);
2058
2059 put_unaligned(TT_END, tl++); /* Close the tag list */
2060
2061 return (int)((char *)tl - (char *)reply->tag_list);
2062}
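/*
 * Editor's aside: the get_* handlers return the number of tag-list
 * bytes appended rather than an error code; drbd_connector_callback()
 * below adds this value to cn_reply->len.  The TT_END terminator is
 * written with a post-increment so that it is included in the count.
 */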
2063
2064static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
2065 struct drbd_nl_cfg_reply *reply)
2066{
2067 unsigned short *tl = reply->tag_list;
2068 union drbd_state s = mdev->state;
2069 unsigned long rs_left;
2070 unsigned int res;
2071
2072 tl = get_state_to_tags(mdev, (struct get_state *)&s, tl);
2073
2074 /* no local ref, no bitmap, no syncer progress. */
2075 if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) {
2076 if (get_ldev(mdev)) {
2077 drbd_get_syncer_progress(mdev, &rs_left, &res);
2078 tl = tl_add_int(tl, T_sync_progress, &res);
2079 put_ldev(mdev);
2080 }
2081 }
2082 put_unaligned(TT_END, tl++); /* Close the tag list */
2083
2084 return (int)((char *)tl - (char *)reply->tag_list);
2085}
2086
2087static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
2088 struct drbd_nl_cfg_reply *reply)
2089{
2090 unsigned short *tl;
2091
2092 tl = reply->tag_list;
2093
2094 if (get_ldev(mdev)) {
2095 tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64));
2096 tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags);
2097 put_ldev(mdev);
2098 }
2099 put_unaligned(TT_END, tl++); /* Close the tag list */
2100
2101 return (int)((char *)tl - (char *)reply->tag_list);
2102}
2103
2104/**
2105 * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use
2106 * @mdev: DRBD device.
2107 * @nlp: Netlink/connector packet from drbdsetup
2108 * @reply: Reply packet for drbdsetup
2109 */
2110static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
2111 struct drbd_nl_cfg_reply *reply)
2112{
2113 unsigned short *tl;
2114 char rv;
2115
2116 tl = reply->tag_list;
2117
2118 rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
2119 test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;
2120
2121 tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
2122 put_unaligned(TT_END, tl++); /* Close the tag list */
2123
2124 return (int)((char *)tl - (char *)reply->tag_list);
2125}
2126
2127static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
2128 struct drbd_nl_cfg_reply *reply)
2129{
2130 /* default to resume from last known position, if possible */
2131 struct start_ov args =
2132 { .start_sector = mdev->ov_start_sector };
2133
2134 if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) {
2135 reply->ret_code = ERR_MANDATORY_TAG;
2136 return 0;
2137 }
873b0d5f
LE
2138
2139 /* If there is still bitmap IO pending, e.g. a previous resync or
2140 * verify that just finished, wait for it before requesting a new resync. */
2141 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2142
b411b363
PR
2143 /* w_make_ov_request expects position to be aligned */
2144 mdev->ov_start_sector = args.start_sector & ~(BM_SECT_PER_BIT-1);
2145 reply->ret_code = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
2146 return 0;
2147}
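/*
 * Editor's note: assuming the usual 4 KiB bitmap granularity,
 * BM_SECT_PER_BIT is 8 (512-byte sectors per bitmap bit), so masking
 * with ~(BM_SECT_PER_BIT-1) == ~7 rounds the start sector down to a
 * bit boundary: a requested start_sector of 1005 becomes 1000 (125 * 8).
 */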
2148
2149
2150static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
2151 struct drbd_nl_cfg_reply *reply)
2152{
2153 int retcode = NO_ERROR;
2154 int skip_initial_sync = 0;
2155 int err;
2156
2157 struct new_c_uuid args;
2158
2159 memset(&args, 0, sizeof(struct new_c_uuid));
2160 if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) {
2161 reply->ret_code = ERR_MANDATORY_TAG;
2162 return 0;
2163 }
2164
2165 mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */
2166
2167 if (!get_ldev(mdev)) {
2168 retcode = ERR_NO_DISK;
2169 goto out;
2170 }
2171
2172 /* this is "skip initial sync", assumed to be clean */
2173 if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 &&
2174 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
2175 dev_info(DEV, "Preparing to skip initial sync\n");
2176 skip_initial_sync = 1;
2177 } else if (mdev->state.conn != C_STANDALONE) {
2178 retcode = ERR_CONNECTED;
2179 goto out_dec;
2180 }
2181
2182 drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
2183 drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
2184
2185 if (args.clear_bm) {
20ceb2b2
LE
2186 err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
2187 "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
b411b363
PR
2188 if (err) {
2189 dev_err(DEV, "Writing bitmap failed with %d\n", err);
2190 retcode = ERR_IO_MD_DISK;
2191 }
2192 if (skip_initial_sync) {
2193 drbd_send_uuids_skip_initial_sync(mdev);
2194 _drbd_uuid_set(mdev, UI_BITMAP, 0);
62b0da3a 2195 drbd_print_uuids(mdev, "cleared bitmap UUID");
b411b363
PR
2196 spin_lock_irq(&mdev->req_lock);
2197 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
2198 CS_VERBOSE, NULL);
2199 spin_unlock_irq(&mdev->req_lock);
2200 }
2201 }
2202
2203 drbd_md_sync(mdev);
2204out_dec:
2205 put_ldev(mdev);
2206out:
2207 mutex_unlock(&mdev->state_mutex);
2208
2209 reply->ret_code = retcode;
2210 return 0;
2211}
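/*
 * Editor's aside: the "skip initial sync" path works because both the
 * preconditions above (freshly created meta data, connected, protocol
 * >= 90) and the explicit bitmap clear guarantee that neither node can
 * hold divergent data; only then is it safe to jump disk and pdsk
 * straight to D_UP_TO_DATE without a resync.
 */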
2212
b411b363
PR
2213struct cn_handler_struct {
2214 int (*function)(struct drbd_conf *,
2215 struct drbd_nl_cfg_req *,
2216 struct drbd_nl_cfg_reply *);
2217 int reply_body_size;
2218};
2219
2220static struct cn_handler_struct cnd_table[] = {
2221 [ P_primary ] = { &drbd_nl_primary, 0 },
2222 [ P_secondary ] = { &drbd_nl_secondary, 0 },
2223 [ P_disk_conf ] = { &drbd_nl_disk_conf, 0 },
2224 [ P_detach ] = { &drbd_nl_detach, 0 },
2225 [ P_net_conf ] = { &drbd_nl_net_conf, 0 },
2226 [ P_disconnect ] = { &drbd_nl_disconnect, 0 },
2227 [ P_resize ] = { &drbd_nl_resize, 0 },
2228 [ P_syncer_conf ] = { &drbd_nl_syncer_conf, 0 },
2229 [ P_invalidate ] = { &drbd_nl_invalidate, 0 },
2230 [ P_invalidate_peer ] = { &drbd_nl_invalidate_peer, 0 },
2231 [ P_pause_sync ] = { &drbd_nl_pause_sync, 0 },
2232 [ P_resume_sync ] = { &drbd_nl_resume_sync, 0 },
2233 [ P_suspend_io ] = { &drbd_nl_suspend_io, 0 },
2234 [ P_resume_io ] = { &drbd_nl_resume_io, 0 },
2235 [ P_outdate ] = { &drbd_nl_outdate, 0 },
2236 [ P_get_config ] = { &drbd_nl_get_config,
2237 sizeof(struct syncer_conf_tag_len_struct) +
2238 sizeof(struct disk_conf_tag_len_struct) +
2239 sizeof(struct net_conf_tag_len_struct) },
2240 [ P_get_state ] = { &drbd_nl_get_state,
2241 sizeof(struct get_state_tag_len_struct) +
2242 sizeof(struct sync_progress_tag_len_struct) },
2243 [ P_get_uuids ] = { &drbd_nl_get_uuids,
2244 sizeof(struct get_uuids_tag_len_struct) },
2245 [ P_get_timeout_flag ] = { &drbd_nl_get_timeout_flag,
2246 sizeof(struct get_timeout_flag_tag_len_struct)},
2247 [ P_start_ov ] = { &drbd_nl_start_ov, 0 },
2248 [ P_new_c_uuid ] = { &drbd_nl_new_c_uuid, 0 },
2249};
2250
9f5180e5 2251static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp)
b411b363
PR
2252{
2253 struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data;
2254 struct cn_handler_struct *cm;
2255 struct cn_msg *cn_reply;
2256 struct drbd_nl_cfg_reply *reply;
2257 struct drbd_conf *mdev;
2258 int retcode, rr;
2259 int reply_size = sizeof(struct cn_msg)
2260 + sizeof(struct drbd_nl_cfg_reply)
2261 + sizeof(short int);
2262
2263 if (!try_module_get(THIS_MODULE)) {
2264 printk(KERN_ERR "drbd: try_module_get() failed!\n");
2265 return;
2266 }
2267
01a16b21 2268 if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) {
9f5180e5
PR
2269 retcode = ERR_PERM;
2270 goto fail;
2271 }
2272
ef50a3e3
LE
2273 mdev = ensure_mdev(nlp->drbd_minor,
2274 (nlp->flags & DRBD_NL_CREATE_DEVICE));
b411b363
PR
2275 if (!mdev) {
2276 retcode = ERR_MINOR_INVALID;
2277 goto fail;
2278 }
2279
42ff269d
LE
2280 if (nlp->packet_type >= P_nl_after_last_packet ||
2281 nlp->packet_type == P_return_code_only) {
b411b363
PR
2282 retcode = ERR_PACKET_NR;
2283 goto fail;
2284 }
2285
2286 cm = cnd_table + nlp->packet_type;
2287
2288 /* This may happen if packet number is 0: */
2289 if (cm->function == NULL) {
2290 retcode = ERR_PACKET_NR;
2291 goto fail;
2292 }
2293
2294 reply_size += cm->reply_body_size;
2295
2296 /* allocation not in the IO path, cqueue thread context */
3e3a7766 2297 cn_reply = kzalloc(reply_size, GFP_KERNEL);
b411b363
PR
2298 if (!cn_reply) {
2299 retcode = ERR_NOMEM;
2300 goto fail;
2301 }
2302 reply = (struct drbd_nl_cfg_reply *) cn_reply->data;
2303
2304 reply->packet_type =
42ff269d 2305 cm->reply_body_size ? nlp->packet_type : P_return_code_only;
b411b363
PR
2306 reply->minor = nlp->drbd_minor;
2307 reply->ret_code = NO_ERROR; /* Might be modified by cm->function. */
2308 /* reply->tag_list; might be modified by cm->function. */
2309
2310 rr = cm->function(mdev, nlp, reply);
2311
2312 cn_reply->id = req->id;
2313 cn_reply->seq = req->seq;
2314 cn_reply->ack = req->ack + 1;
2315 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr;
2316 cn_reply->flags = 0;
2317
b411b363
PR
2318 rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
2319 if (rr && rr != -ESRCH)
2320 printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
2321
2322 kfree(cn_reply);
2323 module_put(THIS_MODULE);
2324 return;
2325 fail:
2326 drbd_nl_send_reply(req, retcode);
2327 module_put(THIS_MODULE);
2328}
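/*
 * Editor's aside: a minimal userspace sketch of how a management tool
 * reaches the callback above through the connector.  Illustrative only:
 * it assumes the CN_IDX_DRBD/CN_VAL_DRBD ids and the drbd_nl_cfg_req
 * layout from the drbd headers, caps the payload at 1024 bytes, and
 * omits reply parsing.  Replies and broadcasts arrive on the
 * CN_IDX_DRBD multicast group the socket is bound to.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/connector.h>

static int drbd_cn_send(struct drbd_nl_cfg_req *req, size_t req_len)
{
	char buf[NLMSG_SPACE(sizeof(struct cn_msg) + 1024)];
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	struct cn_msg *msg = NLMSG_DATA(nlh);
	struct sockaddr_nl sa = {
		.nl_family = AF_NETLINK,
		.nl_groups = CN_IDX_DRBD,	/* subscribe for the reply */
	};
	int sk = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);

	if (sk < 0 || req_len > 1024 ||
	    bind(sk, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return -1;

	memset(buf, 0, sizeof(buf));
	nlh->nlmsg_len = NLMSG_LENGTH(sizeof(*msg) + req_len);
	nlh->nlmsg_type = NLMSG_DONE;
	msg->id.idx = CN_IDX_DRBD;	/* must match cn_add_callback() */
	msg->id.val = CN_VAL_DRBD;
	msg->len = req_len;
	memcpy(msg->data, req, req_len);

	return send(sk, nlh, nlh->nlmsg_len, 0) < 0 ? -1 : sk;
}
#endif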
2329
2330static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
2331
2332static unsigned short *
2333__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
2334 unsigned short len, int nul_terminated)
2335{
2336 unsigned short l = tag_descriptions[tag_number(tag)].max_len;
2337 len = (len < l) ? len : l;
2338 put_unaligned(tag, tl++);
2339 put_unaligned(len, tl++);
2340 memcpy(tl, data, len);
2341 tl = (unsigned short*)((char*)tl + len);
2342 if (nul_terminated)
2343 *((char*)tl - 1) = 0;
2344 return tl;
2345}
2346
2347static unsigned short *
2348tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len)
2349{
2350 return __tl_add_blob(tl, tag, data, len, 0);
2351}
2352
2353static unsigned short *
2354tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str)
2355{
2356 return __tl_add_blob(tl, tag, str, strlen(str)+1, 0);
2357}
2358
2359static unsigned short *
2360tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val)
2361{
2362 put_unaligned(tag, tl++);
2363 switch(tag_type(tag)) {
2364 case TT_INTEGER:
2365 put_unaligned(sizeof(int), tl++);
2366 put_unaligned(*(int *)val, (int *)tl);
2367 tl = (unsigned short*)((char*)tl+sizeof(int));
2368 break;
2369 case TT_INT64:
2370 put_unaligned(sizeof(u64), tl++);
2371 put_unaligned(*(u64 *)val, (u64 *)tl);
2372 tl = (unsigned short*)((char*)tl+sizeof(u64));
2373 break;
2374 default:
2375 /* someone did something stupid. */
2376 ;
2377 }
2378 return tl;
2379}
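/*
 * Editor's aside: the helpers above emit a flat stream of
 * (tag, length, payload) records, closed by a bare TT_END tag with no
 * length word.  A minimal, illustrative walker for such a stream,
 * assuming the tag_number()/tag_type() accessors from the drbd tag
 * headers:
 */
#if 0
static void tl_dump(const unsigned short *tl)
{
	unsigned short tag, len;

	while ((tag = get_unaligned(tl++)) != TT_END) {
		len = get_unaligned(tl++);
		printk(KERN_DEBUG "tag %d, type %d, %u byte(s)\n",
		       tag_number(tag), tag_type(tag), len);
		tl = (const unsigned short *)((const char *)tl + len);
	}
}
#endif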
2380
2381void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
2382{
2383 char buffer[sizeof(struct cn_msg)+
2384 sizeof(struct drbd_nl_cfg_reply)+
2385 sizeof(struct get_state_tag_len_struct)+
2386 sizeof(short int)];
2387 struct cn_msg *cn_reply = (struct cn_msg *) buffer;
2388 struct drbd_nl_cfg_reply *reply =
2389 (struct drbd_nl_cfg_reply *)cn_reply->data;
2390 unsigned short *tl = reply->tag_list;
2391
2392 /* dev_warn(DEV, "drbd_bcast_state() got called\n"); */
2393
2394 tl = get_state_to_tags(mdev, (struct get_state *)&state, tl);
2395
2396 put_unaligned(TT_END, tl++); /* Close the tag list */
2397
2398 cn_reply->id.idx = CN_IDX_DRBD;
2399 cn_reply->id.val = CN_VAL_DRBD;
2400
2401 cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
2402 cn_reply->ack = 0; /* not used here. */
2403 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
2404 (int)((char *)tl - (char *)reply->tag_list);
2405 cn_reply->flags = 0;
2406
2407 reply->packet_type = P_get_state;
2408 reply->minor = mdev_to_minor(mdev);
2409 reply->ret_code = NO_ERROR;
2410
b411b363
PR
2411 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
2412}
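/*
 * Editor's aside: since the broadcasts above are sent to the
 * CN_IDX_DRBD multicast group, a passive monitor only needs to bind a
 * connector socket to that group and read cn_msg frames.  Sketch below,
 * assuming the same headers as the send-side example earlier; each
 * msg->data payload is a struct drbd_nl_cfg_reply.
 */
#if 0
static void drbd_cn_listen(void)
{
	char buf[4096];
	struct sockaddr_nl sa = {
		.nl_family = AF_NETLINK,
		.nl_groups = CN_IDX_DRBD,	/* subscribe to drbd events */
	};
	int sk = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);

	if (sk < 0 || bind(sk, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return;

	for (;;) {
		ssize_t n = recv(sk, buf, sizeof(buf), 0);
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

		if (n <= 0)
			break;
		if (NLMSG_OK(nlh, (unsigned int)n)) {
			struct cn_msg *msg = NLMSG_DATA(nlh);
			/* inspect msg->data as struct drbd_nl_cfg_reply */
			(void)msg;
		}
	}
}
#endif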
2413
2414void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
2415{
2416 char buffer[sizeof(struct cn_msg)+
2417 sizeof(struct drbd_nl_cfg_reply)+
2418 sizeof(struct call_helper_tag_len_struct)+
2419 sizeof(short int)];
2420 struct cn_msg *cn_reply = (struct cn_msg *) buffer;
2421 struct drbd_nl_cfg_reply *reply =
2422 (struct drbd_nl_cfg_reply *)cn_reply->data;
2423 unsigned short *tl = reply->tag_list;
2424
2425 /* dev_warn(DEV, "drbd_bcast_ev_helper() got called\n"); */
2426
2427 tl = tl_add_str(tl, T_helper, helper_name);
2428 put_unaligned(TT_END, tl++); /* Close the tag list */
2429
2430 cn_reply->id.idx = CN_IDX_DRBD;
2431 cn_reply->id.val = CN_VAL_DRBD;
2432
2433 cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
2434 cn_reply->ack = 0; /* not used here. */
2435 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
2436 (int)((char *)tl - (char *)reply->tag_list);
2437 cn_reply->flags = 0;
2438
2439 reply->packet_type = P_call_helper;
2440 reply->minor = mdev_to_minor(mdev);
2441 reply->ret_code = NO_ERROR;
2442
b411b363
PR
2443 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
2444}
2445
2446void drbd_bcast_ee(struct drbd_conf *mdev,
2447 const char *reason, const int dgs,
2448 const char* seen_hash, const char* calc_hash,
2449 const struct drbd_epoch_entry* e)
2450{
2451 struct cn_msg *cn_reply;
2452 struct drbd_nl_cfg_reply *reply;
b411b363 2453 unsigned short *tl;
45bb912b
LE
2454 struct page *page;
2455 unsigned len;
b411b363
PR
2456
2457 if (!e)
2458 return;
2459 if (!reason || !reason[0])
2460 return;
2461
2462 /* apparently we have to memcpy twice, first to prepare the data for the
2463 * struct cn_msg, then within cn_netlink_send from the cn_msg to the
2464 * netlink skb. */
2465 /* receiver thread context, which is not in the writeout path (of this node),
2466 * but may be in the writeout path of the _other_ node.
2467 * GFP_NOIO to avoid potential "distributed deadlock". */
3e3a7766 2468 cn_reply = kzalloc(
b411b363
PR
2469 sizeof(struct cn_msg)+
2470 sizeof(struct drbd_nl_cfg_reply)+
2471 sizeof(struct dump_ee_tag_len_struct)+
2472 sizeof(short int),
2473 GFP_NOIO);
2474
2475 if (!cn_reply) {
2476 dev_err(DEV, "could not kzalloc buffer for drbd_bcast_ee, sector %llu, size %u\n",
010f6e67 2477 (unsigned long long)e->i.sector, e->i.size);
b411b363
PR
2478 return;
2479 }
2480
2481 reply = (struct drbd_nl_cfg_reply*)cn_reply->data;
2482 tl = reply->tag_list;
2483
2484 tl = tl_add_str(tl, T_dump_ee_reason, reason);
2485 tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs);
2486 tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs);
010f6e67 2487 tl = tl_add_int(tl, T_ee_sector, &e->i.sector);
b411b363
PR
2488 tl = tl_add_int(tl, T_ee_block_id, &e->block_id);
2489
3129b1b9 2490 /* dump the first 32k */
010f6e67 2491 len = min_t(unsigned, e->i.size, 32 << 10);
b411b363 2492 put_unaligned(T_ee_data, tl++);
3129b1b9 2493 put_unaligned(len, tl++);
b411b363 2494
45bb912b
LE
2495 page = e->pages;
2496 page_chain_for_each(page) {
2497 void *d = kmap_atomic(page, KM_USER0);
2498 unsigned l = min_t(unsigned, len, PAGE_SIZE);
2499 memcpy(tl, d, l);
2500 kunmap_atomic(d, KM_USER0);
2501 tl = (unsigned short*)((char*)tl + l);
2502 len -= l;
3129b1b9
LE
2503 if (len == 0)
2504 break;
b411b363
PR
2505 }
2506 put_unaligned(TT_END, tl++); /* Close the tag list */
2507
2508 cn_reply->id.idx = CN_IDX_DRBD;
2509 cn_reply->id.val = CN_VAL_DRBD;
2510
2511 cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
2512 cn_reply->ack = 0; /* not used here. */
2513 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
2514 (int)((char *)tl - (char *)reply->tag_list);
2515 cn_reply->flags = 0;
2516
2517 reply->packet_type = P_dump_ee;
2518 reply->minor = mdev_to_minor(mdev);
2519 reply->ret_code = NO_ERROR;
2520
b411b363
PR
2521 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
2522 kfree(cn_reply);
2523}
2524
2525void drbd_bcast_sync_progress(struct drbd_conf *mdev)
2526{
2527 char buffer[sizeof(struct cn_msg)+
2528 sizeof(struct drbd_nl_cfg_reply)+
2529 sizeof(struct sync_progress_tag_len_struct)+
2530 sizeof(short int)];
2531 struct cn_msg *cn_reply = (struct cn_msg *) buffer;
2532 struct drbd_nl_cfg_reply *reply =
2533 (struct drbd_nl_cfg_reply *)cn_reply->data;
2534 unsigned short *tl = reply->tag_list;
2535 unsigned long rs_left;
2536 unsigned int res;
2537
2538 /* no local ref, no bitmap, no syncer progress, no broadcast. */
2539 if (!get_ldev(mdev))
2540 return;
2541 drbd_get_syncer_progress(mdev, &rs_left, &res);
2542 put_ldev(mdev);
2543
2544 tl = tl_add_int(tl, T_sync_progress, &res);
2545 put_unaligned(TT_END, tl++); /* Close the tag list */
2546
2547 cn_reply->id.idx = CN_IDX_DRBD;
2548 cn_reply->id.val = CN_VAL_DRBD;
2549
2550 cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
2551 cn_reply->ack = 0; /* not used here. */
2552 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
2553 (int)((char *)tl - (char *)reply->tag_list);
2554 cn_reply->flags = 0;
2555
2556 reply->packet_type = P_sync_progress;
2557 reply->minor = mdev_to_minor(mdev);
2558 reply->ret_code = NO_ERROR;
2559
b411b363
PR
2560 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
2561}
2562
2563int __init drbd_nl_init(void)
2564{
2565 static struct cb_id cn_id_drbd;
2566 int err, try = 10;
2567
2568 cn_id_drbd.val = CN_VAL_DRBD;
2569 do {
2570 cn_id_drbd.idx = cn_idx;
2571 err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback);
2572 if (!err)
2573 break;
2574 cn_idx = (cn_idx + CN_IDX_STEP);
2575 } while (try--);
2576
2577 if (err) {
2578 printk(KERN_ERR "drbd: cn_drbd failed to register\n");
2579 return err;
2580 }
2581
2582 return 0;
2583}
2584
2585void drbd_nl_cleanup(void)
2586{
2587 static struct cb_id cn_id_drbd;
2588
2589 cn_id_drbd.idx = cn_idx;
2590 cn_id_drbd.val = CN_VAL_DRBD;
2591
2592 cn_del_callback(&cn_id_drbd);
2593}
2594
2595void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
2596{
2597 char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)];
2598 struct cn_msg *cn_reply = (struct cn_msg *) buffer;
2599 struct drbd_nl_cfg_reply *reply =
2600 (struct drbd_nl_cfg_reply *)cn_reply->data;
2601 int rr;
2602
3e3a7766 2603 memset(buffer, 0, sizeof(buffer));
b411b363
PR
2604 cn_reply->id = req->id;
2605
2606 cn_reply->seq = req->seq;
2607 cn_reply->ack = req->ack + 1;
2608 cn_reply->len = sizeof(struct drbd_nl_cfg_reply);
2609 cn_reply->flags = 0;
2610
42ff269d 2611 reply->packet_type = P_return_code_only;
b411b363
PR
2612 reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
2613 reply->ret_code = ret_code;
2614
b411b363
PR
2615 rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
2616 if (rr && rr != -ESRCH)
2617 printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
2618}
2619