/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>

/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include <linux/genl_magic_func.h>

/* used blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
        /* assigned from drbd_genlmsghdr */
        unsigned int minor;
        /* assigned from request attributes, if present */
        unsigned int volume;
#define VOLUME_UNSPECIFIED (-1U)
        /* pointer into the request skb,
         * limited lifetime! */
        char *resource_name;

        /* reply buffer */
        struct sk_buff *reply_skb;
        /* pointer into reply buffer */
        struct drbd_genlmsghdr *reply_dh;
        /* resolved from attributes, if possible */
        struct drbd_conf *mdev;
        struct drbd_tconn *tconn;
} adm_ctx;

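/* Finalize the genl reply in @skb and send it back to the requester
 * recorded in @info.  A failed delivery is only logged; the requested
 * action has been performed either way. */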
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
        genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
        if (genlmsg_reply(skb, info))
                printk(KERN_ERR "drbd: error sending genl reply\n");
}

/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail was no space in skb, and there are 4k available. */
int drbd_msg_put_info(const char *info)
{
        struct sk_buff *skb = adm_ctx.reply_skb;
        struct nlattr *nla;
        int err = -EMSGSIZE;

        if (!info || !info[0])
                return 0;

        nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
        if (!nla)
                return err;

        err = nla_put_string(skb, T_info_text, info);
        if (err) {
                nla_nest_cancel(skb, nla);
                return err;
        } else
                nla_nest_end(skb, nla);
        return 0;
}

/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR 1
#define DRBD_ADM_NEED_CONN  2
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
                unsigned flags)
{
        struct drbd_genlmsghdr *d_in = info->userhdr;
        const u8 cmd = info->genlhdr->cmd;
        int err;

        memset(&adm_ctx, 0, sizeof(adm_ctx));

        /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
        if (cmd != DRBD_ADM_GET_STATUS
        && security_netlink_recv(skb, CAP_SYS_ADMIN))
               return -EPERM;

        adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!adm_ctx.reply_skb) {
                err = -ENOMEM;
                goto fail;
        }

        adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
                                        info, &drbd_genl_family, 0, cmd);
        /* put of a few bytes into a fresh skb of >= 4k will always succeed.
         * but anyways */
        if (!adm_ctx.reply_dh) {
                err = -ENOMEM;
                goto fail;
        }

        adm_ctx.reply_dh->minor = d_in->minor;
        adm_ctx.reply_dh->ret_code = NO_ERROR;

        if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
                struct nlattr *nla;
                /* parse and validate only */
                err = drbd_cfg_context_from_attrs(NULL, info);
                if (err)
                        goto fail;

                /* It was present, and valid,
                 * copy it over to the reply skb. */
                err = nla_put_nohdr(adm_ctx.reply_skb,
                                info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
                                info->attrs[DRBD_NLA_CFG_CONTEXT]);
                if (err)
                        goto fail;

                /* and assign stuff to the global adm_ctx */
                nla = nested_attr_tb[__nla_type(T_ctx_volume)];
                adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
                nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
                if (nla)
                        adm_ctx.resource_name = nla_data(nla);
        } else
                adm_ctx.volume = VOLUME_UNSPECIFIED;

        adm_ctx.minor = d_in->minor;
        adm_ctx.mdev = minor_to_mdev(d_in->minor);
        adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);

        if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
                drbd_msg_put_info("unknown minor");
                return ERR_MINOR_INVALID;
        }
        if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
                drbd_msg_put_info("unknown connection");
                return ERR_INVALID_REQUEST;
        }

        /* some more paranoia, if the request was over-determined */
        if (adm_ctx.mdev && adm_ctx.tconn &&
            adm_ctx.mdev->tconn != adm_ctx.tconn) {
                pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
                                adm_ctx.minor, adm_ctx.resource_name,
                                adm_ctx.mdev->tconn->name);
                drbd_msg_put_info("minor exists in different connection");
                return ERR_INVALID_REQUEST;
        }
        if (adm_ctx.mdev &&
            adm_ctx.volume != VOLUME_UNSPECIFIED &&
            adm_ctx.volume != adm_ctx.mdev->vnr) {
                pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
                                adm_ctx.minor, adm_ctx.volume,
                                adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
                drbd_msg_put_info("minor exists as different volume");
                return ERR_INVALID_REQUEST;
        }

        return NO_ERROR;

fail:
        nlmsg_free(adm_ctx.reply_skb);
        adm_ctx.reply_skb = NULL;
        return err;
}

static int drbd_adm_finish(struct genl_info *info, int retcode)
{
        struct nlattr *nla;
        const char *resource_name = NULL;

        if (adm_ctx.tconn) {
                kref_put(&adm_ctx.tconn->kref, &conn_destroy);
                adm_ctx.tconn = NULL;
        }

        if (!adm_ctx.reply_skb)
                return -ENOMEM;

        adm_ctx.reply_dh->ret_code = retcode;

        nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
        if (nla) {
                int maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
                nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
                if (nla && !IS_ERR(nla))
                        resource_name = nla_data(nla);
        }

        drbd_adm_send_reply(adm_ctx.reply_skb, info);
        return 0;
}

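/* Fill in the address-family and peer-address slots of the helper
 * environment (DRBD_PEER_AF, DRBD_PEER_ADDRESS) from the connection's
 * current net_conf, if there is one. */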
static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
{
        char *afs;
        struct net_conf *nc;

        rcu_read_lock();
        nc = rcu_dereference(tconn->net_conf);
        if (nc) {
                switch (((struct sockaddr *)nc->peer_addr)->sa_family) {
                case AF_INET6:
                        afs = "ipv6";
                        snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
                                 &((struct sockaddr_in6 *)nc->peer_addr)->sin6_addr);
                        break;
                case AF_INET:
                        afs = "ipv4";
                        snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
                                 &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
                        break;
                default:
                        afs = "ssocks";
                        snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
                                 &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
                }
                snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
        }
        rcu_read_unlock();
}

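/* Invoke the configured userspace helper for one minor.  SIB_HELPER_PRE
 * and SIB_HELPER_POST events are broadcast around the call, so userspace
 * can follow helper invocations and their exit codes. */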
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
        char *envp[] = { "HOME=/",
                        "TERM=linux",
                        "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
                         (char[20]) { }, /* address family */
                         (char[60]) { }, /* address */
                        NULL };
        char mb[12];
        char *argv[] = {usermode_helper, cmd, mb, NULL };
        struct sib_info sib;
        int ret;

        snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
        setup_khelper_env(mdev->tconn, envp);

        /* The helper may take some time.
         * write out any unsynced meta data changes now */
        drbd_md_sync(mdev);

        dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
        sib.sib_reason = SIB_HELPER_PRE;
        sib.helper_name = cmd;
        drbd_bcast_event(mdev, &sib);
        ret = call_usermodehelper(usermode_helper, argv, envp, 1);
        if (ret)
                dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
                                usermode_helper, cmd, mb,
                                (ret >> 8) & 0xff, ret);
        else
                dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
                                usermode_helper, cmd, mb,
                                (ret >> 8) & 0xff, ret);
        sib.sib_reason = SIB_HELPER_POST;
        sib.helper_exit_code = ret;
        drbd_bcast_event(mdev, &sib);

        if (ret < 0) /* Ignore any ERRNOs we got. */
                ret = 0;

        return ret;
}

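/* Write out the meta data of every volume of this connection.  The
 * kref get/put around each volume lets us drop the rcu read lock while
 * drbd_md_sync() sleeps. */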
static void conn_md_sync(struct drbd_tconn *tconn)
{
        struct drbd_conf *mdev;
        int vnr;

        rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                kref_get(&mdev->kref);
                rcu_read_unlock();
                drbd_md_sync(mdev);
                kref_put(&mdev->kref, &drbd_minor_destroy);
                rcu_read_lock();
        }
        rcu_read_unlock();
}

int conn_khelper(struct drbd_tconn *tconn, char *cmd)
{
        char *envp[] = { "HOME=/",
                        "TERM=linux",
                        "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
                         (char[20]) { }, /* address family */
                         (char[60]) { }, /* address */
                        NULL };
        char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
        int ret;

        setup_khelper_env(tconn, envp);
        conn_md_sync(tconn);

        conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
        /* TODO: conn_bcast_event() ?? */

        ret = call_usermodehelper(usermode_helper, argv, envp, 1);
        if (ret)
                conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
                          usermode_helper, cmd, tconn->name,
                          (ret >> 8) & 0xff, ret);
        else
                conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
                          usermode_helper, cmd, tconn->name,
                          (ret >> 8) & 0xff, ret);
        /* TODO: conn_bcast_event() ?? */

        if (ret < 0) /* Ignore any ERRNOs we got. */
                ret = 0;

        return ret;
}

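/* Return the strictest fencing policy configured on any volume of this
 * connection that still has at least a Consistent disk; FP_NOT_AVAIL if
 * there is no such volume. */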
static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
        enum drbd_fencing_p fp = FP_NOT_AVAIL;
        struct drbd_conf *mdev;
        int vnr;

        rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                if (get_ldev_if_state(mdev, D_CONSISTENT)) {
                        fp = max_t(enum drbd_fencing_p, fp,
                                   rcu_dereference(mdev->ldev->disk_conf)->fencing);
                        put_ldev(mdev);
                }
        }
        rcu_read_unlock();

        return fp;
}

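/* Run the fence-peer helper and map its exit code to a peer disk state
 * change (see the case labels below).  Returns true if the peer's disk
 * state could be confirmed to be at most D_OUTDATED. */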
bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
        union drbd_state mask = { };
        union drbd_state val = { };
        enum drbd_fencing_p fp;
        char *ex_to_string;
        int r;

        if (tconn->cstate >= C_WF_REPORT_PARAMS) {
                conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
                return false;
        }

        fp = highest_fencing_policy(tconn);
        switch (fp) {
        case FP_NOT_AVAIL:
                conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
                goto out;
        case FP_DONT_CARE:
                return true;
        default: ;
        }

        r = conn_khelper(tconn, "fence-peer");

        switch ((r>>8) & 0xff) {
        case 3: /* peer is inconsistent */
                ex_to_string = "peer is inconsistent or worse";
                mask.pdsk = D_MASK;
                val.pdsk = D_INCONSISTENT;
                break;
        case 4: /* peer got outdated, or was already outdated */
                ex_to_string = "peer was fenced";
                mask.pdsk = D_MASK;
                val.pdsk = D_OUTDATED;
                break;
        case 5: /* peer was down */
                if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
                        /* we will(have) create(d) a new UUID anyways... */
                        ex_to_string = "peer is unreachable, assumed to be dead";
                        mask.pdsk = D_MASK;
                        val.pdsk = D_OUTDATED;
                } else {
                        ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
                }
                break;
        case 6: /* Peer is primary, voluntarily outdate myself.
                 * This is useful when an unconnected R_SECONDARY is asked to
                 * become R_PRIMARY, but finds the other peer being active. */
                ex_to_string = "peer is active";
                conn_warn(tconn, "Peer is primary, outdating myself.\n");
                mask.disk = D_MASK;
                val.disk = D_OUTDATED;
                break;
        case 7:
                if (fp != FP_STONITH)
                        conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
                ex_to_string = "peer was stonithed";
                mask.pdsk = D_MASK;
                val.pdsk = D_OUTDATED;
                break;
        default:
                /* The script is broken ... */
                conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
                return false; /* Eventually leave IO frozen */
        }

        conn_info(tconn, "fence-peer helper returned %d (%s)\n",
                  (r>>8) & 0xff, ex_to_string);

 out:

        /* Not using
           conn_request_state(tconn, mask, val, CS_VERBOSE);
           here, because we might have been able to re-establish the
           connection in the meantime. */
        spin_lock_irq(&tconn->req_lock);
        if (tconn->cstate < C_WF_REPORT_PARAMS)
                _conn_request_state(tconn, mask, val, CS_VERBOSE);
        spin_unlock_irq(&tconn->req_lock);

        return conn_highest_pdsk(tconn) <= D_OUTDATED;
}

static int _try_outdate_peer_async(void *data)
{
        struct drbd_tconn *tconn = (struct drbd_tconn *)data;

        conn_try_outdate_peer(tconn);

        kref_put(&tconn->kref, &conn_destroy);
        return 0;
}

void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
        struct task_struct *opa;

        kref_get(&tconn->kref);
        opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
        if (IS_ERR(opa)) {
                conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
                kref_put(&tconn->kref, &conn_destroy);
        }
}

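/* Try to change the role of @mdev to @new_role.  The state request is
 * retried up to max_tries times, since intermediate steps (outdating the
 * peer, forcing the local disk UpToDate) can let a previously refused
 * transition succeed. */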
enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
        const int max_tries = 4;
        enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
        struct net_conf *nc;
        int try = 0;
        int forced = 0;
        union drbd_state mask, val;

        if (new_role == R_PRIMARY)
                request_ping(mdev->tconn); /* Detect a dead peer ASAP */

        mutex_lock(mdev->state_mutex);

        mask.i = 0; mask.role = R_MASK;
        val.i  = 0; val.role  = new_role;

        while (try++ < max_tries) {
                rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

                /* in case we first succeeded to outdate,
                 * but now suddenly could establish a connection */
                if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
                        val.pdsk = 0;
                        mask.pdsk = 0;
                        continue;
                }

                if (rv == SS_NO_UP_TO_DATE_DISK && force &&
                    (mdev->state.disk < D_UP_TO_DATE &&
                     mdev->state.disk >= D_INCONSISTENT)) {
                        mask.disk = D_MASK;
                        val.disk  = D_UP_TO_DATE;
                        forced = 1;
                        continue;
                }

                if (rv == SS_NO_UP_TO_DATE_DISK &&
                    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
                        D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

                        if (conn_try_outdate_peer(mdev->tconn)) {
                                val.disk = D_UP_TO_DATE;
                                mask.disk = D_MASK;
                        }
                        continue;
                }

                if (rv == SS_NOTHING_TO_DO)
                        goto out;
                if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
                        if (!conn_try_outdate_peer(mdev->tconn) && force) {
                                dev_warn(DEV, "Forced into split brain situation!\n");
                                mask.pdsk = D_MASK;
                                val.pdsk  = D_OUTDATED;

                        }
                        continue;
                }
                if (rv == SS_TWO_PRIMARIES) {
                        /* Maybe the peer is detected as dead very soon...
                           retry at most once more in this case. */
                        int timeo;
                        rcu_read_lock();
                        nc = rcu_dereference(mdev->tconn->net_conf);
                        timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
                        rcu_read_unlock();
                        schedule_timeout_interruptible(timeo);
                        if (try < max_tries)
                                try = max_tries - 1;
                        continue;
                }
                if (rv < SS_SUCCESS) {
                        rv = _drbd_request_state(mdev, mask, val,
                                                CS_VERBOSE + CS_WAIT_COMPLETE);
                        if (rv < SS_SUCCESS)
                                goto out;
                }
                break;
        }

        if (rv < SS_SUCCESS)
                goto out;

        if (forced)
                dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

        /* Wait until nothing is on the fly :) */
        wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

        if (new_role == R_SECONDARY) {
                set_disk_ro(mdev->vdisk, true);
                if (get_ldev(mdev)) {
                        mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
                        put_ldev(mdev);
                }
        } else {
                mutex_lock(&mdev->tconn->conf_update);
                nc = mdev->tconn->net_conf;
                if (nc)
                        nc->discard_my_data = 0; /* without copy; single bit op is atomic */
                mutex_unlock(&mdev->tconn->conf_update);

                set_disk_ro(mdev->vdisk, false);
                if (get_ldev(mdev)) {
                        if (((mdev->state.conn < C_CONNECTED ||
                               mdev->state.pdsk <= D_FAILED)
                              && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
                                drbd_uuid_new_current(mdev);

                        mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
                        put_ldev(mdev);
                }
        }

        /* writeout of activity log covered areas of the bitmap
         * to stable storage done in after state change already */

        if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
                /* if this was forced, we should consider sync */
                if (forced)
                        drbd_send_uuids(mdev);
                drbd_send_state(mdev);
        }

        drbd_md_sync(mdev);

        kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
out:
        mutex_unlock(mdev->state_mutex);
        return rv;
}

static const char *from_attrs_err_to_txt(int err)
{
        return  err == -ENOMSG ? "required attribute missing" :
                err == -EOPNOTSUPP ? "unknown mandatory attribute" :
                err == -EEXIST ? "can not change invariant setting" :
                "invalid attribute value";
}

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
        struct set_role_parms parms;
        int err;
        enum drbd_ret_code retcode;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;

        memset(&parms, 0, sizeof(parms));
        if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
                err = set_role_parms_from_attrs(&parms, info);
                if (err) {
                        retcode = ERR_MANDATORY_TAG;
                        drbd_msg_put_info(from_attrs_err_to_txt(err));
                        goto out;
                }
        }

        if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
                retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
        else
                retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
out:
        drbd_adm_finish(info, retcode);
        return 0;
}

/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
                                       struct drbd_backing_dev *bdev)
{
        sector_t md_size_sect = 0;
        int meta_dev_idx;

        rcu_read_lock();
        meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;

        switch (meta_dev_idx) {
        default:
                /* v07 style fixed size indexed meta data */
                bdev->md.md_size_sect = MD_RESERVED_SECT;
                bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
                bdev->md.al_offset = MD_AL_OFFSET;
                bdev->md.bm_offset = MD_BM_OFFSET;
                break;
        case DRBD_MD_INDEX_FLEX_EXT:
                /* just occupy the full device; unit: sectors */
                bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
                bdev->md.md_offset = 0;
                bdev->md.al_offset = MD_AL_OFFSET;
                bdev->md.bm_offset = MD_BM_OFFSET;
                break;
        case DRBD_MD_INDEX_INTERNAL:
        case DRBD_MD_INDEX_FLEX_INT:
                bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
                /* al size is still fixed */
                bdev->md.al_offset = -MD_AL_SECTORS;
                /* we need (slightly less than) ~ this much bitmap sectors: */
                md_size_sect = drbd_get_capacity(bdev->backing_bdev);
                md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
                md_size_sect = BM_SECT_TO_EXT(md_size_sect);
                md_size_sect = ALIGN(md_size_sect, 8);

                /* plus the "drbd meta data super block",
                 * and the activity log; */
                md_size_sect += MD_BM_OFFSET;

                bdev->md.md_size_sect = md_size_sect;
                /* bitmap offset is adjusted by 'super' block size */
                bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
                break;
        }
        rcu_read_unlock();
}

/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
        /* Needs 9 bytes at max including trailing NUL:
         * -1ULL ==> "16384 EB" */
        static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
        int base = 0;
        while (size >= 10000 && base < sizeof(units)-1) {
                /* shift + round */
                size = (size >> 10) + !!(size & (1<<9));
                base++;
        }
        sprintf(buf, "%u %cB", (unsigned)size, units[base]);

        return buf;
}

/* there is still a theoretical deadlock when called from receiver
 * on an D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
        set_bit(SUSPEND_IO, &mdev->flags);
        if (drbd_suspended(mdev))
                return;
        wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
        clear_bit(SUSPEND_IO, &mdev->flags);
        wake_up(&mdev->misc_wait);
}

/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
        sector_t prev_first_sect, prev_size; /* previous meta location */
        sector_t la_size, u_size;
        sector_t size;
        char ppb[10];

        int md_moved, la_size_changed;
        enum determine_dev_size rv = unchanged;

        /* race:
         * application request passes inc_ap_bio,
         * but then cannot get an AL-reference.
         * this function later may wait on ap_bio_cnt == 0. -> deadlock.
         *
         * to avoid that:
         * Suspend IO right here.
         * still lock the act_log to not trigger ASSERTs there.
         */
        drbd_suspend_io(mdev);

        /* no wait necessary anymore, actually we could assert that */
        wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

        prev_first_sect = drbd_md_first_sector(mdev->ldev);
        prev_size = mdev->ldev->md.md_size_sect;
        la_size = mdev->ldev->md.la_size_sect;

        /* TODO: should only be some assert here, not (re)init... */
        drbd_md_set_sector_offsets(mdev, mdev->ldev);

        rcu_read_lock();
        u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
        rcu_read_unlock();
        size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);

        if (drbd_get_capacity(mdev->this_bdev) != size ||
            drbd_bm_capacity(mdev) != size) {
                int err;
                err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
                if (unlikely(err)) {
                        /* currently there is only one error: ENOMEM! */
                        size = drbd_bm_capacity(mdev)>>1;
                        if (size == 0) {
                                dev_err(DEV, "OUT OF MEMORY! "
                                    "Could not allocate bitmap!\n");
                        } else {
                                dev_err(DEV, "BM resizing failed. "
                                    "Leaving size unchanged at size = %lu KB\n",
                                    (unsigned long)size);
                        }
                        rv = dev_size_error;
                }
                /* racy, see comments above. */
                drbd_set_my_capacity(mdev, size);
                mdev->ldev->md.la_size_sect = size;
                dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
                     (unsigned long long)size>>1);
        }
        if (rv == dev_size_error)
                goto out;

        la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

        md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
                || prev_size       != mdev->ldev->md.md_size_sect;

        if (la_size_changed || md_moved) {
                int err;

                drbd_al_shrink(mdev); /* All extents inactive. */
                dev_info(DEV, "Writing the whole bitmap, %s\n",
                         la_size_changed && md_moved ? "size changed and md moved" :
                         la_size_changed ? "size changed" : "md moved");
                /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
                err = drbd_bitmap_io(mdev, &drbd_bm_write,
                                "size changed", BM_LOCKED_MASK);
                if (err) {
                        rv = dev_size_error;
                        goto out;
                }
                drbd_md_mark_dirty(mdev);
        }

        if (size > la_size)
                rv = grew;
        if (size < la_size)
                rv = shrunk;
out:
        lc_unlock(mdev->act_log);
        wake_up(&mdev->al_wait);
        drbd_resume_io(mdev);

        return rv;
}

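/* Pick the new device size from the peer's size, our backing device's
 * capacity, the last agreed size, and an optional user-requested size,
 * preferring the smallest limit that is actually known. */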
sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
                  sector_t u_size, int assume_peer_has_space)
{
        sector_t p_size = mdev->p_size;   /* partner's disk size. */
        sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
        sector_t m_size; /* my size */
        sector_t size = 0;

        m_size = drbd_get_max_capacity(bdev);

        if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
                dev_warn(DEV, "Resize while not connected was forced by the user!\n");
                p_size = m_size;
        }

        if (p_size && m_size) {
                size = min_t(sector_t, p_size, m_size);
        } else {
                if (la_size) {
                        size = la_size;
                        if (m_size && m_size < size)
                                size = m_size;
                        if (p_size && p_size < size)
                                size = p_size;
                } else {
                        if (m_size)
                                size = m_size;
                        if (p_size)
                                size = p_size;
                }
        }

        if (size == 0)
                dev_err(DEV, "Both nodes diskless!\n");

        if (u_size) {
                if (u_size > size)
                        dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
                            (unsigned long)u_size>>1, (unsigned long)size>>1);
                else
                        size = u_size;
        }

        return size;
}

/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
        struct lru_cache *n, *t;
        struct lc_element *e;
        unsigned int in_use;
        int i;

        if (mdev->act_log &&
            mdev->act_log->nr_elements == dc->al_extents)
                return 0;

        in_use = 0;
        t = mdev->act_log;
        n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
                dc->al_extents, sizeof(struct lc_element), 0);

        if (n == NULL) {
                dev_err(DEV, "Cannot allocate act_log lru!\n");
                return -ENOMEM;
        }
        spin_lock_irq(&mdev->al_lock);
        if (t) {
                for (i = 0; i < t->nr_elements; i++) {
                        e = lc_element_by_index(t, i);
                        if (e->refcnt)
                                dev_err(DEV, "refcnt(%d)==%d\n",
                                    e->lc_number, e->refcnt);
                        in_use += e->refcnt;
                }
        }
        if (!in_use)
                mdev->act_log = n;
        spin_unlock_irq(&mdev->al_lock);
        if (in_use) {
                dev_err(DEV, "Activity log still in use!\n");
                lc_destroy(n);
                return -EBUSY;
        } else {
                if (t)
                        lc_destroy(t);
        }
        drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
        return 0;
}

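/* Propagate queue limits (max_hw_sectors, max_segments, segment
 * boundary) from the backing device, if attached, to our own request
 * queue. */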
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
        struct request_queue * const q = mdev->rq_queue;
        int max_hw_sectors = max_bio_size >> 9;
        int max_segments = 0;

        if (get_ldev_if_state(mdev, D_ATTACHING)) {
                struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

                max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
                rcu_read_lock();
                max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
                rcu_read_unlock();
                put_ldev(mdev);
        }

        blk_queue_logical_block_size(q, 512);
        blk_queue_max_hw_sectors(q, max_hw_sectors);
        /* This is the workaround for "bio would need to, but cannot, be split" */
        blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
        blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

        if (get_ldev_if_state(mdev, D_ATTACHING)) {
                struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

                blk_queue_stack_limits(q, b);

                if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
                        dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
                                 q->backing_dev_info.ra_pages,
                                 b->backing_dev_info.ra_pages);
                        q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
                }
                put_ldev(mdev);
        }
}

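/* Decide the maximum BIO size as the minimum of the local backing
 * device's limit and what the peer can handle, which depends on the
 * agreed protocol version. */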
void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
        int now, new, local, peer;

        now = queue_max_hw_sectors(mdev->rq_queue) << 9;
        local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
        peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */

        if (get_ldev_if_state(mdev, D_ATTACHING)) {
                local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
                mdev->local_max_bio_size = local;
                put_ldev(mdev);
        }

        /* We may ignore peer limits if the peer is modern enough.
           Because new from 8.3.8 onwards the peer can use multiple
           BIOs for a single peer_request */
        if (mdev->state.conn >= C_CONNECTED) {
                if (mdev->tconn->agreed_pro_version < 94)
                        peer = mdev->peer_max_bio_size;
                else if (mdev->tconn->agreed_pro_version == 94)
                        peer = DRBD_MAX_SIZE_H80_PACKET;
                else /* drbd 8.3.8 onwards */
                        peer = DRBD_MAX_BIO_SIZE;
        }

        new = min_t(int, local, peer);

        if (mdev->state.role == R_PRIMARY && new < now)
                dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);

        if (new != now)
                dev_info(DEV, "max BIO size = %u\n", new);

        drbd_setup_queue_param(mdev, new);
}

/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_tconn *tconn)
{
        drbd_thread_start(&tconn->worker);
        conn_flush_workqueue(tconn);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_tconn *tconn)
{
        bool stop_threads;
        spin_lock_irq(&tconn->req_lock);
        stop_threads = conn_all_vols_unconf(tconn);
        spin_unlock_irq(&tconn->req_lock);
        if (stop_threads) {
                /* asender is implicitly stopped by receiver
                 * in conn_disconnect() */
                drbd_thread_stop(&tconn->receiver);
                drbd_thread_stop(&tconn->worker);
        }
}

/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
        int s = 0;

        if (!lc_try_lock(mdev->act_log)) {
                dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
                return;
        }

        drbd_al_shrink(mdev);
        spin_lock_irq(&mdev->tconn->req_lock);
        if (mdev->state.conn < C_CONNECTED)
                s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
        spin_unlock_irq(&mdev->tconn->req_lock);
        lc_unlock(mdev->act_log);

        if (s)
                dev_info(DEV, "Suspended AL updates\n");
}

static bool should_set_defaults(struct genl_info *info)
{
        unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
        return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}

static void enforce_disk_conf_limits(struct disk_conf *dc)
{
        if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
                dc->al_extents = DRBD_AL_EXTENTS_MIN;
        if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
                dc->al_extents = DRBD_AL_EXTENTS_MAX;

        if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
                dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
}

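/* Change the options of an attached disk at runtime.  A new disk_conf
 * is built from the old one plus the request attributes and published
 * with rcu_assign_pointer(), so readers see either the complete old or
 * the complete new configuration. */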
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
        enum drbd_ret_code retcode;
        struct drbd_conf *mdev;
        struct disk_conf *new_disk_conf, *old_disk_conf;
        struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
        int err, fifo_size;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;

        mdev = adm_ctx.mdev;

        /* we also need a disk
         * to change the options on */
        if (!get_ldev(mdev)) {
                retcode = ERR_NO_DISK;
                goto out;
        }

        new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
        if (!new_disk_conf) {
                retcode = ERR_NOMEM;
                goto fail;
        }

        mutex_lock(&mdev->tconn->conf_update);
        old_disk_conf = mdev->ldev->disk_conf;
        *new_disk_conf = *old_disk_conf;
        if (should_set_defaults(info))
                set_disk_conf_defaults(new_disk_conf);

        err = disk_conf_from_attrs_for_change(new_disk_conf, info);
        if (err && err != -ENOMSG) {
                retcode = ERR_MANDATORY_TAG;
                drbd_msg_put_info(from_attrs_err_to_txt(err));
        }

        if (!expect(new_disk_conf->resync_rate >= 1))
                new_disk_conf->resync_rate = 1;

        enforce_disk_conf_limits(new_disk_conf);

        fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
        if (fifo_size != mdev->rs_plan_s->size) {
                new_plan = fifo_alloc(fifo_size);
                if (!new_plan) {
                        dev_err(DEV, "kmalloc of fifo_buffer failed");
                        retcode = ERR_NOMEM;
                        goto fail_unlock;
                }
        }

        wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
        drbd_al_shrink(mdev);
        err = drbd_check_al_size(mdev, new_disk_conf);
        lc_unlock(mdev->act_log);
        wake_up(&mdev->al_wait);

        if (err) {
                retcode = ERR_NOMEM;
                goto fail_unlock;
        }

        write_lock_irq(&global_state_lock);
        retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
        if (retcode == NO_ERROR) {
                rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
                drbd_resync_after_changed(mdev);
        }
        write_unlock_irq(&global_state_lock);

        if (retcode != NO_ERROR)
                goto fail_unlock;

        if (new_plan) {
                old_plan = mdev->rs_plan_s;
                rcu_assign_pointer(mdev->rs_plan_s, new_plan);
        }

        mutex_unlock(&mdev->tconn->conf_update);
        drbd_md_sync(mdev);

        if (mdev->state.conn >= C_CONNECTED)
                drbd_send_sync_param(mdev);

        synchronize_rcu();
        kfree(old_disk_conf);
        kfree(old_plan);
        goto success;

fail_unlock:
        mutex_unlock(&mdev->tconn->conf_update);
 fail:
        kfree(new_disk_conf);
        kfree(new_plan);
success:
        put_ldev(mdev);
 out:
        drbd_adm_finish(info, retcode);
        return 0;
}

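/* Attach backing device and meta data device to a minor: open and
 * validate both block devices, read the meta data, and negotiate the
 * resulting disk state through the state engine. */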
3b98c0c2 1230int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
b411b363 1231{
3b98c0c2
LE
1232 struct drbd_conf *mdev;
1233 int err;
116676ca 1234 enum drbd_ret_code retcode;
b411b363
PR
1235 enum determine_dev_size dd;
1236 sector_t max_possible_sectors;
1237 sector_t min_md_device_sectors;
1238 struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
daeda1cc 1239 struct disk_conf *new_disk_conf = NULL;
e525fd89 1240 struct block_device *bdev;
b411b363 1241 struct lru_cache *resync_lru = NULL;
9958c857 1242 struct fifo_buffer *new_plan = NULL;
b411b363 1243 union drbd_state ns, os;
f2024e7c 1244 enum drbd_state_rv rv;
44ed167d 1245 struct net_conf *nc;
b411b363 1246 int cp_discovered = 0;
b411b363 1247
3b98c0c2
LE
1248 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1249 if (!adm_ctx.reply_skb)
1250 return retcode;
1251 if (retcode != NO_ERROR)
40cbf085 1252 goto finish;
3b98c0c2
LE
1253
1254 mdev = adm_ctx.mdev;
0e29d163 1255 conn_reconfig_start(mdev->tconn);
b411b363
PR
1256
1257 /* if you want to reconfigure, please tear down first */
1258 if (mdev->state.disk > D_DISKLESS) {
1259 retcode = ERR_DISK_CONFIGURED;
1260 goto fail;
1261 }
82f59cc6
LE
1262 /* It may just now have detached because of IO error. Make sure
1263 * drbd_ldev_destroy is done already, we may end up here very fast,
1264 * e.g. if someone calls attach from the on-io-error handler,
1265 * to realize a "hot spare" feature (not that I'd recommend that) */
1266 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
b411b363 1267
3b98c0c2 1268 /* allocation not in the IO path, drbdsetup context */
b411b363
PR
1269 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1270 if (!nbc) {
1271 retcode = ERR_NOMEM;
1272 goto fail;
1273 }
daeda1cc
PR
1274 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
1275 if (!new_disk_conf) {
1276 retcode = ERR_NOMEM;
1277 goto fail;
1278 }
1279 nbc->disk_conf = new_disk_conf;
b411b363 1280
daeda1cc
PR
1281 set_disk_conf_defaults(new_disk_conf);
1282 err = disk_conf_from_attrs(new_disk_conf, info);
3b98c0c2 1283 if (err) {
b411b363 1284 retcode = ERR_MANDATORY_TAG;
3b98c0c2 1285 drbd_msg_put_info(from_attrs_err_to_txt(err));
b411b363
PR
1286 goto fail;
1287 }
1288
d589a21e
PR
1289 enforce_disk_conf_limits(new_disk_conf);
1290
9958c857
PR
1291 new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
1292 if (!new_plan) {
1293 retcode = ERR_NOMEM;
1294 goto fail;
1295 }
1296
daeda1cc 1297 if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
b411b363
PR
1298 retcode = ERR_MD_IDX_INVALID;
1299 goto fail;
1300 }
1301
44ed167d
PR
1302 rcu_read_lock();
1303 nc = rcu_dereference(mdev->tconn->net_conf);
1304 if (nc) {
daeda1cc 1305 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
44ed167d 1306 rcu_read_unlock();
47ff2d0a
PR
1307 retcode = ERR_STONITH_AND_PROT_A;
1308 goto fail;
1309 }
1310 }
44ed167d 1311 rcu_read_unlock();
47ff2d0a 1312
daeda1cc 1313 bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
d4d77629 1314 FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
e525fd89 1315 if (IS_ERR(bdev)) {
daeda1cc 1316 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
e525fd89 1317 PTR_ERR(bdev));
b411b363
PR
1318 retcode = ERR_OPEN_DISK;
1319 goto fail;
1320 }
e525fd89
TH
1321 nbc->backing_bdev = bdev;
1322
1323 /*
1324 * meta_dev_idx >= 0: external fixed size, possibly multiple
1325 * drbd sharing one meta device. TODO in that case, paranoia
1326 * check that [md_bdev, meta_dev_idx] is not yet used by some
1327 * other drbd minor! (if you use drbd.conf + drbdadm, that
1328 * should check it for you already; but if you don't, or
1329 * someone fooled it, we need to double check here)
1330 */
daeda1cc 1331 bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
d4d77629 1332 FMODE_READ | FMODE_WRITE | FMODE_EXCL,
daeda1cc 1333 (new_disk_conf->meta_dev_idx < 0) ?
d4d77629 1334 (void *)mdev : (void *)drbd_m_holder);
e525fd89 1335 if (IS_ERR(bdev)) {
daeda1cc 1336 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
e525fd89 1337 PTR_ERR(bdev));
b411b363
PR
1338 retcode = ERR_OPEN_MD_DISK;
1339 goto fail;
1340 }
e525fd89 1341 nbc->md_bdev = bdev;
b411b363 1342
e525fd89 1343 if ((nbc->backing_bdev == nbc->md_bdev) !=
daeda1cc
PR
1344 (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1345 new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
e525fd89 1346 retcode = ERR_MD_IDX_INVALID;
b411b363
PR
1347 goto fail;
1348 }
1349
1350 resync_lru = lc_create("resync", drbd_bm_ext_cache,
46a15bc3 1351 1, 61, sizeof(struct bm_extent),
b411b363
PR
1352 offsetof(struct bm_extent, lce));
1353 if (!resync_lru) {
1354 retcode = ERR_NOMEM;
e525fd89 1355 goto fail;
b411b363
PR
1356 }
1357
1358 /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
1359 drbd_md_set_sector_offsets(mdev, nbc);
1360
daeda1cc 1361 if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
b411b363
PR
1362 dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
1363 (unsigned long long) drbd_get_max_capacity(nbc),
daeda1cc 1364 (unsigned long long) new_disk_conf->disk_size);
67b58bf7 1365 retcode = ERR_DISK_TOO_SMALL;
e525fd89 1366 goto fail;
b411b363
PR
1367 }
1368
daeda1cc 1369 if (new_disk_conf->meta_dev_idx < 0) {
b411b363
PR
1370 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1371 /* at least one MB, otherwise it does not make sense */
1372 min_md_device_sectors = (2<<10);
1373 } else {
1374 max_possible_sectors = DRBD_MAX_SECTORS;
daeda1cc 1375 min_md_device_sectors = MD_RESERVED_SECT * (new_disk_conf->meta_dev_idx + 1);
b411b363
PR
1376 }
1377
b411b363 1378 if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
67b58bf7 1379 retcode = ERR_MD_DISK_TOO_SMALL;
b411b363
PR
1380 dev_warn(DEV, "refusing attach: md-device too small, "
1381 "at least %llu sectors needed for this meta-disk type\n",
1382 (unsigned long long) min_md_device_sectors);
e525fd89 1383 goto fail;
b411b363
PR
1384 }
1385
1386 /* Make sure the new disk is big enough
1387 * (we may currently be R_PRIMARY with no local disk...) */
1388 if (drbd_get_max_capacity(nbc) <
1389 drbd_get_capacity(mdev->this_bdev)) {
67b58bf7 1390 retcode = ERR_DISK_TOO_SMALL;
e525fd89 1391 goto fail;
b411b363
PR
1392 }
1393
1394 nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1395
1352994b
LE
1396 if (nbc->known_size > max_possible_sectors) {
1397 dev_warn(DEV, "==> truncating very big lower level device "
1398 "to currently maximum possible %llu sectors <==\n",
1399 (unsigned long long) max_possible_sectors);
daeda1cc 1400 if (new_disk_conf->meta_dev_idx >= 0)
1352994b
LE
1401 dev_warn(DEV, "==>> using internal or flexible "
1402 "meta data may help <<==\n");
1403 }
1404
b411b363
PR
1405 drbd_suspend_io(mdev);
1406 /* also wait for the last barrier ack. */
2aebfabb 1407 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
b411b363 1408 /* and for any other previously queued work */
a21e9298 1409 drbd_flush_workqueue(mdev);
b411b363 1410
f2024e7c
AG
1411 rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
1412 retcode = rv; /* FIXME: Type mismatch. */
b411b363 1413 drbd_resume_io(mdev);
f2024e7c 1414 if (rv < SS_SUCCESS)
e525fd89 1415 goto fail;
b411b363
PR
1416
1417 if (!get_ldev_if_state(mdev, D_ATTACHING))
1418 goto force_diskless;
1419
1420 drbd_md_set_sector_offsets(mdev, nbc);
1421
1422 if (!mdev->bitmap) {
1423 if (drbd_bm_init(mdev)) {
1424 retcode = ERR_NOMEM;
1425 goto force_diskless_dec;
1426 }
1427 }
1428
1429 retcode = drbd_md_read(mdev, nbc);
1430 if (retcode != NO_ERROR)
1431 goto force_diskless_dec;
1432
1433 if (mdev->state.conn < C_CONNECTED &&
1434 mdev->state.role == R_PRIMARY &&
1435 (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1436 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
1437 (unsigned long long)mdev->ed_uuid);
1438 retcode = ERR_DATA_NOT_CURRENT;
1439 goto force_diskless_dec;
1440 }
1441
1442 /* Since we are diskless, fix the activity log first... */
daeda1cc 1443 if (drbd_check_al_size(mdev, new_disk_conf)) {
b411b363
PR
1444 retcode = ERR_NOMEM;
1445 goto force_diskless_dec;
1446 }
1447
1448 /* Prevent shrinking of consistent devices ! */
1449 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
daeda1cc 1450 drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
b411b363 1451 dev_warn(DEV, "refusing to truncate a consistent device\n");
67b58bf7 1452 retcode = ERR_DISK_TOO_SMALL;
b411b363
PR
1453 goto force_diskless_dec;
1454 }
1455
1456 if (!drbd_al_read_log(mdev, nbc)) {
1457 retcode = ERR_IO_MD_DISK;
1458 goto force_diskless_dec;
1459 }
1460
b411b363
PR
1461 /* Reset the "barriers don't work" bits here, then force meta data to
1462 * be written, to ensure we determine if barriers are supported. */
e544046a 1463 if (new_disk_conf->md_flushes)
a8a4e51e 1464 clear_bit(MD_NO_FUA, &mdev->flags);
e544046a
AG
1465 else
1466 set_bit(MD_NO_FUA, &mdev->flags);
b411b363
PR
1467
1468 /* Point of no return reached.
1469 * Devices and memory are no longer released by error cleanup below.
1470 * now mdev takes over responsibility, and the state engine should
1471 * clean it up somewhere. */
1472 D_ASSERT(mdev->ldev == NULL);
1473 mdev->ldev = nbc;
1474 mdev->resync = resync_lru;
9958c857 1475 mdev->rs_plan_s = new_plan;
b411b363
PR
1476 nbc = NULL;
1477 resync_lru = NULL;
daeda1cc 1478 new_disk_conf = NULL;
9958c857 1479 new_plan = NULL;
b411b363 1480
2451fc3b
PR
1481 mdev->write_ordering = WO_bdev_flush;
1482 drbd_bump_write_ordering(mdev, WO_bdev_flush);
b411b363
PR
1483
1484 if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
1485 set_bit(CRASHED_PRIMARY, &mdev->flags);
1486 else
1487 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1488
894c6a94 1489 if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
da9fbc27 1490 !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod)) {
b411b363
PR
1491 set_bit(CRASHED_PRIMARY, &mdev->flags);
1492 cp_discovered = 1;
1493 }
1494
1495 mdev->send_cnt = 0;
1496 mdev->recv_cnt = 0;
1497 mdev->read_cnt = 0;
1498 mdev->writ_cnt = 0;
1499
99432fcc 1500 drbd_reconsider_max_bio_size(mdev);
b411b363
PR
1501
1502 /* If I am currently not R_PRIMARY,
1503 * but meta data primary indicator is set,
1504 * I just now recover from a hard crash,
1505 * and have been R_PRIMARY before that crash.
1506 *
1507 * Now, if I had no connection before that crash
1508 * (have been degraded R_PRIMARY), chances are that
1509 * I won't find my peer now either.
1510 *
1511 * In that case, and _only_ in that case,
1512 * we use the degr-wfc-timeout instead of the default,
1513 * so we can automatically recover from a crash of a
1514 * degraded but active "cluster" after a certain timeout.
1515 */
1516 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
1517 if (mdev->state.role != R_PRIMARY &&
1518 drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1519 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1520 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1521
24c4830c 1522 dd = drbd_determine_dev_size(mdev, 0);
b411b363
PR
1523 if (dd == dev_size_error) {
1524 retcode = ERR_NOMEM_BITMAP;
1525 goto force_diskless_dec;
1526 } else if (dd == grew)
1527 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
1528
	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
			 "(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
			"set_n_write from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read,
			"read from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (cp_discovered) {
		drbd_al_apply_to_bm(mdev);
		if (drbd_bitmap_io(mdev, &drbd_bm_write,
			"crashed primary apply AL", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
		drbd_suspend_al(mdev); /* IO is still suspended here... */

	spin_lock_irq(&mdev->tconn->req_lock);
	os = drbd_read_state(mdev);
	ns = os;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WAS_UP_TO_DATE...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	rcu_read_lock();
	if (ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;
	rcu_read_unlock();

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state until after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		   To avoid a race in receive_state, free p_uuid while
		   holding req_lock. I.e. atomic with the state change */
		kfree(mdev->p_uuid);
		mdev->p_uuid = NULL;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	conn_reconfig_done(mdev->tconn);
	drbd_adm_finish(info, retcode);
	return 0;

 force_diskless_dec:
	put_ldev(mdev);
 force_diskless:
	drbd_force_state(mdev, NS(disk, D_FAILED));
	drbd_md_sync(mdev);
 fail:
	conn_reconfig_done(mdev->tconn);
	if (nbc) {
		if (nbc->backing_bdev)
			blkdev_put(nbc->backing_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		if (nbc->md_bdev)
			blkdev_put(nbc->md_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		kfree(nbc);
	}
	kfree(new_disk_conf);
	lc_destroy(resync_lru);
	kfree(new_plan);

 finish:
	drbd_adm_finish(info, retcode);
	return 0;
}

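/*
 * Summary of the attach tail above: the initial disk state is derived
 * from the meta-data flags alone.
 *
 *	MDF_CONSISTENT	MDF_WAS_UP_TO_DATE	resulting ns.disk
 *	     0			-		D_INCONSISTENT
 *	     1			0		D_OUTDATED
 *	     1			1		D_CONSISTENT
 *
 * D_CONSISTENT is promoted to D_UP_TO_DATE only if the peer is known to
 * be outdated (MDF_PEER_OUT_DATED) or fencing is FP_DONT_CARE. While
 * C_CONNECTED, the verdict is parked in new_state_tmp and the disk goes
 * through D_NEGOTIATING until the UUID exchange settles it.
 */
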
static int adm_detach(struct drbd_conf *mdev)
{
	enum drbd_state_rv retcode;
	int ret;

	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
	retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
	/* D_FAILED will transition to DISKLESS. */
	ret = wait_event_interruptible(mdev->misc_wait,
			mdev->state.disk != D_FAILED);
	drbd_resume_io(mdev);
	if ((int)retcode == (int)SS_IS_DISKLESS)
		retcode = SS_NOTHING_TO_DO;
	if (ret)
		retcode = ERR_INTR;
	return retcode;
}

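/*
 * adm_detach() suspends IO first so that nothing can get stuck in
 * drbd_al_begin_io() while the disk goes to D_FAILED, then waits until
 * the state engine has moved on from D_FAILED (normally to D_DISKLESS).
 * A device that was already detached comes back as SS_IS_DISKLESS and is
 * reported as SS_NOTHING_TO_DO rather than as an error; a pending signal
 * aborts only the wait and is mapped to ERR_INTR, while the requested
 * state change itself may still complete asynchronously.
 */
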
/* Detaching the disk is a process in multiple stages. First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then have we finally detached. */
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = adm_detach(adm_ctx.mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

static bool conn_resync_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_SYNC_SOURCE ||
		    mdev->state.conn == C_SYNC_TARGET ||
		    mdev->state.conn == C_PAUSED_SYNC_S ||
		    mdev->state.conn == C_PAUSED_SYNC_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}

static bool conn_ov_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_VERIFY_S ||
		    mdev->state.conn == C_VERIFY_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}

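/*
 * conn_resync_running() and conn_ov_running() scan all volumes of a
 * connection under rcu_read_lock(). drbd_adm_net_opts() below uses them
 * to refuse changing csums-alg while any volume is resyncing, and
 * verify-alg while an online verify is in flight: swapping the hash in
 * the middle of a run would make the two peers disagree on checksums.
 */
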
static enum drbd_ret_code
_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
{
	struct drbd_conf *mdev;
	int i;

	if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
		if (new_conf->wire_protocol != old_conf->wire_protocol)
			return ERR_NEED_APV_100;

		if (new_conf->two_primaries != old_conf->two_primaries)
			return ERR_NEED_APV_100;

		if (!new_conf->integrity_alg != !old_conf->integrity_alg)
			return ERR_NEED_APV_100;

		if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
			return ERR_NEED_APV_100;
	}

	if (!new_conf->two_primaries &&
	    conn_highest_role(tconn) == R_PRIMARY &&
	    conn_highest_peer(tconn) == R_PRIMARY)
		return ERR_NEED_ALLOW_TWO_PRI;

	if (new_conf->two_primaries &&
	    (new_conf->wire_protocol != DRBD_PROT_C))
		return ERR_NOT_PROTO_C;

	idr_for_each_entry(&tconn->volumes, mdev, i) {
		if (get_ldev(mdev)) {
			enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
			put_ldev(mdev);
			if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
				return ERR_STONITH_AND_PROT_A;
		}
		if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
			return ERR_DISCARD;
	}

	if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
		return ERR_CONG_NOT_PROTO_A;

	return NO_ERROR;
}

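/*
 * The checks above encode the compatibility matrix for net options:
 * while connected to a peer with an agreed protocol version below 100,
 * neither the wire protocol, the two-primaries setting, nor the
 * integrity algorithm may change; dual-primary operation requires
 * protocol C; protocol A cannot be combined with FP_STONITH fencing;
 * discard-my-data is rejected on a current Primary; and an on-congestion
 * policy other than "block" is only allowed with protocol A.
 */
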
static enum drbd_ret_code
check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
{
	enum drbd_ret_code rv;
	struct drbd_conf *mdev;
	int i;

	rcu_read_lock();
	rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
	rcu_read_unlock();

	/* tconn->volumes protected by genl_lock() here */
	idr_for_each_entry(&tconn->volumes, mdev, i) {
		if (!mdev->bitmap) {
			if (drbd_bm_init(mdev))
				return ERR_NOMEM;
		}
	}

	return rv;
}

struct crypto {
	struct crypto_hash *verify_tfm;
	struct crypto_hash *csums_tfm;
	struct crypto_hash *cram_hmac_tfm;
	struct crypto_hash *integrity_tfm;
	void *int_dig_in;
	void *int_dig_vv;
};

static int
alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
{
	if (!tfm_name[0])
		return NO_ERROR;

	*tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(*tfm)) {
		*tfm = NULL;
		return err_alg;
	}

	return NO_ERROR;
}

static enum drbd_ret_code
alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
{
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	enum drbd_ret_code rv;
	int hash_size;

	rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
			ERR_CSUMS_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
			ERR_VERIFY_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
			ERR_INTEGRITY_ALG);
	if (rv != NO_ERROR)
		return rv;
	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			 new_conf->cram_hmac_alg);

		rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
				ERR_AUTH_ALG);
	}
	if (crypto->integrity_tfm) {
		hash_size = crypto_hash_digestsize(crypto->integrity_tfm);
		crypto->int_dig_in = kmalloc(hash_size, GFP_KERNEL);
		if (!crypto->int_dig_in)
			return ERR_NOMEM;
		crypto->int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
		if (!crypto->int_dig_vv)
			return ERR_NOMEM;
	}

	return rv;
}

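/*
 * alloc_crypto() fills a scratch "struct crypto" from the textual
 * algorithm names in net_conf; an empty name simply leaves the tfm
 * pointer NULL. On any failure the caller unwinds with free_crypto(),
 * which is why the struct is zero-initialized at the call sites
 * (struct crypto crypto = { };). The cram_hmac_alg name is wrapped as
 * "hmac(<alg>)" before allocation, and the two integrity digest buffers
 * are sized from the chosen integrity algorithm's digest size.
 */
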
static void free_crypto(struct crypto *crypto)
{
	kfree(crypto->int_dig_in);
	kfree(crypto->int_dig_vv);
	crypto_free_hash(crypto->cram_hmac_tfm);
	crypto_free_hash(crypto->integrity_tfm);
	crypto_free_hash(crypto->csums_tfm);
	crypto_free_hash(crypto->verify_tfm);
}

int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_tconn *tconn;
	struct net_conf *old_conf, *new_conf = NULL;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto crypto = { };

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tconn = adm_ctx.tconn;

	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto out;
	}

	conn_reconfig_start(tconn);

	mutex_lock(&tconn->data.mutex);
	mutex_lock(&tconn->conf_update);
	old_conf = tconn->net_conf;

	if (!old_conf) {
		drbd_msg_put_info("net conf missing, try connect");
		retcode = ERR_INVALID_REQUEST;
		goto fail;
	}

	*new_conf = *old_conf;
	if (should_set_defaults(info))
		set_net_conf_defaults(new_conf);

	err = net_conf_from_attrs_for_change(new_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(tconn, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	/* re-sync running */
	rsr = conn_resync_running(tconn);
	if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	/* online verify running */
	ovr = conn_ov_running(tconn);
	if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
		retcode = ERR_VERIFY_RUNNING;
		goto fail;
	}

	retcode = alloc_crypto(&crypto, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	rcu_assign_pointer(tconn->net_conf, new_conf);

	if (!rsr) {
		crypto_free_hash(tconn->csums_tfm);
		tconn->csums_tfm = crypto.csums_tfm;
		crypto.csums_tfm = NULL;
	}
	if (!ovr) {
		crypto_free_hash(tconn->verify_tfm);
		tconn->verify_tfm = crypto.verify_tfm;
		crypto.verify_tfm = NULL;
	}

	kfree(tconn->int_dig_in);
	tconn->int_dig_in = crypto.int_dig_in;
	kfree(tconn->int_dig_vv);
	tconn->int_dig_vv = crypto.int_dig_vv;
	crypto_free_hash(tconn->integrity_tfm);
	tconn->integrity_tfm = crypto.integrity_tfm;
	if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
		/* Do this without trying to take tconn->data.mutex again.  */
		__drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);

	crypto_free_hash(tconn->cram_hmac_tfm);
	tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;

	mutex_unlock(&tconn->conf_update);
	mutex_unlock(&tconn->data.mutex);
	synchronize_rcu();
	kfree(old_conf);

	if (tconn->cstate >= C_WF_REPORT_PARAMS)
		drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));

	goto done;

 fail:
	mutex_unlock(&tconn->conf_update);
	mutex_unlock(&tconn->data.mutex);
	free_crypto(&crypto);
	kfree(new_conf);
 done:
	conn_reconfig_done(tconn);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}

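/*
 * The swap of tconn->net_conf above follows the usual RCU publish/retire
 * pattern: the fully constructed new_conf is published with
 * rcu_assign_pointer() while conf_update is held, the locks are dropped,
 * and only after synchronize_rcu() has guaranteed that no reader can
 * still see the old pointer is the old configuration kfree()d. Readers
 * correspondingly access it via rcu_dereference(tconn->net_conf) inside
 * rcu_read_lock()/rcu_read_unlock() sections.
 */
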
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	struct net_conf *old_conf, *new_conf = NULL;
	struct crypto crypto = { };
	struct drbd_tconn *oconn;
	struct drbd_tconn *tconn;
	struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
	enum drbd_ret_code retcode;
	int i;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tconn = adm_ctx.tconn;
	conn_reconfig_start(tconn);

	if (tconn->cstate > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	set_net_conf_defaults(new_conf);

	err = net_conf_from_attrs(new_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(tconn, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	retcode = NO_ERROR;

	new_my_addr = (struct sockaddr *)&new_conf->my_addr;
	new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;

	/* No need for _rcu here. All reconfiguration is
	 * strictly serialized on genl_lock(). We are protected against
	 * concurrent reconfiguration/addition/deletion */
	list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
		struct net_conf *nc;
		if (oconn == tconn)
			continue;

		rcu_read_lock();
		nc = rcu_dereference(oconn->net_conf);
		if (nc) {
			taken_addr = (struct sockaddr *)&nc->my_addr;
			if (new_conf->my_addr_len == nc->my_addr_len &&
			    !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
				retcode = ERR_LOCAL_ADDR;

			taken_addr = (struct sockaddr *)&nc->peer_addr;
			if (new_conf->peer_addr_len == nc->peer_addr_len &&
			    !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
				retcode = ERR_PEER_ADDR;
		}
		rcu_read_unlock();
		if (retcode != NO_ERROR)
			goto fail;
	}

	retcode = alloc_crypto(&crypto, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	conn_flush_workqueue(tconn);

	mutex_lock(&tconn->conf_update);
	old_conf = tconn->net_conf;
	if (old_conf) {
		retcode = ERR_NET_CONFIGURED;
		mutex_unlock(&tconn->conf_update);
		goto fail;
	}
	rcu_assign_pointer(tconn->net_conf, new_conf);

	conn_free_crypto(tconn);
	tconn->int_dig_in = crypto.int_dig_in;
	tconn->int_dig_vv = crypto.int_dig_vv;
	tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
	tconn->integrity_tfm = crypto.integrity_tfm;
	tconn->csums_tfm = crypto.csums_tfm;
	tconn->verify_tfm = crypto.verify_tfm;

	mutex_unlock(&tconn->conf_update);

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, i) {
		mdev->send_cnt = 0;
		mdev->recv_cnt = 0;
	}
	rcu_read_unlock();

	retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);

	conn_reconfig_done(tconn);
	drbd_adm_finish(info, retcode);
	return 0;

fail:
	free_crypto(&crypto);
	kfree(new_conf);

	conn_reconfig_done(tconn);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
{
	enum drbd_state_rv rv;

	rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
			force ? CS_HARD : 0);

	switch (rv) {
	case SS_NOTHING_TO_DO:
		break;
	case SS_ALREADY_STANDALONE:
		return SS_SUCCESS;
	case SS_PRIMARY_NOP:
		/* Our state checking code wants to see the peer outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						   pdsk, D_OUTDATED), CS_VERBOSE);
		break;
	case SS_CW_FAILED_BY_PEER:
		/* The peer probably wants to see us outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						   disk, D_OUTDATED), 0);
		if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
			rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
						CS_HARD);
		}
		break;
	default:
		/* no special handling necessary */
		break;
	}

	if (rv >= SS_SUCCESS) {
		enum drbd_state_rv rv2;
		/* No one else can reconfigure the network while I am here.
		 * The state handling only uses drbd_thread_stop_nowait(),
		 * we want to really wait here until the receiver is no more.
		 */
		drbd_thread_stop(&adm_ctx.tconn->receiver);

		/* Race breaker. This additional state change request may be
		 * necessary, if this was a forced disconnect during a receiver
		 * restart. We may have "killed" the receiver thread just
		 * after drbdd_init() returned. Typically, we should be
		 * C_STANDALONE already, now, and this becomes a no-op.
		 */
		rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
					 CS_VERBOSE | CS_HARD);
		if (rv2 < SS_SUCCESS)
			conn_err(tconn,
				 "unexpected rv2=%d in conn_try_disconnect()\n",
				 rv2);
	}
	return rv;
}

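/*
 * conn_try_disconnect() is a small negotiation ladder: the plain request
 * for C_DISCONNECTING may be refused because one side is Primary, in
 * which case it is retried with the peer disk (SS_PRIMARY_NOP) or the
 * local disk (SS_CW_FAILED_BY_PEER) marked D_OUTDATED, and as a last
 * resort forced through with CS_HARD. Only after a successful state
 * change does it block in drbd_thread_stop() until the receiver thread
 * is really gone, then nails down C_STANDALONE.
 */
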
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct disconnect_parms parms;
	struct drbd_tconn *tconn;
	enum drbd_state_rv rv;
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	tconn = adm_ctx.tconn;
	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
		err = disconnect_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	rv = conn_try_disconnect(tconn, parms.force_disconnect);
	if (rv < SS_SUCCESS)
		retcode = rv;  /* FIXME: Type mismatch. */
	else
		retcode = NO_ERROR;
 fail:
	drbd_adm_finish(info, retcode);
	return 0;
}

void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}

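/*
 * After an online grow, whichever node is Primary becomes the sync
 * source; if both sides have the same role, the tie is broken by the
 * DISCARD_CONCURRENT flag, which is set on exactly one of the two peers
 * at connect time, so exactly one side starts as C_SYNC_SOURCE while
 * the other waits in C_WF_SYNC_UUID.
 */
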
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
	struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
	struct resize_parms rs;
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	enum dds_flags ddsf;
	sector_t u_size;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	memset(&rs, 0, sizeof(struct resize_parms));
	if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
		err = resize_parms_from_attrs(&rs, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	mdev = adm_ctx.mdev;
	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		goto fail_ldev;
	}

	rcu_read_lock();
	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	if (u_size != (sector_t)rs.resize_size) {
		new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
		if (!new_disk_conf) {
			retcode = ERR_NOMEM;
			goto fail_ldev;
		}
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	if (new_disk_conf) {
		mutex_lock(&mdev->tconn->conf_update);
		old_disk_conf = mdev->ldev->disk_conf;
		*new_disk_conf = *old_disk_conf;
		new_disk_conf->disk_size = (sector_t)rs.resize_size;
		rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
		mutex_unlock(&mdev->tconn->conf_update);
		synchronize_rcu();
		kfree(old_disk_conf);
	}

	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determine_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

 fail:
	drbd_adm_finish(info, retcode);
	return 0;

 fail_ldev:
	put_ldev(mdev);
	goto fail;
}

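/*
 * Resize flow: a requested size that differs from the current
 * disk_conf->disk_size is published as a new disk_conf using the same
 * RCU publish/retire pattern as the net options. drbd_determine_dev_size()
 * then adjusts device, bitmap and activity log; if we grew while
 * C_CONNECTED, RESIZE_PENDING is set and updated UUIDs and sizes are
 * sent to the peer (see resync_after_online_grow() above).
 */
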
void drbd_set_res_opts_defaults(struct res_opts *r)
{
	return set_res_opts_defaults(r);
}

int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	cpumask_var_t new_cpu_mask;
	struct drbd_tconn *tconn;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;
	tconn = adm_ctx.tconn;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
		retcode = ERR_NOMEM;
		drbd_msg_put_info("unable to allocate cpumask");
		goto fail;
	}

	res_opts = tconn->res_opts;
	if (should_set_defaults(info))
		set_res_opts_defaults(&res_opts);

	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && res_opts.cpu_mask[0] != 0) {
		err = __bitmap_parse(res_opts.cpu_mask, 32, 0,
				     cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
			retcode = ERR_CPU_MASK_PARSE;
			goto fail;
		}
	}

	tconn->res_opts = res_opts;

	if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
		cpumask_copy(tconn->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(tconn);
		tconn->receiver.reset_cpu_mask = 1;
		tconn->asender.reset_cpu_mask = 1;
		tconn->worker.reset_cpu_mask = 1;
	}

fail:
	free_cpumask_var(new_cpu_mask);

	drbd_adm_finish(info, retcode);
	return 0;
}

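/*
 * The cpu-mask string in res_opts is parsed with __bitmap_parse() (hex
 * chunks, same format as the /proc irq affinity files) into a cpumask;
 * e.g. a cpu-mask of "3" restricts the DRBD threads of this resource to
 * CPUs 0 and 1. When the mask actually changed, the receiver, asender
 * and worker threads are flagged to re-apply their CPU affinity the
 * next time they pass through their main loop.
 */
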
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->tconn->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->tconn->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
	int rv;

	rv = drbd_bmio_set_n_write(mdev);
	drbd_suspend_al(mdev);
	return rv;
}

static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
		union drbd_state mask, union drbd_state val)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_request_state(adm_ctx.mdev, mask, val);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
}

int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
	union drbd_dev_state s;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = adm_ctx.mdev->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}

int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
	}
	drbd_suspend_io(mdev);
	retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (retcode == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev->tconn);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
	}
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}

int nla_put_drbd_cfg_context(struct sk_buff *skb, const char *resource_name, unsigned vnr)
{
	struct nlattr *nla;
	nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		goto nla_put_failure;
	if (vnr != VOLUME_UNSPECIFIED)
		NLA_PUT_U32(skb, T_ctx_volume, vnr);
	NLA_PUT_STRING(skb, T_ctx_resource_name, resource_name);
	nla_nest_end(skb, nla);
	return 0;

nla_put_failure:
	if (nla)
		nla_nest_cancel(skb, nla);
	return -EMSGSIZE;
}

int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
		const struct sib_info *sib)
{
	struct state_info *si = NULL; /* for sizeof(si->member); */
	struct net_conf *nc;
	struct nlattr *nla;
	int got_ldev;
	int err = 0;
	int exclude_sensitive;

	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
	 * to.  So we better exclude_sensitive information.
	 *
	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
	 * in the context of the requesting user process. Exclude sensitive
	 * information, unless current has superuser.
	 *
	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
	 * relies on the current implementation of netlink_dump(), which
	 * executes the dump callback successively from netlink_recvmsg(),
	 * always in the context of the receiving process */
	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);

	got_ldev = get_ldev(mdev);

	/* We need to add connection name and volume number information still.
	 * Minor number is in drbd_genlmsghdr. */
	if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
		goto nla_put_failure;

	if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
		goto nla_put_failure;

	rcu_read_lock();
	if (got_ldev)
		if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
			goto nla_put_failure;

	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc)
		err = net_conf_to_skb(skb, nc, exclude_sensitive);
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
	if (!nla)
		goto nla_put_failure;
	NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
	NLA_PUT_U32(skb, T_current_state, mdev->state.i);
	NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
	NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));

	if (got_ldev) {
		NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
		NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
		NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
		NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
		if (C_SYNC_SOURCE <= mdev->state.conn &&
		    C_PAUSED_SYNC_T >= mdev->state.conn) {
			NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
			NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
		}
	}

	if (sib) {
		switch (sib->sib_reason) {
		case SIB_SYNC_PROGRESS:
		case SIB_GET_STATUS_REPLY:
			break;
		case SIB_STATE_CHANGE:
			NLA_PUT_U32(skb, T_prev_state, sib->os.i);
			NLA_PUT_U32(skb, T_new_state, sib->ns.i);
			break;
		case SIB_HELPER_POST:
			NLA_PUT_U32(skb,
				T_helper_exit_code, sib->helper_exit_code);
			/* fall through */
		case SIB_HELPER_PRE:
			NLA_PUT_STRING(skb, T_helper, sib->helper_name);
			break;
		}
	}
	nla_nest_end(skb, nla);

	if (0)
nla_put_failure:
		err = -EMSGSIZE;
	if (got_ldev)
		put_ldev(mdev);
	return err;
}

int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_conf *mdev;
	struct drbd_genlmsghdr *dh;
	struct drbd_tconn *pos = (struct drbd_tconn *)cb->args[0];
	struct drbd_tconn *tconn = NULL;
	struct drbd_tconn *tmp;
	unsigned volume = cb->args[1];

	/* Open coded, deferred, iteration:
	 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
	 *	idr_for_each_entry(&tconn->volumes, mdev, i) {
	 *	  ...
	 *	}
	 * }
	 * where tconn is cb->args[0];
	 * and i is cb->args[1];
	 *
	 * cb->args[2] indicates if we shall loop over all resources,
	 * or just dump all volumes of a single resource.
	 *
	 * This may miss entries inserted after this dump started,
	 * or entries deleted before they are reached.
	 *
	 * We need to make sure the mdev won't disappear while
	 * we are looking at it, and revalidate our iterators
	 * on each iteration.
	 */

	/* synchronize with conn_create()/conn_destroy() */
	rcu_read_lock();
	/* revalidate iterator position */
	list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
		if (pos == NULL) {
			/* first iteration */
			pos = tmp;
			tconn = pos;
			break;
		}
		if (tmp == pos) {
			tconn = pos;
			break;
		}
	}
	if (tconn) {
next_tconn:
		mdev = idr_get_next(&tconn->volumes, &volume);
		if (!mdev) {
			/* No more volumes to dump on this tconn.
			 * Advance tconn iterator. */
			pos = list_entry_rcu(tconn->all_tconn.next,
					     struct drbd_tconn, all_tconn);
			/* Did we dump any volume on this tconn yet? */
			if (volume != 0) {
				/* If we reached the end of the list,
				 * or only a single resource dump was requested,
				 * we are done. */
				if (&pos->all_tconn == &drbd_tconns || cb->args[2])
					goto out;
				volume = 0;
				tconn = pos;
				goto next_tconn;
			}
		}

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
				cb->nlh->nlmsg_seq, &drbd_genl_family,
				NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			goto out;

		if (!mdev) {
			/* this is a tconn without a single volume */
			dh->minor = -1U;
			dh->ret_code = NO_ERROR;
			if (nla_put_drbd_cfg_context(skb, tconn->name, VOLUME_UNSPECIFIED))
				genlmsg_cancel(skb, dh);
			else
				genlmsg_end(skb, dh);
			goto out;
		}

		D_ASSERT(mdev->vnr == volume);
		D_ASSERT(mdev->tconn == tconn);

		dh->minor = mdev_to_minor(mdev);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, mdev, NULL)) {
			genlmsg_cancel(skb, dh);
			goto out;
		}
		genlmsg_end(skb, dh);
	}

out:
	rcu_read_unlock();
	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == tconn) ? volume + 1 : 0;

	/* No more tconns/volumes/minors found results in an empty skb.
	 * Which will terminate the dump. */
	return skb->len;
}

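/*
 * The netlink_callback args encode the dump cursor across invocations:
 * args[0] is the drbd_tconn to continue with (revalidated against the
 * global list on each call, since the pointer may have gone stale),
 * args[1] is the next volume number within it, and args[2], when
 * non-zero, marks a single-resource dump and holds the requested tconn.
 * Each call emits at most one NLM_F_MULTI message; returning an skb
 * with no message in it (skb->len == 0) terminates the dump.
 */
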
/*
 * Request status of all resources, or of all volumes within a single resource.
 *
 * This is a dump, as the answer may not fit in a single reply skb otherwise.
 * Which means we cannot use the family->attrbuf or other such members, because
 * dump is NOT protected by the genl_lock(). During dump, we only have access
 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
 *
 * Once things are setup properly, we call into get_one_status().
 */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	struct nlattr *nla;
	const char *resource_name;
	struct drbd_tconn *tconn;
	int maxtype;

	/* Is this a followup call? */
	if (cb->args[0]) {
		/* ... of a single resource dump,
		 * and the resource iterator has been advanced already? */
		if (cb->args[2] && cb->args[2] != cb->args[0])
			return 0; /* DONE. */
		goto dump;
	}

	/* First call (from netlink_dump_start).  We need to figure out
	 * which resource(s) the user wants us to dump. */
	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
			nlmsg_attrlen(cb->nlh, hdrlen),
			DRBD_NLA_CFG_CONTEXT);

	/* No explicit context given.  Dump all. */
	if (!nla)
		goto dump;
	maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
	nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
	if (IS_ERR(nla))
		return PTR_ERR(nla);
	/* context given, but no name present? */
	if (!nla)
		return -EINVAL;
	resource_name = nla_data(nla);
	tconn = conn_get_by_name(resource_name);

	if (!tconn)
		return -ENODEV;

	kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */

	/* prime iterators, and set "filter" mode mark:
	 * only dump this tconn. */
	cb->args[0] = (long)tconn;
	/* cb->args[1] = 0; passed in this way. */
	cb->args[2] = (long)tconn;

dump:
	return get_one_status(skb, cb);
}

int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct timeout_parms tp;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tp.timeout_type =
		adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
		UT_DEFAULT;

	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		/* resume from last known position, if possible */
		struct start_ov_parms parms =
			{ .ov_start_sector = mdev->ov_start_sector };
		int err = start_ov_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
		/* w_make_ov_request expects position to be aligned */
		mdev->ov_start_sector = parms.ov_start_sector & ~BM_SECT_PER_BIT;
	}
	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out_nolock;

	mdev = adm_ctx.mdev;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			drbd_print_uuids(mdev, "cleared bitmap UUID");
			spin_lock_irq(&mdev->tconn->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->tconn->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(mdev->state_mutex);
out_nolock:
	drbd_adm_finish(info, retcode);
	return 0;
}

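/*
 * "new-c-uuid --clear-bitmap" is intended for skipping the initial full
 * sync of freshly created, known-identical devices: it requires a
 * connected peer (agreed protocol version >= 90), a current UUID that is
 * still UUID_JUST_CREATED, and the clear_bm argument. Both the bitmap
 * and the bitmap UUID are then cleared, and local disk and peer disk
 * jump straight to D_UP_TO_DATE.
 */
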
static enum drbd_ret_code
drbd_check_resource_name(const char *name)
{
	if (!name || !name[0]) {
		drbd_msg_put_info("resource name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info("invalid resource name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_check_resource_name(adm_ctx.resource_name);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx.tconn) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info("resource exists");
		}
		/* else: still NO_ERROR */
		goto out;
	}

	if (!conn_create(adm_ctx.resource_name))
		retcode = ERR_NOMEM;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_genlmsghdr *dh = info->userhdr;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	/* FIXME drop minor_count parameter, limit to MINORMASK */
	if (dh->minor >= minor_count) {
		drbd_msg_put_info("requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}
	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
		drbd_msg_put_info("requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* drbd_adm_prepare made sure already
	 * that mdev->tconn and mdev->vnr match the request. */
	if (adm_ctx.mdev) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			retcode = ERR_MINOR_EXISTS;
		/* else: still NO_ERROR */
		goto out;
	}

	retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
{
	if (mdev->state.disk == D_DISKLESS &&
	    /* no need to be mdev->state.conn == C_STANDALONE &&
	     * we may want to delete a minor from a live replication group.
	     */
	    mdev->state.role == R_SECONDARY) {
		idr_remove(&mdev->tconn->volumes, mdev->vnr);
		idr_remove(&minors, mdev_to_minor(mdev));
		del_gendisk(mdev->vdisk);
		synchronize_rcu();
		kref_put(&mdev->kref, &drbd_minor_destroy);
		return NO_ERROR;
	} else
		return ERR_MINOR_CONFIGURED;
}

int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = adm_delete_minor(adm_ctx.mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
	struct drbd_conf *mdev;
	unsigned i;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (!adm_ctx.tconn) {
		retcode = ERR_RES_NOT_KNOWN;
		goto out;
	}

	/* demote */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = drbd_set_role(mdev, R_SECONDARY, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to demote");
			goto out;
		}
	}

	retcode = conn_try_disconnect(adm_ctx.tconn, 0);
	if (retcode < SS_SUCCESS) {
		drbd_msg_put_info("failed to disconnect");
		goto out;
	}

	/* detach */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_detach(mdev);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to detach");
			goto out;
		}
	}

	/* If we reach this, all volumes (of this tconn) are Secondary,
	 * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
	 * actually stopped, state handling only does drbd_thread_stop_nowait(). */
	drbd_thread_stop(&adm_ctx.tconn->worker);

	/* Now, nothing can fail anymore */

	/* delete volumes */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_delete_minor(mdev);
		if (retcode != NO_ERROR) {
			/* "can not happen" */
			drbd_msg_put_info("failed to delete volume");
			goto out;
		}
	}

	/* delete connection */
	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		/* "can not happen" */
		retcode = ERR_RES_IN_USE;
		drbd_msg_put_info("failed to delete connection");
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		retcode = ERR_RES_IN_USE;
	}

	if (retcode == NO_ERROR)
		drbd_thread_stop(&adm_ctx.tconn->worker);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
{
	static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyways. */
		goto nla_put_failure;
	d_out->minor = mdev_to_minor(mdev);
	d_out->ret_code = NO_ERROR;

	if (nla_put_status_info(msg, mdev, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, 0);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	dev_err(DEV, "Error %d while broadcasting event. "
		"Event seq:%u sib_reason:%u\n",
		err, seq, sib->sib_reason);
}

int drbd_nla_check_mandatory(int maxtype, struct nlattr *nla)
{
	struct nlattr *head = nla_data(nla);
	int len = nla_len(nla);
	int rem;

	/*
	 * validate_nla (called from nla_parse_nested) ignores attributes
	 * beyond maxtype, and does not understand the DRBD_GENLA_F_MANDATORY flag.
	 * In order to have it validate attributes with the DRBD_GENLA_F_MANDATORY
	 * flag set also, check and remove that flag before calling
	 * nla_parse_nested.
	 */

	nla_for_each_attr(nla, head, len, rem) {
		if (nla->nla_type & DRBD_GENLA_F_MANDATORY) {
			nla->nla_type &= ~DRBD_GENLA_F_MANDATORY;
			if (nla_type(nla) > maxtype)
				return -EOPNOTSUPP;
		}
	}
	return 0;
}

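/*
 * DRBD_GENLA_F_MANDATORY is a DRBD-private flag bit carried in nla_type:
 * an old module must fail with -EOPNOTSUPP when it sees a flagged
 * attribute it does not know, instead of silently skipping it. Since the
 * generic netlink helpers know nothing about this flag, every
 * nested-attribute lookup has to go through the wrappers below, which
 * strip the flag first; calling nla_parse_nested() or nla_find_nested()
 * directly on such a payload would misinterpret the attribute types.
 */
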
int drbd_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla,
			  const struct nla_policy *policy)
{
	int err;

	err = drbd_nla_check_mandatory(maxtype, nla);
	if (!err)
		err = nla_parse_nested(tb, maxtype, nla, policy);

	return err;
}

struct nlattr *drbd_nla_find_nested(int maxtype, struct nlattr *nla, int attrtype)
{
	int err;
	/*
	 * If any nested attribute has the DRBD_GENLA_F_MANDATORY flag set and
	 * we don't know about that attribute, reject all the nested
	 * attributes.
	 */
	err = drbd_nla_check_mandatory(maxtype, nla);
	if (err)
		return ERR_PTR(err);
	return nla_find_nested(nla, attrtype);
}