/*
 * Copyright (C) 2015, SUSE
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 */


#include <linux/module.h>
#include <linux/dlm.h>
#include <linux/sched.h>
#include <linux/raid/md_p.h>
#include "md.h"
#include "bitmap.h"
#include "md-cluster.h"

#define LVB_SIZE	64
#define NEW_DEV_TIMEOUT 5000

struct dlm_lock_resource {
	dlm_lockspace_t *ls;
	struct dlm_lksb lksb;
	char *name; /* lock name. */
	uint32_t flags; /* flags to pass to dlm_lock() */
	struct completion completion; /* completion for synchronized locking */
	void (*bast)(void *arg, int mode); /* blocking AST function pointer*/
	struct mddev *mddev; /* pointing back to mddev. */
	int mode;
};

struct suspend_info {
	int slot;
	sector_t lo;
	sector_t hi;
	struct list_head list;
};

struct resync_info {
	__le64 lo;
	__le64 hi;
};

/* md_cluster_info flags */
#define	MD_CLUSTER_WAITING_FOR_NEWDISK		1
#define	MD_CLUSTER_SUSPEND_READ_BALANCING	2
#define	MD_CLUSTER_BEGIN_JOIN_CLUSTER		3


struct md_cluster_info {
	/* dlm lock space and resources for clustered raid. */
	dlm_lockspace_t *lockspace;
	int slot_number;
	struct completion completion;
	struct dlm_lock_resource *bitmap_lockres;
	struct dlm_lock_resource **other_bitmap_lockres;
	struct dlm_lock_resource *resync_lockres;
	struct list_head suspend_list;
	spinlock_t suspend_lock;
	struct md_thread *recovery_thread;
	unsigned long recovery_map;
	/* communication lock resources */
	struct dlm_lock_resource *ack_lockres;
	struct dlm_lock_resource *message_lockres;
	struct dlm_lock_resource *token_lockres;
	struct dlm_lock_resource *no_new_dev_lockres;
	struct md_thread *recv_thread;
	struct completion newdisk_completion;
	unsigned long state;
};

enum msg_type {
	METADATA_UPDATED = 0,
	RESYNCING,
	NEWDISK,
	REMOVE,
	RE_ADD,
	BITMAP_NEEDS_SYNC,
};

struct cluster_msg {
	__le32 type;
	__le32 slot;
	/* TODO: Unionize this for smaller footprint */
	__le64 low;
	__le64 high;
	char uuid[16];
	__le32 raid_slot;
};

static void sync_ast(void *arg)
{
	struct dlm_lock_resource *res;

	res = arg;
	complete(&res->completion);
}

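/*
 * Issue a DLM lock (or conversion) request on @res and wait for sync_ast()
 * to signal completion; the final DLM status is taken from the lksb.
 */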
static int dlm_lock_sync(struct dlm_lock_resource *res, int mode)
{
	int ret = 0;

	ret = dlm_lock(res->ls, mode, &res->lksb,
			res->flags, res->name, strlen(res->name),
			0, sync_ast, res, res->bast);
	if (ret)
		return ret;
	wait_for_completion(&res->completion);
	if (res->lksb.sb_status == 0)
		res->mode = mode;
	return res->lksb.sb_status;
}

static int dlm_unlock_sync(struct dlm_lock_resource *res)
{
	return dlm_lock_sync(res, DLM_LOCK_NL);
}

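/*
 * Allocate a lock resource in the cluster lockspace, optionally backed by a
 * lock value block (LVB), and take it in NL mode so that later requests on
 * it are conversions (DLM_LKF_CONVERT).
 */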
static struct dlm_lock_resource *lockres_init(struct mddev *mddev,
		char *name, void (*bastfn)(void *arg, int mode), int with_lvb)
{
	struct dlm_lock_resource *res = NULL;
	int ret, namelen;
	struct md_cluster_info *cinfo = mddev->cluster_info;

	res = kzalloc(sizeof(struct dlm_lock_resource), GFP_KERNEL);
	if (!res)
		return NULL;
	init_completion(&res->completion);
	res->ls = cinfo->lockspace;
	res->mddev = mddev;
	res->mode = DLM_LOCK_IV;
	namelen = strlen(name);
	res->name = kzalloc(namelen + 1, GFP_KERNEL);
	if (!res->name) {
		pr_err("md-cluster: Unable to allocate resource name for resource %s\n", name);
		goto out_err;
	}
	strlcpy(res->name, name, namelen + 1);
	if (with_lvb) {
		res->lksb.sb_lvbptr = kzalloc(LVB_SIZE, GFP_KERNEL);
		if (!res->lksb.sb_lvbptr) {
			pr_err("md-cluster: Unable to allocate LVB for resource %s\n", name);
			goto out_err;
		}
		res->flags = DLM_LKF_VALBLK;
	}

	if (bastfn)
		res->bast = bastfn;

	res->flags |= DLM_LKF_EXPEDITE;

	ret = dlm_lock_sync(res, DLM_LOCK_NL);
	if (ret) {
		pr_err("md-cluster: Unable to lock NL on new lock resource %s\n", name);
		goto out_err;
	}
	res->flags &= ~DLM_LKF_EXPEDITE;
	res->flags |= DLM_LKF_CONVERT;

	return res;
out_err:
	kfree(res->lksb.sb_lvbptr);
	kfree(res->name);
	kfree(res);
	return NULL;
}

static void lockres_free(struct dlm_lock_resource *res)
{
	int ret;

	if (!res)
		return;

	/* cancel a lock request or a conversion request that is blocked */
	res->flags |= DLM_LKF_CANCEL;
retry:
	ret = dlm_unlock(res->ls, res->lksb.sb_lkid, 0, &res->lksb, res);
	if (unlikely(ret != 0)) {
		pr_info("%s: failed to unlock %s return %d\n", __func__, res->name, ret);

		/* if a lock conversion is cancelled, then the lock is put
		 * back to grant queue, need to ensure it is unlocked */
		if (ret == -DLM_ECANCEL)
			goto retry;
	}
	res->flags &= ~DLM_LKF_CANCEL;
	wait_for_completion(&res->completion);

	kfree(res->name);
	kfree(res->lksb.sb_lvbptr);
	kfree(res);
}

static void add_resync_info(struct dlm_lock_resource *lockres,
			    sector_t lo, sector_t hi)
{
	struct resync_info *ri;

	ri = (struct resync_info *)lockres->lksb.sb_lvbptr;
	ri->lo = cpu_to_le64(lo);
	ri->hi = cpu_to_le64(hi);
}

static struct suspend_info *read_resync_info(struct mddev *mddev, struct dlm_lock_resource *lockres)
{
	struct resync_info ri;
	struct suspend_info *s = NULL;
	sector_t hi = 0;

	dlm_lock_sync(lockres, DLM_LOCK_CR);
	memcpy(&ri, lockres->lksb.sb_lvbptr, sizeof(struct resync_info));
	hi = le64_to_cpu(ri.hi);
	if (hi > 0) {
		s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL);
		if (!s)
			goto out;
		s->hi = hi;
		s->lo = le64_to_cpu(ri.lo);
	}
	dlm_unlock_sync(lockres);
out:
	return s;
}

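/*
 * Recovery thread: for every slot set in recovery_map, drop any stale
 * suspend_info, take the failed node's bitmap lock in PW mode and copy that
 * bitmap into ours so the resync can cover the failed node's dirty regions.
 */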
static void recover_bitmaps(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct md_cluster_info *cinfo = mddev->cluster_info;
	struct dlm_lock_resource *bm_lockres;
	char str[64];
	int slot, ret;
	struct suspend_info *s, *tmp;
	sector_t lo, hi;

	while (cinfo->recovery_map) {
		slot = fls64((u64)cinfo->recovery_map) - 1;

		/* Clear suspend_area associated with the bitmap */
		spin_lock_irq(&cinfo->suspend_lock);
		list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
			if (slot == s->slot) {
				list_del(&s->list);
				kfree(s);
			}
		spin_unlock_irq(&cinfo->suspend_lock);

		snprintf(str, 64, "bitmap%04d", slot);
		bm_lockres = lockres_init(mddev, str, NULL, 1);
		if (!bm_lockres) {
			pr_err("md-cluster: Cannot initialize bitmaps\n");
			goto clear_bit;
		}

		ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
		if (ret) {
			pr_err("md-cluster: Could not DLM lock %s: %d\n",
					str, ret);
			goto clear_bit;
		}
		ret = bitmap_copy_from_slot(mddev, slot, &lo, &hi, true);
		if (ret) {
			pr_err("md-cluster: Could not copy data from bitmap %d\n", slot);
			goto dlm_unlock;
		}
		if (hi > 0) {
			/* TODO:Wait for current resync to get over */
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			if (lo < mddev->recovery_cp)
				mddev->recovery_cp = lo;
			md_check_recovery(mddev);
		}
dlm_unlock:
		dlm_unlock_sync(bm_lockres);
clear_bit:
		clear_bit(slot, &cinfo->recovery_map);
	}
}

static void recover_prep(void *arg)
{
	struct mddev *mddev = arg;
	struct md_cluster_info *cinfo = mddev->cluster_info;
	set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
}

static void __recover_slot(struct mddev *mddev, int slot)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;

	set_bit(slot, &cinfo->recovery_map);
	if (!cinfo->recovery_thread) {
		cinfo->recovery_thread = md_register_thread(recover_bitmaps,
				mddev, "recover");
		if (!cinfo->recovery_thread) {
			pr_warn("md-cluster: Could not create recovery thread\n");
			return;
		}
	}
	md_wakeup_thread(cinfo->recovery_thread);
}

static void recover_slot(void *arg, struct dlm_slot *slot)
{
	struct mddev *mddev = arg;
	struct md_cluster_info *cinfo = mddev->cluster_info;

	pr_info("md-cluster: %s Node %d/%d down. My slot: %d. Initiating recovery.\n",
			mddev->bitmap_info.cluster_name,
			slot->nodeid, slot->slot,
			cinfo->slot_number);
	/* deduct one since dlm slot numbers start at one while the slot
	 * numbering of cluster-md begins with 0 */
	__recover_slot(mddev, slot->slot - 1);
}

static void recover_done(void *arg, struct dlm_slot *slots,
		int num_slots, int our_slot,
		uint32_t generation)
{
	struct mddev *mddev = arg;
	struct md_cluster_info *cinfo = mddev->cluster_info;

	cinfo->slot_number = our_slot;
	/* the completion only needs to be completed when a node joins the
	 * cluster; it doesn't need to run during another node's failure */
	if (test_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state)) {
		complete(&cinfo->completion);
		clear_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state);
	}
	clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
}

/* these ops are called when a node joins the cluster, and handle lock
 * recovery if a node failure occurs */
static const struct dlm_lockspace_ops md_ls_ops = {
	.recover_prep = recover_prep,
	.recover_slot = recover_slot,
	.recover_done = recover_done,
};

/*
 * The BAST function for the ack lock resource
 * This function wakes up the receive thread in
 * order to receive and process the message.
 */
static void ack_bast(void *arg, int mode)
{
	struct dlm_lock_resource *res = arg;
	struct md_cluster_info *cinfo = res->mddev->cluster_info;

	if (mode == DLM_LOCK_EX)
		md_wakeup_thread(cinfo->recv_thread);
}

static void __remove_suspend_info(struct md_cluster_info *cinfo, int slot)
{
	struct suspend_info *s, *tmp;

	list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
		if (slot == s->slot) {
			list_del(&s->list);
			kfree(s);
			break;
		}
}

static void remove_suspend_info(struct mddev *mddev, int slot)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;
	spin_lock_irq(&cinfo->suspend_lock);
	__remove_suspend_info(cinfo, slot);
	spin_unlock_irq(&cinfo->suspend_lock);
	mddev->pers->quiesce(mddev, 2);
}


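/*
 * Record (or, when hi == 0, clear) the range another node is resyncing so
 * that area_resyncing() can report overlapping ranges to the personality.
 */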
static void process_suspend_info(struct mddev *mddev,
		int slot, sector_t lo, sector_t hi)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;
	struct suspend_info *s;

	if (!hi) {
		remove_suspend_info(mddev, slot);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		return;
	}
	s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL);
	if (!s)
		return;
	s->slot = slot;
	s->lo = lo;
	s->hi = hi;
	mddev->pers->quiesce(mddev, 1);
	mddev->pers->quiesce(mddev, 0);
	spin_lock_irq(&cinfo->suspend_lock);
	/* Remove existing entry (if exists) before adding */
	__remove_suspend_info(cinfo, slot);
	list_add(&s->list, &cinfo->suspend_list);
	spin_unlock_irq(&cinfo->suspend_lock);
	mddev->pers->quiesce(mddev, 2);
}

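/*
 * A remote node announced a new device: raise an ADD_DEVICE uevent so
 * userspace can act on it, then wait (up to NEW_DEV_TIMEOUT) for
 * new_disk_ack() to signal newdisk_completion.
 */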
static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
{
	char disk_uuid[64];
	struct md_cluster_info *cinfo = mddev->cluster_info;
	char event_name[] = "EVENT=ADD_DEVICE";
	char raid_slot[16];
	char *envp[] = {event_name, disk_uuid, raid_slot, NULL};
	int len;

	len = snprintf(disk_uuid, 64, "DEVICE_UUID=");
	sprintf(disk_uuid + len, "%pU", cmsg->uuid);
	snprintf(raid_slot, 16, "RAID_DISK=%d", le32_to_cpu(cmsg->raid_slot));
	pr_info("%s:%d Sending kobject change with %s and %s\n", __func__, __LINE__, disk_uuid, raid_slot);
	init_completion(&cinfo->newdisk_completion);
	set_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state);
	kobject_uevent_env(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE, envp);
	wait_for_completion_timeout(&cinfo->newdisk_completion,
			NEW_DEV_TIMEOUT);
	clear_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state);
}


static void process_metadata_update(struct mddev *mddev, struct cluster_msg *msg)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;
	mddev->good_device_nr = le32_to_cpu(msg->raid_slot);
	set_bit(MD_RELOAD_SB, &mddev->flags);
	dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
	md_wakeup_thread(mddev->thread);
}

static void process_remove_disk(struct mddev *mddev, struct cluster_msg *msg)
{
	struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev,
						   le32_to_cpu(msg->raid_slot));

	if (rdev) {
		set_bit(ClusterRemove, &rdev->flags);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
	}
	else
		pr_warn("%s: %d Could not find disk(%d) to REMOVE\n",
			__func__, __LINE__, le32_to_cpu(msg->raid_slot));
}

static void process_readd_disk(struct mddev *mddev, struct cluster_msg *msg)
{
	struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev,
						   le32_to_cpu(msg->raid_slot));

	if (rdev && test_bit(Faulty, &rdev->flags))
		clear_bit(Faulty, &rdev->flags);
	else
		pr_warn("%s: %d Could not find disk(%d) which is faulty",
			__func__, __LINE__, le32_to_cpu(msg->raid_slot));
}

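/* Dispatch a message read from the message LVB to its handler. */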
static void process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
{
	if (WARN(mddev->cluster_info->slot_number - 1 == le32_to_cpu(msg->slot),
		"node %d received its own msg\n", le32_to_cpu(msg->slot)))
		return;
	switch (le32_to_cpu(msg->type)) {
	case METADATA_UPDATED:
		process_metadata_update(mddev, msg);
		break;
	case RESYNCING:
		process_suspend_info(mddev, le32_to_cpu(msg->slot),
				     le64_to_cpu(msg->low),
				     le64_to_cpu(msg->high));
		break;
	case NEWDISK:
		process_add_new_disk(mddev, msg);
		break;
	case REMOVE:
		process_remove_disk(mddev, msg);
		break;
	case RE_ADD:
		process_readd_disk(mddev, msg);
		break;
	case BITMAP_NEEDS_SYNC:
		__recover_slot(mddev, le32_to_cpu(msg->slot));
		break;
	default:
495 | pr_warn("%s:%d Received unknown message from %d\n", | |
496 | __func__, __LINE__, msg->slot); | |
	}
}

/*
 * thread for receiving message
 */
static void recv_daemon(struct md_thread *thread)
{
	struct md_cluster_info *cinfo = thread->mddev->cluster_info;
	struct dlm_lock_resource *ack_lockres = cinfo->ack_lockres;
	struct dlm_lock_resource *message_lockres = cinfo->message_lockres;
	struct cluster_msg msg;
	int ret;

	/*get CR on Message*/
	if (dlm_lock_sync(message_lockres, DLM_LOCK_CR)) {
		pr_err("md/raid1:failed to get CR on MESSAGE\n");
		return;
	}

	/* read lvb and wake up thread to process this message_lockres */
	memcpy(&msg, message_lockres->lksb.sb_lvbptr, sizeof(struct cluster_msg));
	process_recvd_msg(thread->mddev, &msg);

	/*release CR on ack_lockres*/
	ret = dlm_unlock_sync(ack_lockres);
	if (unlikely(ret != 0))
		pr_info("unlock ack failed return %d\n", ret);
	/*up-convert to PR on message_lockres*/
	ret = dlm_lock_sync(message_lockres, DLM_LOCK_PR);
	if (unlikely(ret != 0))
		pr_info("lock PR on msg failed return %d\n", ret);
	/*get CR on ack_lockres again*/
	ret = dlm_lock_sync(ack_lockres, DLM_LOCK_CR);
	if (unlikely(ret != 0))
		pr_info("lock CR on ack failed return %d\n", ret);
	/*release CR on message_lockres*/
	ret = dlm_unlock_sync(message_lockres);
	if (unlikely(ret != 0))
		pr_info("unlock msg failed return %d\n", ret);
}

/* lock_comm()
 * Takes the lock on the TOKEN lock resource so no other
 * node can communicate while the operation is underway.
 * If called again, and the TOKEN lock is already in EX mode
 * return success. However, care must be taken that unlock_comm()
 * is called only once.
 */
static int lock_comm(struct md_cluster_info *cinfo)
{
	int error;

	if (cinfo->token_lockres->mode == DLM_LOCK_EX)
		return 0;

	error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
	if (error)
		pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
				__func__, __LINE__, error);
	return error;
}

static void unlock_comm(struct md_cluster_info *cinfo)
{
	WARN_ON(cinfo->token_lockres->mode != DLM_LOCK_EX);
	dlm_unlock_sync(cinfo->token_lockres);
}

/* __sendmsg()
 * This function performs the actual sending of the message. This function is
 * usually called after performing the encompassing operation
 * The function:
 * 1. Grabs the message lockresource in EX mode
 * 2. Copies the message to the message LVB
 * 3. Downconverts message lockresource to CW
 * 4. Upconverts ack lock resource from CR to EX. This forces the BAST on other nodes
 *    and the other nodes read the message. The thread will wait here until all other
 *    nodes have released ack lock resource.
 * 5. Downconvert ack lockresource to CR
 */
static int __sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
{
	int error;
	int slot = cinfo->slot_number - 1;

	cmsg->slot = cpu_to_le32(slot);
	/*get EX on Message*/
	error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_EX);
	if (error) {
		pr_err("md-cluster: failed to get EX on MESSAGE (%d)\n", error);
		goto failed_message;
	}

	memcpy(cinfo->message_lockres->lksb.sb_lvbptr, (void *)cmsg,
			sizeof(struct cluster_msg));
	/*down-convert EX to CW on Message*/
	error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_CW);
	if (error) {
		pr_err("md-cluster: failed to convert EX to CW on MESSAGE(%d)\n",
				error);
		goto failed_ack;
	}

	/*up-convert CR to EX on Ack*/
	error = dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_EX);
	if (error) {
		pr_err("md-cluster: failed to convert CR to EX on ACK(%d)\n",
				error);
		goto failed_ack;
	}

	/*down-convert EX to CR on Ack*/
	error = dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR);
	if (error) {
		pr_err("md-cluster: failed to convert EX to CR on ACK(%d)\n",
				error);
		goto failed_ack;
	}

failed_ack:
	error = dlm_unlock_sync(cinfo->message_lockres);
	if (unlikely(error != 0)) {
		pr_err("md-cluster: failed convert to NL on MESSAGE(%d)\n",
			error);
		/* in case the message can't be released due to some reason */
		goto failed_ack;
	}
failed_message:
	return error;
}

static int sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
{
	int ret;

	lock_comm(cinfo);
	ret = __sendmsg(cinfo, cmsg);
	unlock_comm(cinfo);
	return ret;
}

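/*
 * Called from join(): probe every other node's bitmap lock. If a lock is
 * busy (-EAGAIN) that node is resyncing, so remember the range from its
 * LVB; otherwise read that bitmap and pull recovery_cp back if it shows
 * unfinished recovery.
 */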
static int gather_all_resync_info(struct mddev *mddev, int total_slots)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;
	int i, ret = 0;
	struct dlm_lock_resource *bm_lockres;
	struct suspend_info *s;
	char str[64];
	sector_t lo, hi;


	for (i = 0; i < total_slots; i++) {
		memset(str, '\0', 64);
		snprintf(str, 64, "bitmap%04d", i);
		bm_lockres = lockres_init(mddev, str, NULL, 1);
		if (!bm_lockres)
			return -ENOMEM;
		if (i == (cinfo->slot_number - 1))
			continue;

		bm_lockres->flags |= DLM_LKF_NOQUEUE;
		ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
		if (ret == -EAGAIN) {
			memset(bm_lockres->lksb.sb_lvbptr, '\0', LVB_SIZE);
			s = read_resync_info(mddev, bm_lockres);
			if (s) {
				pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n",
						__func__, __LINE__,
						(unsigned long long) s->lo,
						(unsigned long long) s->hi, i);
				spin_lock_irq(&cinfo->suspend_lock);
				s->slot = i;
				list_add(&s->list, &cinfo->suspend_list);
				spin_unlock_irq(&cinfo->suspend_lock);
			}
			ret = 0;
			lockres_free(bm_lockres);
			continue;
		}
		if (ret) {
			lockres_free(bm_lockres);
			goto out;
		}

		/* Read the disk bitmap sb and check if it needs recovery */
		ret = bitmap_copy_from_slot(mddev, i, &lo, &hi, false);
		if (ret) {
			pr_warn("md-cluster: Could not gather bitmaps from slot %d", i);
			lockres_free(bm_lockres);
			continue;
		}
		if ((hi > 0) && (lo < mddev->recovery_cp)) {
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			mddev->recovery_cp = lo;
			md_check_recovery(mddev);
		}

		dlm_unlock_sync(bm_lockres);
		lockres_free(bm_lockres);
	}
out:
	return ret;
}

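/*
 * join() - create or join the DLM lockspace named after the array uuid, set
 * up the communication lock resources (message/token/ack/no-new-dev), the
 * receive thread and this node's bitmap lock, then gather resync state from
 * the other nodes.
 */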
static int join(struct mddev *mddev, int nodes)
{
	struct md_cluster_info *cinfo;
	int ret, ops_rv;
	char str[64];

	cinfo = kzalloc(sizeof(struct md_cluster_info), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	INIT_LIST_HEAD(&cinfo->suspend_list);
	spin_lock_init(&cinfo->suspend_lock);
	init_completion(&cinfo->completion);
	set_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state);

	mddev->cluster_info = cinfo;

	memset(str, 0, 64);
	sprintf(str, "%pU", mddev->uuid);
	ret = dlm_new_lockspace(str, mddev->bitmap_info.cluster_name,
				DLM_LSFL_FS, LVB_SIZE,
				&md_ls_ops, mddev, &ops_rv, &cinfo->lockspace);
	if (ret)
		goto err;
	wait_for_completion(&cinfo->completion);
	if (nodes < cinfo->slot_number) {
		pr_err("md-cluster: Slot allotted(%d) is greater than available slots(%d).",
			cinfo->slot_number, nodes);
		ret = -ERANGE;
		goto err;
	}
	/* Initiate the communication resources */
	ret = -ENOMEM;
	cinfo->recv_thread = md_register_thread(recv_daemon, mddev, "cluster_recv");
	if (!cinfo->recv_thread) {
		pr_err("md-cluster: cannot allocate memory for recv_thread!\n");
		goto err;
	}
	cinfo->message_lockres = lockres_init(mddev, "message", NULL, 1);
	if (!cinfo->message_lockres)
		goto err;
	cinfo->token_lockres = lockres_init(mddev, "token", NULL, 0);
	if (!cinfo->token_lockres)
		goto err;
	cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0);
	if (!cinfo->ack_lockres)
		goto err;
	cinfo->no_new_dev_lockres = lockres_init(mddev, "no-new-dev", NULL, 0);
	if (!cinfo->no_new_dev_lockres)
		goto err;

	/* get sync CR lock on ACK. */
	if (dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR))
		pr_err("md-cluster: failed to get a sync CR lock on ACK!(%d)\n",
				ret);
	/* get sync CR lock on no-new-dev. */
	if (dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR))
		pr_err("md-cluster: failed to get a sync CR lock on no-new-dev!(%d)\n", ret);


	pr_info("md-cluster: Joined cluster %s slot %d\n", str, cinfo->slot_number);
	snprintf(str, 64, "bitmap%04d", cinfo->slot_number - 1);
	cinfo->bitmap_lockres = lockres_init(mddev, str, NULL, 1);
	if (!cinfo->bitmap_lockres)
		goto err;
	if (dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW)) {
		pr_err("Failed to get bitmap lock\n");
		ret = -EINVAL;
		goto err;
	}

	cinfo->resync_lockres = lockres_init(mddev, "resync", NULL, 0);
	if (!cinfo->resync_lockres)
		goto err;

	ret = gather_all_resync_info(mddev, nodes);
	if (ret)
		goto err;

	return 0;
err:
	lockres_free(cinfo->message_lockres);
	lockres_free(cinfo->token_lockres);
	lockres_free(cinfo->ack_lockres);
	lockres_free(cinfo->no_new_dev_lockres);
	lockres_free(cinfo->resync_lockres);
	lockres_free(cinfo->bitmap_lockres);
	if (cinfo->lockspace)
		dlm_release_lockspace(cinfo->lockspace, 2);
	mddev->cluster_info = NULL;
	kfree(cinfo);
	return ret;
}

static void resync_bitmap(struct mddev *mddev)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;
	struct cluster_msg cmsg = {0};
	int err;

	cmsg.type = cpu_to_le32(BITMAP_NEEDS_SYNC);
	err = sendmsg(cinfo, &cmsg);
	if (err)
		pr_err("%s:%d: failed to send BITMAP_NEEDS_SYNC message (%d)\n",
			__func__, __LINE__, err);
}

static void unlock_all_bitmaps(struct mddev *mddev);
static int leave(struct mddev *mddev)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;

	if (!cinfo)
		return 0;

	/* BITMAP_NEEDS_SYNC message should be sent when node
	 * is leaving the cluster with dirty bitmap, also we
	 * can only deliver it when dlm connection is available */
	if (cinfo->slot_number > 0 && mddev->recovery_cp != MaxSector)
		resync_bitmap(mddev);

	md_unregister_thread(&cinfo->recovery_thread);
	md_unregister_thread(&cinfo->recv_thread);
	lockres_free(cinfo->message_lockres);
	lockres_free(cinfo->token_lockres);
	lockres_free(cinfo->ack_lockres);
	lockres_free(cinfo->no_new_dev_lockres);
	lockres_free(cinfo->bitmap_lockres);
	unlock_all_bitmaps(mddev);
	dlm_release_lockspace(cinfo->lockspace, 2);
	return 0;
}

/* slot_number(): Returns the MD slot number to use
 * DLM starts the slot numbers from 1, whereas cluster-md
 * wants the number to be from zero, so we deduct one
 */
static int slot_number(struct mddev *mddev)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;

	return cinfo->slot_number - 1;
}

static int metadata_update_start(struct mddev *mddev)
{
	return lock_comm(mddev->cluster_info);
}

static int metadata_update_finish(struct mddev *mddev)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;
	struct cluster_msg cmsg;
	struct md_rdev *rdev;
	int ret = 0;
	int raid_slot = -1;

	memset(&cmsg, 0, sizeof(cmsg));
	cmsg.type = cpu_to_le32(METADATA_UPDATED);
	/* Pick up a good active device number to send.
	 */
	rdev_for_each(rdev, mddev)
		if (rdev->raid_disk > -1 && !test_bit(Faulty, &rdev->flags)) {
			raid_slot = rdev->desc_nr;
			break;
		}
	if (raid_slot >= 0) {
		cmsg.raid_slot = cpu_to_le32(raid_slot);
		ret = __sendmsg(cinfo, &cmsg);
	} else
		pr_warn("md-cluster: No good device id found to send\n");
	unlock_comm(cinfo);
	return ret;
}

static void metadata_update_cancel(struct mddev *mddev)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;
	unlock_comm(cinfo);
}

static int resync_start(struct mddev *mddev)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;
	cinfo->resync_lockres->flags |= DLM_LKF_NOQUEUE;
	return dlm_lock_sync(cinfo->resync_lockres, DLM_LOCK_EX);
}

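/*
 * Publish the range currently being resynced: stash it in the bitmap lock's
 * LVB for nodes that join later and broadcast a RESYNCING message to the
 * nodes that are already up.
 */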
static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;
	struct resync_info ri;
	struct cluster_msg cmsg = {0};

	/* do not send zero again, if we have sent before */
	if (hi == 0) {
		memcpy(&ri, cinfo->bitmap_lockres->lksb.sb_lvbptr, sizeof(struct resync_info));
		if (le64_to_cpu(ri.hi) == 0)
			return 0;
	}

	add_resync_info(cinfo->bitmap_lockres, lo, hi);
	/* Re-acquire the lock to refresh LVB */
	dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW);
	cmsg.type = cpu_to_le32(RESYNCING);
	cmsg.low = cpu_to_le64(lo);
	cmsg.high = cpu_to_le64(hi);

	return sendmsg(cinfo, &cmsg);
}

static int resync_finish(struct mddev *mddev)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;
	cinfo->resync_lockres->flags &= ~DLM_LKF_NOQUEUE;
	dlm_unlock_sync(cinfo->resync_lockres);
	return resync_info_update(mddev, 0, 0);
}

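/*
 * Return 1 if the lo/hi range overlaps an area another node is resyncing
 * (or, for reads, while read balancing is suspended during DLM recovery).
 */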
static int area_resyncing(struct mddev *mddev, int direction,
		sector_t lo, sector_t hi)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;
	int ret = 0;
	struct suspend_info *s;

	if ((direction == READ) &&
		test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state))
		return 1;

	spin_lock_irq(&cinfo->suspend_lock);
	if (list_empty(&cinfo->suspend_list))
		goto out;
	list_for_each_entry(s, &cinfo->suspend_list, list)
		if (hi > s->lo && lo < s->hi) {
			ret = 1;
			break;
		}
out:
	spin_unlock_irq(&cinfo->suspend_lock);
	return ret;
}

/* add_new_disk() - initiates a disk add
 * However, if this fails before writing md_update_sb(),
 * add_new_disk_cancel() must be called to release token lock
 */
static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;
	struct cluster_msg cmsg;
	int ret = 0;
	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
	char *uuid = sb->device_uuid;

	memset(&cmsg, 0, sizeof(cmsg));
	cmsg.type = cpu_to_le32(NEWDISK);
	memcpy(cmsg.uuid, uuid, 16);
	cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
	lock_comm(cinfo);
	ret = __sendmsg(cinfo, &cmsg);
	if (ret)
		return ret;
	cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE;
	ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX);
	cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE;
	/* Some node does not "see" the device */
	if (ret == -EAGAIN)
		ret = -ENOENT;
	if (ret)
		unlock_comm(cinfo);
	else
		dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
	return ret;
}

static void add_new_disk_cancel(struct mddev *mddev)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;
	unlock_comm(cinfo);
}

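/*
 * Userspace acknowledgement of a NEWDISK message; on a positive ack this
 * node drops its CR lock on no-new-dev so the sending node's add_new_disk()
 * can take it in EX, then the waiter in process_add_new_disk() is woken.
 */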
static int new_disk_ack(struct mddev *mddev, bool ack)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;

	if (!test_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state)) {
		pr_warn("md-cluster(%s): Spurious cluster confirmation\n", mdname(mddev));
		return -EINVAL;
	}

	if (ack)
		dlm_unlock_sync(cinfo->no_new_dev_lockres);
	complete(&cinfo->newdisk_completion);
	return 0;
}

static int remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct cluster_msg cmsg = {0};
	struct md_cluster_info *cinfo = mddev->cluster_info;
	cmsg.type = cpu_to_le32(REMOVE);
	cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
	return sendmsg(cinfo, &cmsg);
}

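/*
 * Try to take PW locks on every other node's bitmap lock; returns 1 if every
 * lock was taken, -1 if any PW request failed, and 0 or -ENOMEM if an
 * allocation fails.
 */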
static int lock_all_bitmaps(struct mddev *mddev)
{
	int slot, my_slot, ret, held = 1, i = 0;
	char str[64];
	struct md_cluster_info *cinfo = mddev->cluster_info;

	cinfo->other_bitmap_lockres = kzalloc((mddev->bitmap_info.nodes - 1) *
					     sizeof(struct dlm_lock_resource *),
					     GFP_KERNEL);
	if (!cinfo->other_bitmap_lockres) {
		pr_err("md: can't alloc mem for other bitmap locks\n");
		return 0;
	}

	my_slot = slot_number(mddev);
	for (slot = 0; slot < mddev->bitmap_info.nodes; slot++) {
		if (slot == my_slot)
			continue;

		memset(str, '\0', 64);
		snprintf(str, 64, "bitmap%04d", slot);
		cinfo->other_bitmap_lockres[i] = lockres_init(mddev, str, NULL, 1);
		if (!cinfo->other_bitmap_lockres[i])
			return -ENOMEM;

		cinfo->other_bitmap_lockres[i]->flags |= DLM_LKF_NOQUEUE;
		ret = dlm_lock_sync(cinfo->other_bitmap_lockres[i], DLM_LOCK_PW);
		if (ret)
			held = -1;
		i++;
	}

	return held;
}

static void unlock_all_bitmaps(struct mddev *mddev)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;
	int i;

	/* release other nodes' bitmap locks if they exist */
	if (cinfo->other_bitmap_lockres) {
		for (i = 0; i < mddev->bitmap_info.nodes - 1; i++) {
			if (cinfo->other_bitmap_lockres[i]) {
				dlm_unlock_sync(cinfo->other_bitmap_lockres[i]);
				lockres_free(cinfo->other_bitmap_lockres[i]);
			}
		}
		kfree(cinfo->other_bitmap_lockres);
	}
}

static int gather_bitmaps(struct md_rdev *rdev)
{
	int sn, err;
	sector_t lo, hi;
	struct cluster_msg cmsg = {0};
	struct mddev *mddev = rdev->mddev;
	struct md_cluster_info *cinfo = mddev->cluster_info;

	cmsg.type = cpu_to_le32(RE_ADD);
	cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
	err = sendmsg(cinfo, &cmsg);
	if (err)
		goto out;

	for (sn = 0; sn < mddev->bitmap_info.nodes; sn++) {
		if (sn == (cinfo->slot_number - 1))
			continue;
		err = bitmap_copy_from_slot(mddev, sn, &lo, &hi, false);
		if (err) {
			pr_warn("md-cluster: Could not gather bitmaps from slot %d", sn);
			goto out;
		}
		if ((hi > 0) && (lo < mddev->recovery_cp))
			mddev->recovery_cp = lo;
	}
out:
	return err;
}

static struct md_cluster_operations cluster_ops = {
	.join = join,
	.leave = leave,
	.slot_number = slot_number,
	.resync_start = resync_start,
	.resync_finish = resync_finish,
	.resync_info_update = resync_info_update,
	.metadata_update_start = metadata_update_start,
	.metadata_update_finish = metadata_update_finish,
	.metadata_update_cancel = metadata_update_cancel,
	.area_resyncing = area_resyncing,
	.add_new_disk = add_new_disk,
	.add_new_disk_cancel = add_new_disk_cancel,
	.new_disk_ack = new_disk_ack,
	.remove_disk = remove_disk,
	.gather_bitmaps = gather_bitmaps,
	.lock_all_bitmaps = lock_all_bitmaps,
	.unlock_all_bitmaps = unlock_all_bitmaps,
};

static int __init cluster_init(void)
{
	pr_warn("md-cluster: EXPERIMENTAL. Use with caution\n");
	pr_info("Registering Cluster MD functions\n");
	register_md_cluster_operations(&cluster_ops, THIS_MODULE);
	return 0;
}

static void cluster_exit(void)
{
	unregister_md_cluster_operations();
}

module_init(cluster_init);
module_exit(cluster_exit);
MODULE_AUTHOR("SUSE");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Clustering support for MD");