]>
Commit | Line | Data |
---|---|---|
9d09e663 N |
1 | /* |
2 | * Copyright (C) 2010-2011 Neil Brown | |
702108d1 | 3 | * Copyright (C) 2010-2016 Red Hat, Inc. All rights reserved. |
9d09e663 N |
4 | * |
5 | * This file is released under the GPL. | |
6 | */ | |
7 | ||
8 | #include <linux/slab.h> | |
056075c7 | 9 | #include <linux/module.h> |
9d09e663 N |
10 | |
11 | #include "md.h" | |
32737279 | 12 | #include "raid1.h" |
9d09e663 | 13 | #include "raid5.h" |
63f33b8d | 14 | #include "raid10.h" |
9d09e663 N |
15 | #include "bitmap.h" |
16 | ||
3e8dbb7f AK |
17 | #include <linux/device-mapper.h> |
18 | ||
9d09e663 | 19 | #define DM_MSG_PREFIX "raid" |
92c83d79 | 20 | #define MAX_RAID_DEVICES 253 /* md-raid kernel limit */ |
9d09e663 | 21 | |
48cf06bc HM |
22 | static bool devices_handle_discard_safely = false; |
23 | ||
9d09e663 | 24 | /* |
b12d437b JB |
25 | * The following flags are used by dm-raid.c to set up the array state. |
26 | * They must be cleared before md_run is called. | |
9d09e663 | 27 | */ |
43157840 | 28 | #define FirstUse 10 /* rdev flag */ |
9d09e663 N |
29 | |
30 | struct raid_dev { | |
31 | /* | |
32 | * Two DM devices, one to hold metadata and one to hold the | |
43157840 | 33 | * actual data/parity. The reason for this is to not confuse |
9d09e663 N |
34 | * ti->len and give more flexibility in altering size and |
35 | * characteristics. | |
36 | * | |
37 | * While it is possible for this device to be associated | |
38 | * with a different physical device than the data_dev, it | |
39 | * is intended for it to be the same. | |
40 | * |--------- Physical Device ---------| | |
41 | * |- meta_dev -|------ data_dev ------| | |
42 | */ | |
43 | struct dm_dev *meta_dev; | |
44 | struct dm_dev *data_dev; | |
3cb03002 | 45 | struct md_rdev rdev; |
9d09e663 N |
46 | }; |
47 | ||
48 | /* | |
c76d53f4 | 49 | * Flags for rs->ctr_flags field. |
702108d1 HM |
50 | * |
51 | * 1 = no flag value | |
52 | * 2 = flag with value | |
9d09e663 | 53 | */ |
43157840 MS |
54 | #define CTR_FLAG_SYNC 0x1 /* 1 */ /* Not with raid0! */ |
55 | #define CTR_FLAG_NOSYNC 0x2 /* 1 */ /* Not with raid0! */ | |
56 | #define CTR_FLAG_REBUILD 0x4 /* 2 */ /* Not with raid0! */ | |
57 | #define CTR_FLAG_DAEMON_SLEEP 0x8 /* 2 */ /* Not with raid0! */ | |
58 | #define CTR_FLAG_MIN_RECOVERY_RATE 0x10 /* 2 */ /* Not with raid0! */ | |
59 | #define CTR_FLAG_MAX_RECOVERY_RATE 0x20 /* 2 */ /* Not with raid0! */ | |
60 | #define CTR_FLAG_MAX_WRITE_BEHIND 0x40 /* 2 */ /* Only with raid1! */ | |
61 | #define CTR_FLAG_WRITE_MOSTLY 0x80 /* 2 */ /* Only with raid1! */ | |
62 | #define CTR_FLAG_STRIPE_CACHE 0x100 /* 2 */ /* Only with raid4/5/6! */ | |
63 | #define CTR_FLAG_REGION_SIZE 0x200 /* 2 */ /* Not with raid0! */ | |
64 | #define CTR_FLAG_RAID10_COPIES 0x400 /* 2 */ /* Only with raid10 */ | |
65 | #define CTR_FLAG_RAID10_FORMAT 0x800 /* 2 */ /* Only with raid10 */ | |
33e53f06 | 66 | /* New for v1.8.0 */ |
43157840 MS |
67 | #define CTR_FLAG_DELTA_DISKS 0x1000 /* 2 */ /* Only with reshapable raid4/5/6/10! */ |
68 | #define CTR_FLAG_DATA_OFFSET 0x2000 /* 2 */ /* Only with reshapable raid4/5/6/10! */ | |
33e53f06 | 69 | #define CTR_FLAG_RAID10_USE_NEAR_SETS 0x4000 /* 2 */ /* Only with raid10! */ |
63f33b8d | 70 | |
f090279e HM |
71 | /* |
72 | * Definitions of various constructor flags to | |
73 | * be used in checks of valid / invalid flags | |
74 | * per raid level. | |
75 | */ | |
76 | /* Define all any sync flags */ | |
77 | #define CTR_FLAGS_ANY_SYNC (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC) | |
78 | ||
79 | /* Define flags for options without argument (e.g. 'nosync') */ | |
33e53f06 HM |
80 | #define CTR_FLAG_OPTIONS_NO_ARGS (CTR_FLAGS_ANY_SYNC | \ |
81 | CTR_FLAG_RAID10_USE_NEAR_SETS) | |
f090279e HM |
82 | |
83 | /* Define flags for options with one argument (e.g. 'delta_disks +2') */ | |
84 | #define CTR_FLAG_OPTIONS_ONE_ARG (CTR_FLAG_REBUILD | \ | |
85 | CTR_FLAG_WRITE_MOSTLY | \ | |
86 | CTR_FLAG_DAEMON_SLEEP | \ | |
87 | CTR_FLAG_MIN_RECOVERY_RATE | \ | |
88 | CTR_FLAG_MAX_RECOVERY_RATE | \ | |
89 | CTR_FLAG_MAX_WRITE_BEHIND | \ | |
90 | CTR_FLAG_STRIPE_CACHE | \ | |
91 | CTR_FLAG_REGION_SIZE | \ | |
92 | CTR_FLAG_RAID10_COPIES | \ | |
33e53f06 HM |
93 | CTR_FLAG_RAID10_FORMAT | \ |
94 | CTR_FLAG_DELTA_DISKS | \ | |
95 | CTR_FLAG_DATA_OFFSET) | |
f090279e HM |
96 | |
97 | /* All ctr optional arguments */ | |
98 | #define ALL_CTR_FLAGS (CTR_FLAG_OPTIONS_NO_ARGS | \ | |
99 | CTR_FLAG_OPTIONS_ONE_ARG) | |
100 | ||
101 | /* Invalid options definitions per raid level... */ | |
102 | ||
103 | /* "raid0" does not accept any options */ | |
104 | #define RAID0_INVALID_FLAGS ALL_CTR_FLAGS | |
105 | ||
106 | /* "raid1" does not accept stripe cache or any raid10 options */ | |
107 | #define RAID1_INVALID_FLAGS (CTR_FLAG_STRIPE_CACHE | \ | |
108 | CTR_FLAG_RAID10_COPIES | \ | |
33e53f06 HM |
109 | CTR_FLAG_RAID10_FORMAT | \ |
110 | CTR_FLAG_DELTA_DISKS | \ | |
111 | CTR_FLAG_DATA_OFFSET) | |
f090279e HM |
112 | |
113 | /* "raid10" does not accept any raid1 or stripe cache options */ | |
114 | #define RAID10_INVALID_FLAGS (CTR_FLAG_WRITE_MOSTLY | \ | |
115 | CTR_FLAG_MAX_WRITE_BEHIND | \ | |
116 | CTR_FLAG_STRIPE_CACHE) | |
117 | /* | |
118 | * "raid4/5/6" do not accept any raid1 or raid10 specific options | |
119 | * | |
120 | * "raid6" does not accept "nosync", because it is not guaranteed | |
121 | * that both parity and q-syndrome are being written properly with | |
122 | * any writes | |
123 | */ | |
124 | #define RAID45_INVALID_FLAGS (CTR_FLAG_WRITE_MOSTLY | \ | |
125 | CTR_FLAG_MAX_WRITE_BEHIND | \ | |
126 | CTR_FLAG_RAID10_FORMAT | \ | |
33e53f06 HM |
127 | CTR_FLAG_RAID10_COPIES | \ |
128 | CTR_FLAG_RAID10_USE_NEAR_SETS) | |
f090279e HM |
129 | #define RAID6_INVALID_FLAGS (CTR_FLAG_NOSYNC | RAID45_INVALID_FLAGS) |
130 | /* ...invalid options definitions per raid level */ | |
131 | ||
ecbfb9f1 HM |
132 | /* |
133 | * Flags for rs->runtime_flags field | |
134 | * (RT_FLAG prefix meaning "runtime flag") | |
135 | * | |
136 | * These are all internal and used to define runtime state, | |
137 | * e.g. to prevent another resume from preresume processing | |
138 | * the raid set all over again. | |
139 | */ | |
140 | #define RT_FLAG_RS_PRERESUMED 0x1 | |
141 | #define RT_FLAG_RS_RESUMED 0x2 | |
142 | #define RT_FLAG_RS_BITMAP_LOADED 0x4 | |
143 | #define RT_FLAG_UPDATE_SBS 0x8 | |
144 | ||
33e53f06 HM |
145 | /* Array elements of 64 bit needed for rebuild/write_mostly bits */ |
146 | #define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8) | |
147 | ||
ecbfb9f1 HM |
148 | /* |
149 | * raid set level, layout and chunk sectors backup/restore | |
150 | */ | |
151 | struct rs_layout { | |
152 | int new_level; | |
153 | int new_layout; | |
154 | int new_chunk_sectors; | |
155 | }; | |
156 | ||
9d09e663 N |
157 | struct raid_set { |
158 | struct dm_target *ti; | |
159 | ||
34f8ac6d | 160 | uint32_t bitmap_loaded; |
c76d53f4 | 161 | uint32_t ctr_flags; |
ecbfb9f1 HM |
162 | uint32_t runtime_flags; |
163 | ||
164 | uint64_t rebuild_disks[DISKS_ARRAY_ELEMS]; | |
9d09e663 | 165 | |
33e53f06 HM |
166 | int raid_disks; |
167 | int delta_disks; | |
4763e543 | 168 | int data_offset; |
33e53f06 HM |
169 | int raid10_copies; |
170 | ||
fd01b88c | 171 | struct mddev md; |
9d09e663 N |
172 | struct raid_type *raid_type; |
173 | struct dm_target_callbacks callbacks; | |
ecbfb9f1 | 174 | struct rs_layout rs_layout; |
9d09e663 N |
175 | |
176 | struct raid_dev dev[0]; | |
177 | }; | |
178 | ||
ecbfb9f1 HM |
179 | /* Backup/restore raid set configuration helpers */ |
180 | static void _rs_config_backup(struct raid_set *rs, struct rs_layout *l) | |
181 | { | |
182 | struct mddev *mddev = &rs->md; | |
183 | ||
184 | l->new_level = mddev->new_level; | |
185 | l->new_layout = mddev->new_layout; | |
186 | l->new_chunk_sectors = mddev->new_chunk_sectors; | |
187 | } | |
188 | ||
189 | static void rs_config_backup(struct raid_set *rs) | |
190 | { | |
191 | return _rs_config_backup(rs, &rs->rs_layout); | |
192 | } | |
193 | ||
194 | static void _rs_config_restore(struct raid_set *rs, struct rs_layout *l) | |
195 | { | |
196 | struct mddev *mddev = &rs->md; | |
197 | ||
198 | mddev->new_level = l->new_level; | |
199 | mddev->new_layout = l->new_layout; | |
200 | mddev->new_chunk_sectors = l->new_chunk_sectors; | |
201 | } | |
202 | ||
203 | static void rs_config_restore(struct raid_set *rs) | |
204 | { | |
205 | return _rs_config_restore(rs, &rs->rs_layout); | |
206 | } | |
207 | /* END: backup/restore raid set configuration helpers */ | |
208 | ||
33e53f06 HM |
209 | /* raid10 algorithms (i.e. formats) */ |
210 | #define ALGORITHM_RAID10_DEFAULT 0 | |
211 | #define ALGORITHM_RAID10_NEAR 1 | |
212 | #define ALGORITHM_RAID10_OFFSET 2 | |
213 | #define ALGORITHM_RAID10_FAR 3 | |
214 | ||
9d09e663 N |
215 | /* Supported raid types and properties. */ |
216 | static struct raid_type { | |
217 | const char *name; /* RAID algorithm. */ | |
218 | const char *descr; /* Descriptor text for logging. */ | |
219 | const unsigned parity_devs; /* # of parity devices. */ | |
220 | const unsigned minimal_devs; /* minimal # of devices in set. */ | |
221 | const unsigned level; /* RAID level. */ | |
222 | const unsigned algorithm; /* RAID algorithm. */ | |
223 | } raid_types[] = { | |
43157840 MS |
224 | {"raid0", "raid0 (striping)", 0, 2, 0, 0 /* NONE */}, |
225 | {"raid1", "raid1 (mirroring)", 0, 2, 1, 0 /* NONE */}, | |
226 | {"raid10_far", "raid10 far (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_FAR}, | |
33e53f06 | 227 | {"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET}, |
43157840 MS |
228 | {"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR}, |
229 | {"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT}, | |
230 | {"raid4", "raid4 (dedicated last parity disk)", 1, 2, 4, ALGORITHM_PARITY_N}, /* raid4 layout = raid5_n */ | |
231 | {"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N}, | |
232 | {"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC}, | |
233 | {"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC}, | |
234 | {"raid5_la", "raid5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC}, | |
235 | {"raid5_ra", "raid5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC}, | |
236 | {"raid6_zr", "raid6 (zero restart)", 2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART}, | |
237 | {"raid6_nr", "raid6 (N restart)", 2, 4, 6, ALGORITHM_ROTATING_N_RESTART}, | |
238 | {"raid6_nc", "raid6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}, | |
239 | {"raid6_n_6", "raid6 (dedicated parity/Q n/6)", 2, 4, 6, ALGORITHM_PARITY_N_6}, | |
240 | {"raid6_ls_6", "raid6 (left symmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_LEFT_SYMMETRIC_6}, | |
241 | {"raid6_rs_6", "raid6 (right symmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_RIGHT_SYMMETRIC_6}, | |
242 | {"raid6_la_6", "raid6 (left asymmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_LEFT_ASYMMETRIC_6}, | |
243 | {"raid6_ra_6", "raid6 (right asymmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_RIGHT_ASYMMETRIC_6} | |
9d09e663 N |
244 | }; |
245 | ||
92c83d79 HM |
246 | /* True, if @v is in inclusive range [@min, @max] */ |
247 | static bool _in_range(long v, long min, long max) | |
248 | { | |
249 | return v >= min && v <= max; | |
250 | } | |
251 | ||
702108d1 HM |
252 | /* ctr flag bit manipulation... */ |
253 | /* Set single @flag in @flags */ | |
254 | static void _set_flag(uint32_t flag, uint32_t *flags) | |
255 | { | |
256 | WARN_ON_ONCE(hweight32(flag) != 1); | |
257 | *flags |= flag; | |
258 | } | |
259 | ||
ecbfb9f1 HM |
260 | /* Clear single @flag in @flags */ |
261 | static void _clear_flag(uint32_t flag, uint32_t *flags) | |
262 | { | |
263 | WARN_ON_ONCE(hweight32(flag) != 1); | |
264 | *flags &= ~flag; | |
265 | } | |
266 | ||
702108d1 HM |
267 | /* Test single @flag in @flags */ |
268 | static bool _test_flag(uint32_t flag, uint32_t flags) | |
269 | { | |
270 | WARN_ON_ONCE(hweight32(flag) != 1); | |
271 | return (flag & flags) ? true : false; | |
272 | } | |
273 | ||
ad51d7f1 HM |
274 | /* Test multiple @flags in @all_flags */ |
275 | static bool _test_flags(uint32_t flags, uint32_t all_flags) | |
276 | { | |
277 | return (flags & all_flags) ? true : false; | |
278 | } | |
279 | ||
7b34df74 HM |
280 | /* Clear (multiple) @flags in @all_flags */ |
281 | static void _clear_flags(uint32_t flags, uint32_t *all_flags) | |
282 | { | |
283 | *all_flags &= ~flags; | |
284 | } | |
285 | ||
702108d1 HM |
286 | /* Return true if single @flag is set in @*flags, else set it and return false */ |
287 | static bool _test_and_set_flag(uint32_t flag, uint32_t *flags) | |
288 | { | |
289 | if (_test_flag(flag, *flags)) | |
290 | return true; | |
291 | ||
292 | _set_flag(flag, flags); | |
293 | return false; | |
294 | } | |
ecbfb9f1 HM |
295 | |
296 | /* Return true if single @flag is set in @*flags and clear it, else return false */ | |
297 | static bool _test_and_clear_flag(uint32_t flag, uint32_t *flags) | |
298 | { | |
299 | if (_test_flag(flag, *flags)) { | |
300 | _clear_flag(flag, flags); | |
301 | return true; | |
302 | } | |
303 | ||
304 | return false; | |
305 | } | |
702108d1 HM |
306 | /* ...ctr and runtime flag bit manipulation */ |
307 | ||
308 | /* All table line arguments are defined here */ | |
309 | static struct arg_name_flag { | |
310 | const uint32_t flag; | |
311 | const char *name; | |
312 | } _arg_name_flags[] = { | |
313 | { CTR_FLAG_SYNC, "sync"}, | |
314 | { CTR_FLAG_NOSYNC, "nosync"}, | |
315 | { CTR_FLAG_REBUILD, "rebuild"}, | |
316 | { CTR_FLAG_DAEMON_SLEEP, "daemon_sleep"}, | |
317 | { CTR_FLAG_MIN_RECOVERY_RATE, "min_recovery_rate"}, | |
318 | { CTR_FLAG_MAX_RECOVERY_RATE, "max_recovery_rate"}, | |
319 | { CTR_FLAG_MAX_WRITE_BEHIND, "max_write_behind"}, | |
320 | { CTR_FLAG_WRITE_MOSTLY, "writemostly"}, | |
321 | { CTR_FLAG_STRIPE_CACHE, "stripe_cache"}, | |
322 | { CTR_FLAG_REGION_SIZE, "region_size"}, | |
323 | { CTR_FLAG_RAID10_COPIES, "raid10_copies"}, | |
324 | { CTR_FLAG_RAID10_FORMAT, "raid10_format"}, | |
4763e543 HM |
325 | { CTR_FLAG_DATA_OFFSET, "data_offset"}, |
326 | { CTR_FLAG_DELTA_DISKS, "delta_disks"}, | |
327 | { CTR_FLAG_RAID10_USE_NEAR_SETS, "raid10_use_near_sets"}, | |
702108d1 HM |
328 | }; |
329 | ||
330 | /* Return argument name string for given @flag */ | |
331 | static const char *_argname_by_flag(const uint32_t flag) | |
332 | { | |
333 | if (hweight32(flag) == 1) { | |
334 | struct arg_name_flag *anf = _arg_name_flags + ARRAY_SIZE(_arg_name_flags); | |
335 | ||
336 | while (anf-- > _arg_name_flags) | |
337 | if (_test_flag(flag, anf->flag)) | |
338 | return anf->name; | |
339 | ||
340 | } else | |
341 | DMERR("%s called with more than one flag!", __func__); | |
342 | ||
343 | return NULL; | |
344 | } | |
345 | ||
33e53f06 HM |
346 | /* |
347 | * bool helpers to test for various raid levels of a raid set, | |
348 | * is. it's level as reported by the superblock rather than | |
349 | * the requested raid_type passed to the constructor. | |
350 | */ | |
351 | /* Return true, if raid set in @rs is raid0 */ | |
352 | static bool rs_is_raid0(struct raid_set *rs) | |
353 | { | |
354 | return !rs->md.level; | |
355 | } | |
356 | ||
357 | /* Return true, if raid set in @rs is raid10 */ | |
358 | static bool rs_is_raid10(struct raid_set *rs) | |
359 | { | |
360 | return rs->md.level == 10; | |
361 | } | |
362 | ||
f090279e HM |
363 | /* |
364 | * bool helpers to test for various raid levels of a raid type | |
365 | */ | |
366 | ||
367 | /* Return true, if raid type in @rt is raid0 */ | |
368 | static bool rt_is_raid0(struct raid_type *rt) | |
369 | { | |
370 | return !rt->level; | |
371 | } | |
372 | ||
373 | /* Return true, if raid type in @rt is raid1 */ | |
374 | static bool rt_is_raid1(struct raid_type *rt) | |
375 | { | |
376 | return rt->level == 1; | |
377 | } | |
378 | ||
379 | /* Return true, if raid type in @rt is raid10 */ | |
380 | static bool rt_is_raid10(struct raid_type *rt) | |
381 | { | |
382 | return rt->level == 10; | |
383 | } | |
384 | ||
385 | /* Return true, if raid type in @rt is raid4/5 */ | |
386 | static bool rt_is_raid45(struct raid_type *rt) | |
387 | { | |
388 | return _in_range(rt->level, 4, 5); | |
389 | } | |
390 | ||
391 | /* Return true, if raid type in @rt is raid6 */ | |
392 | static bool rt_is_raid6(struct raid_type *rt) | |
393 | { | |
394 | return rt->level == 6; | |
395 | } | |
676fa5ad HM |
396 | |
397 | /* Return true, if raid type in @rt is raid4/5/6 */ | |
398 | static bool rt_is_raid456(struct raid_type *rt) | |
399 | { | |
400 | return _in_range(rt->level, 4, 6); | |
401 | } | |
f090279e HM |
402 | /* END: raid level bools */ |
403 | ||
f090279e HM |
404 | /* Return invalid ctr flags for the raid level of @rs */ |
405 | static uint32_t _invalid_flags(struct raid_set *rs) | |
406 | { | |
407 | if (rt_is_raid0(rs->raid_type)) | |
408 | return RAID0_INVALID_FLAGS; | |
409 | else if (rt_is_raid1(rs->raid_type)) | |
410 | return RAID1_INVALID_FLAGS; | |
411 | else if (rt_is_raid10(rs->raid_type)) | |
412 | return RAID10_INVALID_FLAGS; | |
413 | else if (rt_is_raid45(rs->raid_type)) | |
414 | return RAID45_INVALID_FLAGS; | |
415 | else if (rt_is_raid6(rs->raid_type)) | |
416 | return RAID6_INVALID_FLAGS; | |
417 | ||
418 | return ~0; | |
419 | } | |
420 | ||
421 | /* | |
422 | * Check for any invalid flags set on @rs defined by bitset @invalid_flags | |
423 | * | |
424 | * Has to be called after parsing of the ctr flags! | |
425 | */ | |
426 | static int rs_check_for_invalid_flags(struct raid_set *rs) | |
427 | { | |
bd83a4c4 MS |
428 | if (_test_flags(rs->ctr_flags, _invalid_flags(rs))) { |
429 | rs->ti->error = "Invalid flag combined"; | |
430 | return -EINVAL; | |
431 | } | |
f090279e HM |
432 | |
433 | return 0; | |
434 | } | |
435 | ||
33e53f06 HM |
436 | |
437 | /* MD raid10 bit definitions and helpers */ | |
438 | #define RAID10_OFFSET (1 << 16) /* stripes with data copies area adjacent on devices */ | |
439 | #define RAID10_BROCKEN_USE_FAR_SETS (1 << 17) /* Broken in raid10.c: use sets instead of whole stripe rotation */ | |
440 | #define RAID10_USE_FAR_SETS (1 << 18) /* Use sets instead of whole stripe rotation */ | |
441 | #define RAID10_FAR_COPIES_SHIFT 8 /* raid10 # far copies shift (2nd byte of layout) */ | |
442 | ||
443 | /* Return md raid10 near copies for @layout */ | |
444 | static unsigned int _raid10_near_copies(int layout) | |
445 | { | |
446 | return layout & 0xFF; | |
447 | } | |
448 | ||
449 | /* Return md raid10 far copies for @layout */ | |
450 | static unsigned int _raid10_far_copies(int layout) | |
451 | { | |
452 | return _raid10_near_copies(layout >> RAID10_FAR_COPIES_SHIFT); | |
453 | } | |
454 | ||
455 | /* Return true if md raid10 offset for @layout */ | |
456 | static unsigned int _is_raid10_offset(int layout) | |
457 | { | |
458 | return layout & RAID10_OFFSET; | |
459 | } | |
460 | ||
461 | /* Return true if md raid10 near for @layout */ | |
462 | static unsigned int _is_raid10_near(int layout) | |
463 | { | |
464 | return !_is_raid10_offset(layout) && _raid10_near_copies(layout) > 1; | |
465 | } | |
466 | ||
467 | /* Return true if md raid10 far for @layout */ | |
468 | static unsigned int _is_raid10_far(int layout) | |
469 | { | |
470 | return !_is_raid10_offset(layout) && _raid10_far_copies(layout) > 1; | |
471 | } | |
472 | ||
473 | /* Return md raid10 layout string for @layout */ | |
474 | static const char *raid10_md_layout_to_format(int layout) | |
fe5d2f4a JB |
475 | { |
476 | /* | |
33e53f06 HM |
477 | * Bit 16 stands for "offset" |
478 | * (i.e. adjacent stripes hold copies) | |
479 | * | |
fe5d2f4a JB |
480 | * Refer to MD's raid10.c for details |
481 | */ | |
33e53f06 | 482 | if (_is_raid10_offset(layout)) |
fe5d2f4a JB |
483 | return "offset"; |
484 | ||
33e53f06 | 485 | if (_raid10_near_copies(layout) > 1) |
fe5d2f4a JB |
486 | return "near"; |
487 | ||
33e53f06 HM |
488 | WARN_ON(_raid10_far_copies(layout) < 2); |
489 | ||
fe5d2f4a JB |
490 | return "far"; |
491 | } | |
492 | ||
33e53f06 HM |
493 | /* Return md raid10 algorithm for @name */ |
494 | static const int raid10_name_to_format(const char *name) | |
495 | { | |
496 | if (!strcasecmp(name, "near")) | |
497 | return ALGORITHM_RAID10_NEAR; | |
498 | else if (!strcasecmp(name, "offset")) | |
499 | return ALGORITHM_RAID10_OFFSET; | |
500 | else if (!strcasecmp(name, "far")) | |
501 | return ALGORITHM_RAID10_FAR; | |
502 | ||
503 | return -EINVAL; | |
504 | } | |
505 | ||
506 | ||
507 | /* Return md raid10 copies for @layout */ | |
508 | static unsigned int raid10_md_layout_to_copies(int layout) | |
63f33b8d | 509 | { |
33e53f06 HM |
510 | return _raid10_near_copies(layout) > 1 ? |
511 | _raid10_near_copies(layout) : _raid10_far_copies(layout); | |
63f33b8d JB |
512 | } |
513 | ||
33e53f06 HM |
514 | /* Return md raid10 format id for @format string */ |
515 | static int raid10_format_to_md_layout(struct raid_set *rs, | |
516 | unsigned int algorithm, | |
517 | unsigned int copies) | |
63f33b8d | 518 | { |
33e53f06 | 519 | unsigned int n = 1, f = 1, r = 0; |
fe5d2f4a | 520 | |
33e53f06 HM |
521 | /* |
522 | * MD resilienece flaw: | |
523 | * | |
524 | * enabling use_far_sets for far/offset formats causes copies | |
525 | * to be colocated on the same devs together with their origins! | |
526 | * | |
527 | * -> disable it for now in the definition above | |
528 | */ | |
529 | if (algorithm == ALGORITHM_RAID10_DEFAULT || | |
530 | algorithm == ALGORITHM_RAID10_NEAR) | |
fe5d2f4a | 531 | n = copies; |
33e53f06 HM |
532 | |
533 | else if (algorithm == ALGORITHM_RAID10_OFFSET) { | |
534 | f = copies; | |
535 | r = RAID10_OFFSET; | |
536 | if (!_test_flag(CTR_FLAG_RAID10_USE_NEAR_SETS, rs->ctr_flags)) | |
537 | r |= RAID10_USE_FAR_SETS; | |
538 | ||
539 | } else if (algorithm == ALGORITHM_RAID10_FAR) { | |
fe5d2f4a | 540 | f = copies; |
33e53f06 HM |
541 | r = !RAID10_OFFSET; |
542 | if (!_test_flag(CTR_FLAG_RAID10_USE_NEAR_SETS, rs->ctr_flags)) | |
543 | r |= RAID10_USE_FAR_SETS; | |
fe5d2f4a | 544 | |
33e53f06 HM |
545 | } else |
546 | return -EINVAL; | |
547 | ||
548 | return r | (f << RAID10_FAR_COPIES_SHIFT) | n; | |
549 | } | |
550 | /* END: MD raid10 bit definitions and helpers */ | |
fe5d2f4a | 551 | |
33e53f06 HM |
552 | /* Check for any of the raid10 algorithms */ |
553 | static int _got_raid10(struct raid_type *rtp, const int layout) | |
554 | { | |
555 | if (rtp->level == 10) { | |
556 | switch (rtp->algorithm) { | |
557 | case ALGORITHM_RAID10_DEFAULT: | |
558 | case ALGORITHM_RAID10_NEAR: | |
559 | return _is_raid10_near(layout); | |
560 | case ALGORITHM_RAID10_OFFSET: | |
561 | return _is_raid10_offset(layout); | |
562 | case ALGORITHM_RAID10_FAR: | |
563 | return _is_raid10_far(layout); | |
564 | default: | |
565 | break; | |
566 | } | |
567 | } | |
fe5d2f4a | 568 | |
33e53f06 | 569 | return 0; |
63f33b8d JB |
570 | } |
571 | ||
33e53f06 | 572 | /* Return raid_type for @name */ |
92c83d79 | 573 | static struct raid_type *get_raid_type(const char *name) |
9d09e663 | 574 | { |
33e53f06 | 575 | struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types); |
9d09e663 | 576 | |
33e53f06 HM |
577 | while (rtp-- > raid_types) |
578 | if (!strcasecmp(rtp->name, name)) | |
579 | return rtp; | |
9d09e663 N |
580 | |
581 | return NULL; | |
582 | } | |
583 | ||
33e53f06 HM |
584 | /* Return raid_type for @name based derived from @level and @layout */ |
585 | static struct raid_type *get_raid_type_by_ll(const int level, const int layout) | |
586 | { | |
587 | struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types); | |
588 | ||
589 | while (rtp-- > raid_types) { | |
590 | /* RAID10 special checks based on @layout flags/properties */ | |
591 | if (rtp->level == level && | |
592 | (_got_raid10(rtp, layout) || rtp->algorithm == layout)) | |
593 | return rtp; | |
594 | } | |
595 | ||
596 | return NULL; | |
597 | } | |
598 | ||
3a1c1ef2 HM |
599 | /* |
600 | * Set the mddev properties in @rs to the current | |
601 | * ones retrieved from the freshest superblock | |
602 | */ | |
603 | static void rs_set_cur(struct raid_set *rs) | |
604 | { | |
605 | struct mddev *mddev = &rs->md; | |
606 | ||
607 | mddev->new_level = mddev->level; | |
608 | mddev->new_layout = mddev->layout; | |
609 | mddev->new_chunk_sectors = mddev->chunk_sectors; | |
610 | } | |
611 | ||
33e53f06 HM |
612 | /* |
613 | * Set the mddev properties in @rs to the new | |
614 | * ones requested by the ctr | |
615 | */ | |
616 | static void rs_set_new(struct raid_set *rs) | |
617 | { | |
618 | struct mddev *mddev = &rs->md; | |
619 | ||
620 | mddev->level = mddev->new_level; | |
621 | mddev->layout = mddev->new_layout; | |
622 | mddev->chunk_sectors = mddev->new_chunk_sectors; | |
3a1c1ef2 | 623 | mddev->raid_disks = rs->raid_disks; |
33e53f06 HM |
624 | mddev->delta_disks = 0; |
625 | } | |
626 | ||
627 | ||
9d09e663 N |
628 | static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *raid_type, unsigned raid_devs) |
629 | { | |
630 | unsigned i; | |
631 | struct raid_set *rs; | |
9d09e663 | 632 | |
bd83a4c4 MS |
633 | if (raid_devs <= raid_type->parity_devs) { |
634 | ti->error = "Insufficient number of devices"; | |
635 | return ERR_PTR(-EINVAL); | |
636 | } | |
9d09e663 | 637 | |
9d09e663 | 638 | rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL); |
bd83a4c4 MS |
639 | if (!rs) { |
640 | ti->error = "Cannot allocate raid context"; | |
641 | return ERR_PTR(-ENOMEM); | |
642 | } | |
9d09e663 N |
643 | |
644 | mddev_init(&rs->md); | |
645 | ||
33e53f06 HM |
646 | rs->raid_disks = raid_devs; |
647 | rs->delta_disks = 0; | |
648 | ||
9d09e663 N |
649 | rs->ti = ti; |
650 | rs->raid_type = raid_type; | |
651 | rs->md.raid_disks = raid_devs; | |
652 | rs->md.level = raid_type->level; | |
653 | rs->md.new_level = rs->md.level; | |
9d09e663 N |
654 | rs->md.layout = raid_type->algorithm; |
655 | rs->md.new_layout = rs->md.layout; | |
656 | rs->md.delta_disks = 0; | |
ecbfb9f1 | 657 | rs->md.recovery_cp = rs_is_raid0(rs) ? MaxSector : 0; |
9d09e663 N |
658 | |
659 | for (i = 0; i < raid_devs; i++) | |
660 | md_rdev_init(&rs->dev[i].rdev); | |
661 | ||
662 | /* | |
663 | * Remaining items to be initialized by further RAID params: | |
664 | * rs->md.persistent | |
665 | * rs->md.external | |
666 | * rs->md.chunk_sectors | |
667 | * rs->md.new_chunk_sectors | |
c039c332 | 668 | * rs->md.dev_sectors |
9d09e663 N |
669 | */ |
670 | ||
671 | return rs; | |
672 | } | |
673 | ||
674 | static void context_free(struct raid_set *rs) | |
675 | { | |
676 | int i; | |
677 | ||
b12d437b JB |
678 | for (i = 0; i < rs->md.raid_disks; i++) { |
679 | if (rs->dev[i].meta_dev) | |
680 | dm_put_device(rs->ti, rs->dev[i].meta_dev); | |
545c8795 | 681 | md_rdev_clear(&rs->dev[i].rdev); |
9d09e663 N |
682 | if (rs->dev[i].data_dev) |
683 | dm_put_device(rs->ti, rs->dev[i].data_dev); | |
b12d437b | 684 | } |
9d09e663 N |
685 | |
686 | kfree(rs); | |
687 | } | |
688 | ||
689 | /* | |
690 | * For every device we have two words | |
691 | * <meta_dev>: meta device name or '-' if missing | |
692 | * <data_dev>: data device name or '-' if missing | |
693 | * | |
b12d437b JB |
694 | * The following are permitted: |
695 | * - - | |
696 | * - <data_dev> | |
697 | * <meta_dev> <data_dev> | |
698 | * | |
699 | * The following is not allowed: | |
700 | * <meta_dev> - | |
701 | * | |
702 | * This code parses those words. If there is a failure, | |
703 | * the caller must use context_free to unwind the operations. | |
9d09e663 | 704 | */ |
702108d1 | 705 | static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as) |
9d09e663 N |
706 | { |
707 | int i; | |
708 | int rebuild = 0; | |
709 | int metadata_available = 0; | |
73c6f239 | 710 | int r = 0; |
92c83d79 | 711 | const char *arg; |
9d09e663 | 712 | |
92c83d79 HM |
713 | /* Put off the number of raid devices argument to get to dev pairs */ |
714 | arg = dm_shift_arg(as); | |
715 | if (!arg) | |
716 | return -EINVAL; | |
717 | ||
718 | for (i = 0; i < rs->md.raid_disks; i++) { | |
9d09e663 N |
719 | rs->dev[i].rdev.raid_disk = i; |
720 | ||
721 | rs->dev[i].meta_dev = NULL; | |
722 | rs->dev[i].data_dev = NULL; | |
723 | ||
724 | /* | |
725 | * There are no offsets, since there is a separate device | |
726 | * for data and metadata. | |
727 | */ | |
728 | rs->dev[i].rdev.data_offset = 0; | |
729 | rs->dev[i].rdev.mddev = &rs->md; | |
730 | ||
92c83d79 HM |
731 | arg = dm_shift_arg(as); |
732 | if (!arg) | |
733 | return -EINVAL; | |
734 | ||
735 | if (strcmp(arg, "-")) { | |
bd83a4c4 MS |
736 | r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table), |
737 | &rs->dev[i].meta_dev); | |
738 | if (r) { | |
739 | rs->ti->error = "RAID metadata device lookup failure"; | |
740 | return r; | |
741 | } | |
b12d437b JB |
742 | |
743 | rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL); | |
bd83a4c4 MS |
744 | if (!rs->dev[i].rdev.sb_page) { |
745 | rs->ti->error = "Failed to allocate superblock page"; | |
746 | return -ENOMEM; | |
747 | } | |
9d09e663 N |
748 | } |
749 | ||
92c83d79 HM |
750 | arg = dm_shift_arg(as); |
751 | if (!arg) | |
752 | return -EINVAL; | |
753 | ||
754 | if (!strcmp(arg, "-")) { | |
9d09e663 | 755 | if (!test_bit(In_sync, &rs->dev[i].rdev.flags) && |
bd83a4c4 MS |
756 | (!rs->dev[i].rdev.recovery_offset)) { |
757 | rs->ti->error = "Drive designated for rebuild not specified"; | |
758 | return -EINVAL; | |
759 | } | |
9d09e663 | 760 | |
bd83a4c4 MS |
761 | if (rs->dev[i].meta_dev) { |
762 | rs->ti->error = "No data device supplied with metadata device"; | |
763 | return -EINVAL; | |
764 | } | |
b12d437b | 765 | |
9d09e663 N |
766 | continue; |
767 | } | |
768 | ||
bd83a4c4 MS |
769 | r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table), |
770 | &rs->dev[i].data_dev); | |
771 | if (r) { | |
772 | rs->ti->error = "RAID device lookup failure"; | |
773 | return r; | |
774 | } | |
9d09e663 | 775 | |
b12d437b JB |
776 | if (rs->dev[i].meta_dev) { |
777 | metadata_available = 1; | |
778 | rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev; | |
779 | } | |
9d09e663 | 780 | rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev; |
3a1c1ef2 | 781 | list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks); |
9d09e663 N |
782 | if (!test_bit(In_sync, &rs->dev[i].rdev.flags)) |
783 | rebuild++; | |
784 | } | |
785 | ||
786 | if (metadata_available) { | |
787 | rs->md.external = 0; | |
788 | rs->md.persistent = 1; | |
789 | rs->md.major_version = 2; | |
790 | } else if (rebuild && !rs->md.recovery_cp) { | |
791 | /* | |
792 | * Without metadata, we will not be able to tell if the array | |
793 | * is in-sync or not - we must assume it is not. Therefore, | |
794 | * it is impossible to rebuild a drive. | |
795 | * | |
796 | * Even if there is metadata, the on-disk information may | |
797 | * indicate that the array is not in-sync and it will then | |
798 | * fail at that time. | |
799 | * | |
800 | * User could specify 'nosync' option if desperate. | |
801 | */ | |
bd83a4c4 MS |
802 | rs->ti->error = "Unable to rebuild drive while array is not in-sync"; |
803 | return -EINVAL; | |
9d09e663 N |
804 | } |
805 | ||
806 | return 0; | |
807 | } | |
808 | ||
c1084561 JB |
809 | /* |
810 | * validate_region_size | |
811 | * @rs | |
812 | * @region_size: region size in sectors. If 0, pick a size (4MiB default). | |
813 | * | |
814 | * Set rs->md.bitmap_info.chunksize (which really refers to 'region size'). | |
815 | * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap. | |
816 | * | |
817 | * Returns: 0 on success, -EINVAL on failure. | |
818 | */ | |
819 | static int validate_region_size(struct raid_set *rs, unsigned long region_size) | |
820 | { | |
821 | unsigned long min_region_size = rs->ti->len / (1 << 21); | |
822 | ||
823 | if (!region_size) { | |
824 | /* | |
43157840 | 825 | * Choose a reasonable default. All figures in sectors. |
c1084561 JB |
826 | */ |
827 | if (min_region_size > (1 << 13)) { | |
3a0f9aae | 828 | /* If not a power of 2, make it the next power of 2 */ |
042745ee | 829 | region_size = roundup_pow_of_two(min_region_size); |
c1084561 JB |
830 | DMINFO("Choosing default region size of %lu sectors", |
831 | region_size); | |
c1084561 JB |
832 | } else { |
833 | DMINFO("Choosing default region size of 4MiB"); | |
834 | region_size = 1 << 13; /* sectors */ | |
835 | } | |
836 | } else { | |
837 | /* | |
838 | * Validate user-supplied value. | |
839 | */ | |
bd83a4c4 MS |
840 | if (region_size > rs->ti->len) { |
841 | rs->ti->error = "Supplied region size is too large"; | |
842 | return -EINVAL; | |
843 | } | |
c1084561 JB |
844 | |
845 | if (region_size < min_region_size) { | |
846 | DMERR("Supplied region_size (%lu sectors) below minimum (%lu)", | |
847 | region_size, min_region_size); | |
bd83a4c4 MS |
848 | rs->ti->error = "Supplied region size is too small"; |
849 | return -EINVAL; | |
c1084561 JB |
850 | } |
851 | ||
bd83a4c4 MS |
852 | if (!is_power_of_2(region_size)) { |
853 | rs->ti->error = "Region size is not a power of 2"; | |
854 | return -EINVAL; | |
855 | } | |
c1084561 | 856 | |
bd83a4c4 MS |
857 | if (region_size < rs->md.chunk_sectors) { |
858 | rs->ti->error = "Region size is smaller than the chunk size"; | |
859 | return -EINVAL; | |
860 | } | |
c1084561 JB |
861 | } |
862 | ||
863 | /* | |
864 | * Convert sectors to bytes. | |
865 | */ | |
866 | rs->md.bitmap_info.chunksize = (region_size << 9); | |
867 | ||
868 | return 0; | |
869 | } | |
870 | ||
eb649123 | 871 | /* |
55ebbb59 | 872 | * validate_raid_redundancy |
eb649123 JB |
873 | * @rs |
874 | * | |
55ebbb59 JB |
875 | * Determine if there are enough devices in the array that haven't |
876 | * failed (or are being rebuilt) to form a usable array. | |
eb649123 JB |
877 | * |
878 | * Returns: 0 on success, -EINVAL on failure. | |
879 | */ | |
55ebbb59 | 880 | static int validate_raid_redundancy(struct raid_set *rs) |
eb649123 JB |
881 | { |
882 | unsigned i, rebuild_cnt = 0; | |
3f6bbd3f | 883 | unsigned rebuilds_per_group = 0, copies, d; |
fe5d2f4a | 884 | unsigned group_size, last_group_start; |
eb649123 | 885 | |
eb649123 | 886 | for (i = 0; i < rs->md.raid_disks; i++) |
55ebbb59 JB |
887 | if (!test_bit(In_sync, &rs->dev[i].rdev.flags) || |
888 | !rs->dev[i].rdev.sb_page) | |
eb649123 JB |
889 | rebuild_cnt++; |
890 | ||
891 | switch (rs->raid_type->level) { | |
892 | case 1: | |
893 | if (rebuild_cnt >= rs->md.raid_disks) | |
894 | goto too_many; | |
895 | break; | |
896 | case 4: | |
897 | case 5: | |
898 | case 6: | |
899 | if (rebuild_cnt > rs->raid_type->parity_devs) | |
900 | goto too_many; | |
901 | break; | |
902 | case 10: | |
4ec1e369 JB |
903 | copies = raid10_md_layout_to_copies(rs->md.layout); |
904 | if (rebuild_cnt < copies) | |
905 | break; | |
906 | ||
907 | /* | |
908 | * It is possible to have a higher rebuild count for RAID10, | |
909 | * as long as the failed devices occur in different mirror | |
910 | * groups (i.e. different stripes). | |
911 | * | |
4ec1e369 JB |
912 | * When checking "near" format, make sure no adjacent devices |
913 | * have failed beyond what can be handled. In addition to the | |
914 | * simple case where the number of devices is a multiple of the | |
915 | * number of copies, we must also handle cases where the number | |
916 | * of devices is not a multiple of the number of copies. | |
43157840 MS |
917 | * E.g. dev1 dev2 dev3 dev4 dev5 |
918 | * A A B B C | |
919 | * C D D E E | |
4ec1e369 | 920 | */ |
fe5d2f4a JB |
921 | if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) { |
922 | for (i = 0; i < rs->md.raid_disks * copies; i++) { | |
923 | if (!(i % copies)) | |
924 | rebuilds_per_group = 0; | |
925 | d = i % rs->md.raid_disks; | |
926 | if ((!rs->dev[d].rdev.sb_page || | |
927 | !test_bit(In_sync, &rs->dev[d].rdev.flags)) && | |
928 | (++rebuilds_per_group >= copies)) | |
929 | goto too_many; | |
930 | } | |
931 | break; | |
932 | } | |
933 | ||
934 | /* | |
935 | * When checking "far" and "offset" formats, we need to ensure | |
936 | * that the device that holds its copy is not also dead or | |
937 | * being rebuilt. (Note that "far" and "offset" formats only | |
938 | * support two copies right now. These formats also only ever | |
939 | * use the 'use_far_sets' variant.) | |
940 | * | |
941 | * This check is somewhat complicated by the need to account | |
43157840 | 942 | * for arrays that are not a multiple of (far) copies. This |
fe5d2f4a JB |
943 | * results in the need to treat the last (potentially larger) |
944 | * set differently. | |
945 | */ | |
946 | group_size = (rs->md.raid_disks / copies); | |
947 | last_group_start = (rs->md.raid_disks / group_size) - 1; | |
948 | last_group_start *= group_size; | |
949 | for (i = 0; i < rs->md.raid_disks; i++) { | |
950 | if (!(i % copies) && !(i > last_group_start)) | |
55ebbb59 | 951 | rebuilds_per_group = 0; |
fe5d2f4a JB |
952 | if ((!rs->dev[i].rdev.sb_page || |
953 | !test_bit(In_sync, &rs->dev[i].rdev.flags)) && | |
4ec1e369 | 954 | (++rebuilds_per_group >= copies)) |
fe5d2f4a | 955 | goto too_many; |
4ec1e369 JB |
956 | } |
957 | break; | |
eb649123 | 958 | default: |
55ebbb59 JB |
959 | if (rebuild_cnt) |
960 | return -EINVAL; | |
eb649123 JB |
961 | } |
962 | ||
963 | return 0; | |
964 | ||
965 | too_many: | |
eb649123 JB |
966 | return -EINVAL; |
967 | } | |
968 | ||
9d09e663 N |
969 | /* |
970 | * Possible arguments are... | |
9d09e663 N |
971 | * <chunk_size> [optional_args] |
972 | * | |
32737279 JB |
973 | * Argument definitions |
974 | * <chunk_size> The number of sectors per disk that | |
43157840 | 975 | * will form the "stripe" |
32737279 | 976 | * [[no]sync] Force or prevent recovery of the |
43157840 | 977 | * entire array |
9d09e663 | 978 | * [rebuild <idx>] Rebuild the drive indicated by the index |
32737279 | 979 | * [daemon_sleep <ms>] Time between bitmap daemon work to |
43157840 | 980 | * clear bits |
9d09e663 N |
981 | * [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization |
982 | * [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization | |
46bed2b5 | 983 | * [write_mostly <idx>] Indicate a write mostly drive via index |
9d09e663 N |
984 | * [max_write_behind <sectors>] See '-write-behind=' (man mdadm) |
985 | * [stripe_cache <sectors>] Stripe cache size for higher RAIDs | |
43157840 | 986 | * [region_size <sectors>] Defines granularity of bitmap |
63f33b8d JB |
987 | * |
988 | * RAID10-only options: | |
43157840 | 989 | * [raid10_copies <# copies>] Number of copies. (Default: 2) |
fe5d2f4a | 990 | * [raid10_format <near|far|offset>] Layout algorithm. (Default: near) |
9d09e663 | 991 | */ |
92c83d79 | 992 | static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as, |
9d09e663 N |
993 | unsigned num_raid_params) |
994 | { | |
33e53f06 | 995 | int raid10_format = ALGORITHM_RAID10_DEFAULT; |
63f33b8d | 996 | unsigned raid10_copies = 2; |
eb649123 | 997 | unsigned i; |
92c83d79 | 998 | unsigned value, region_size = 0; |
c039c332 | 999 | sector_t sectors_per_dev = rs->ti->len; |
542f9038 | 1000 | sector_t max_io_len; |
92c83d79 | 1001 | const char *arg, *key; |
702108d1 | 1002 | struct raid_dev *rd; |
33e53f06 | 1003 | struct raid_type *rt = rs->raid_type; |
92c83d79 HM |
1004 | |
1005 | arg = dm_shift_arg(as); | |
1006 | num_raid_params--; /* Account for chunk_size argument */ | |
1007 | ||
bd83a4c4 MS |
1008 | if (kstrtouint(arg, 10, &value) < 0) { |
1009 | rs->ti->error = "Bad numerical argument given for chunk_size"; | |
1010 | return -EINVAL; | |
1011 | } | |
9d09e663 N |
1012 | |
1013 | /* | |
1014 | * First, parse the in-order required arguments | |
32737279 | 1015 | * "chunk_size" is the only argument of this type. |
9d09e663 | 1016 | */ |
33e53f06 | 1017 | if (rt_is_raid1(rt)) { |
32737279 JB |
1018 | if (value) |
1019 | DMERR("Ignoring chunk size parameter for RAID 1"); | |
1020 | value = 0; | |
bd83a4c4 MS |
1021 | } else if (!is_power_of_2(value)) { |
1022 | rs->ti->error = "Chunk size must be a power of 2"; | |
1023 | return -EINVAL; | |
1024 | } else if (value < 8) { | |
1025 | rs->ti->error = "Chunk size value is too small"; | |
1026 | return -EINVAL; | |
1027 | } | |
9d09e663 N |
1028 | |
1029 | rs->md.new_chunk_sectors = rs->md.chunk_sectors = value; | |
9d09e663 N |
1030 | |
1031 | /* | |
b12d437b JB |
1032 | * We set each individual device as In_sync with a completed |
1033 | * 'recovery_offset'. If there has been a device failure or | |
1034 | * replacement then one of the following cases applies: | |
1035 | * | |
1036 | * 1) User specifies 'rebuild'. | |
43157840 | 1037 | * - Device is reset when param is read. |
b12d437b | 1038 | * 2) A new device is supplied. |
43157840 | 1039 | * - No matching superblock found, resets device. |
b12d437b | 1040 | * 3) Device failure was transient and returns on reload. |
43157840 | 1041 | * - Failure noticed, resets device for bitmap replay. |
b12d437b | 1042 | * 4) Device hadn't completed recovery after previous failure. |
43157840 | 1043 | * - Superblock is read and overrides recovery_offset. |
b12d437b JB |
1044 | * |
1045 | * What is found in the superblocks of the devices is always | |
1046 | * authoritative, unless 'rebuild' or '[no]sync' was specified. | |
9d09e663 | 1047 | */ |
b12d437b | 1048 | for (i = 0; i < rs->md.raid_disks; i++) { |
9d09e663 | 1049 | set_bit(In_sync, &rs->dev[i].rdev.flags); |
b12d437b JB |
1050 | rs->dev[i].rdev.recovery_offset = MaxSector; |
1051 | } | |
9d09e663 | 1052 | |
b12d437b JB |
1053 | /* |
1054 | * Second, parse the unordered optional arguments | |
1055 | */ | |
9d09e663 | 1056 | for (i = 0; i < num_raid_params; i++) { |
4763e543 | 1057 | key = dm_shift_arg(as); |
bd83a4c4 MS |
1058 | if (!key) { |
1059 | rs->ti->error = "Not enough raid parameters given"; | |
1060 | return -EINVAL; | |
1061 | } | |
92c83d79 | 1062 | |
4763e543 | 1063 | if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_NOSYNC))) { |
bd83a4c4 MS |
1064 | if (_test_and_set_flag(CTR_FLAG_NOSYNC, &rs->ctr_flags)) { |
1065 | rs->ti->error = "Only one 'nosync' argument allowed"; | |
1066 | return -EINVAL; | |
1067 | } | |
9d09e663 | 1068 | rs->md.recovery_cp = MaxSector; |
9d09e663 N |
1069 | continue; |
1070 | } | |
4763e543 | 1071 | if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_SYNC))) { |
bd83a4c4 MS |
1072 | if (_test_and_set_flag(CTR_FLAG_SYNC, &rs->ctr_flags)) { |
1073 | rs->ti->error = "Only one 'sync' argument allowed"; | |
1074 | return -EINVAL; | |
1075 | } | |
9d09e663 | 1076 | rs->md.recovery_cp = 0; |
4763e543 HM |
1077 | continue; |
1078 | } | |
1079 | if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_RAID10_USE_NEAR_SETS))) { | |
bd83a4c4 MS |
1080 | if (_test_and_set_flag(CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) { |
1081 | rs->ti->error = "Only one 'raid10_use_new_sets' argument allowed"; | |
1082 | return -EINVAL; | |
1083 | } | |
9d09e663 N |
1084 | continue; |
1085 | } | |
1086 | ||
92c83d79 HM |
1087 | arg = dm_shift_arg(as); |
1088 | i++; /* Account for the argument pairs */ | |
bd83a4c4 MS |
1089 | if (!arg) { |
1090 | rs->ti->error = "Wrong number of raid parameters given"; | |
1091 | return -EINVAL; | |
1092 | } | |
63f33b8d | 1093 | |
702108d1 HM |
1094 | /* |
1095 | * Parameters that take a string value are checked here. | |
1096 | */ | |
1097 | ||
1098 | if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_RAID10_FORMAT))) { | |
bd83a4c4 MS |
1099 | if (_test_and_set_flag(CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) { |
1100 | rs->ti->error = "Only one 'raid10_format' argument pair allowed"; | |
1101 | return -EINVAL; | |
1102 | } | |
1103 | if (!rt_is_raid10(rt)) { | |
1104 | rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type"; | |
1105 | return -EINVAL; | |
1106 | } | |
33e53f06 | 1107 | raid10_format = raid10_name_to_format(arg); |
bd83a4c4 MS |
1108 | if (raid10_format < 0) { |
1109 | rs->ti->error = "Invalid 'raid10_format' value given"; | |
1110 | return raid10_format; | |
1111 | } | |
63f33b8d JB |
1112 | continue; |
1113 | } | |
1114 | ||
bd83a4c4 MS |
1115 | if (kstrtouint(arg, 10, &value) < 0) { |
1116 | rs->ti->error = "Bad numerical argument given in raid params"; | |
1117 | return -EINVAL; | |
1118 | } | |
702108d1 HM |
1119 | |
1120 | if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_REBUILD))) { | |
1121 | /* | |
1122 | * "rebuild" is being passed in by userspace to provide | |
1123 | * indexes of replaced devices and to set up additional | |
1124 | * devices on raid level takeover. | |
43157840 | 1125 | */ |
bd83a4c4 MS |
1126 | if (!_in_range(value, 0, rs->raid_disks - 1)) { |
1127 | rs->ti->error = "Invalid rebuild index given"; | |
1128 | return -EINVAL; | |
1129 | } | |
702108d1 | 1130 | |
bd83a4c4 MS |
1131 | if (test_and_set_bit(value, (void *) rs->rebuild_disks)) { |
1132 | rs->ti->error = "rebuild for this index already given"; | |
1133 | return -EINVAL; | |
1134 | } | |
ecbfb9f1 | 1135 | |
702108d1 HM |
1136 | rd = rs->dev + value; |
1137 | clear_bit(In_sync, &rd->rdev.flags); | |
1138 | clear_bit(Faulty, &rd->rdev.flags); | |
1139 | rd->rdev.recovery_offset = 0; | |
1140 | _set_flag(CTR_FLAG_REBUILD, &rs->ctr_flags); | |
1141 | } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_WRITE_MOSTLY))) { | |
bd83a4c4 MS |
1142 | if (!rt_is_raid1(rt)) { |
1143 | rs->ti->error = "write_mostly option is only valid for RAID1"; | |
1144 | return -EINVAL; | |
1145 | } | |
702108d1 | 1146 | |
bd83a4c4 MS |
1147 | if (!_in_range(value, 0, rs->md.raid_disks - 1)) { |
1148 | rs->ti->error = "Invalid write_mostly index given"; | |
1149 | return -EINVAL; | |
1150 | } | |
9d09e663 | 1151 | |
46bed2b5 | 1152 | set_bit(WriteMostly, &rs->dev[value].rdev.flags); |
702108d1 HM |
1153 | _set_flag(CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags); |
1154 | } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_MAX_WRITE_BEHIND))) { | |
bd83a4c4 MS |
1155 | if (!rt_is_raid1(rt)) { |
1156 | rs->ti->error = "max_write_behind option is only valid for RAID1"; | |
1157 | return -EINVAL; | |
1158 | } | |
702108d1 | 1159 | |
bd83a4c4 MS |
1160 | if (_test_and_set_flag(CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) { |
1161 | rs->ti->error = "Only one max_write_behind argument pair allowed"; | |
1162 | return -EINVAL; | |
1163 | } | |
9d09e663 N |
1164 | |
1165 | /* | |
1166 | * In device-mapper, we specify things in sectors, but | |
1167 | * MD records this value in kB | |
1168 | */ | |
1169 | value /= 2; | |
bd83a4c4 MS |
1170 | if (value > COUNTER_MAX) { |
1171 | rs->ti->error = "Max write-behind limit out of range"; | |
1172 | return -EINVAL; | |
1173 | } | |
702108d1 | 1174 | |
9d09e663 | 1175 | rs->md.bitmap_info.max_write_behind = value; |
702108d1 | 1176 | } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_DAEMON_SLEEP))) { |
bd83a4c4 MS |
1177 | if (_test_and_set_flag(CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) { |
1178 | rs->ti->error = "Only one daemon_sleep argument pair allowed"; | |
1179 | return -EINVAL; | |
1180 | } | |
1181 | if (!value || (value > MAX_SCHEDULE_TIMEOUT)) { | |
1182 | rs->ti->error = "daemon sleep period out of range"; | |
1183 | return -EINVAL; | |
1184 | } | |
9d09e663 | 1185 | rs->md.bitmap_info.daemon_sleep = value; |
4763e543 HM |
1186 | } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_DATA_OFFSET))) { |
1187 | /* Userspace passes new data_offset after having extended the the data image LV */ | |
bd83a4c4 MS |
1188 | if (_test_and_set_flag(CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) { |
1189 | rs->ti->error = "Only one data_offset argument pair allowed"; | |
1190 | return -EINVAL; | |
1191 | } | |
4763e543 | 1192 | /* Ensure sensible data offset */ |
bd83a4c4 MS |
1193 | if (value < 0) { |
1194 | rs->ti->error = "Bogus data_offset value"; | |
1195 | return -EINVAL; | |
1196 | } | |
4763e543 HM |
1197 | rs->data_offset = value; |
1198 | } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_DELTA_DISKS))) { | |
1199 | /* Define the +/-# of disks to add to/remove from the given raid set */ | |
bd83a4c4 MS |
1200 | if (_test_and_set_flag(CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) { |
1201 | rs->ti->error = "Only one delta_disks argument pair allowed"; | |
1202 | return -EINVAL; | |
1203 | } | |
4763e543 | 1204 | /* Ensure MAX_RAID_DEVICES and raid type minimal_devs! */ |
bd83a4c4 MS |
1205 | if (!_in_range(abs(value), 1, MAX_RAID_DEVICES - rt->minimal_devs)) { |
1206 | rs->ti->error = "Too many delta_disk requested"; | |
1207 | return -EINVAL; | |
1208 | } | |
4763e543 HM |
1209 | |
1210 | rs->delta_disks = value; | |
702108d1 | 1211 | } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_STRIPE_CACHE))) { |
bd83a4c4 MS |
1212 | if (_test_and_set_flag(CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) { |
1213 | rs->ti->error = "Only one stripe_cache argument pair allowed"; | |
1214 | return -EINVAL; | |
1215 | } | |
1216 | ||
9d09e663 N |
1217 | /* |
1218 | * In device-mapper, we specify things in sectors, but | |
1219 | * MD records this value in kB | |
1220 | */ | |
1221 | value /= 2; | |
1222 | ||
bd83a4c4 MS |
1223 | if (!rt_is_raid456(rt)) { |
1224 | rs->ti->error = "Inappropriate argument: stripe_cache"; | |
1225 | return -EINVAL; | |
1226 | } | |
1227 | if (raid5_set_cache_size(&rs->md, (int)value)) { | |
1228 | rs->ti->error = "Bad stripe_cache size"; | |
1229 | return -EINVAL; | |
1230 | } | |
702108d1 HM |
1231 | |
1232 | } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_MIN_RECOVERY_RATE))) { | |
bd83a4c4 MS |
1233 | if (_test_and_set_flag(CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) { |
1234 | rs->ti->error = "Only one min_recovery_rate argument pair allowed"; | |
1235 | return -EINVAL; | |
1236 | } | |
1237 | if (value > INT_MAX) { | |
1238 | rs->ti->error = "min_recovery_rate out of range"; | |
1239 | return -EINVAL; | |
1240 | } | |
9d09e663 | 1241 | rs->md.sync_speed_min = (int)value; |
702108d1 | 1242 | } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) { |
bd83a4c4 MS |
1243 | if (_test_and_set_flag(CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) { |
1244 | rs->ti->error = "Only one max_recovery_rate argument pair allowed"; | |
1245 | return -EINVAL; | |
1246 | } | |
1247 | if (value > INT_MAX) { | |
1248 | rs->ti->error = "max_recovery_rate out of range"; | |
1249 | return -EINVAL; | |
1250 | } | |
9d09e663 | 1251 | rs->md.sync_speed_max = (int)value; |
702108d1 | 1252 | } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_REGION_SIZE))) { |
bd83a4c4 MS |
1253 | if (_test_and_set_flag(CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) { |
1254 | rs->ti->error = "Only one region_size argument pair allowed"; | |
1255 | return -EINVAL; | |
1256 | } | |
702108d1 | 1257 | |
c1084561 | 1258 | region_size = value; |
702108d1 | 1259 | } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_RAID10_COPIES))) { |
bd83a4c4 MS |
1260 | if (_test_and_set_flag(CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) { |
1261 | rs->ti->error = "Only one raid10_copies argument pair allowed"; | |
1262 | return -EINVAL; | |
1263 | } | |
702108d1 | 1264 | |
bd83a4c4 MS |
1265 | if (!_in_range(value, 2, rs->md.raid_disks)) { |
1266 | rs->ti->error = "Bad value for 'raid10_copies'"; | |
1267 | return -EINVAL; | |
1268 | } | |
702108d1 | 1269 | |
63f33b8d | 1270 | raid10_copies = value; |
9d09e663 N |
1271 | } else { |
1272 | DMERR("Unable to parse RAID parameter: %s", key); | |
bd83a4c4 MS |
1273 | rs->ti->error = "Unable to parse RAID parameter"; |
1274 | return -EINVAL; | |
9d09e663 N |
1275 | } |
1276 | } | |
1277 | ||
c1084561 JB |
1278 | if (validate_region_size(rs, region_size)) |
1279 | return -EINVAL; | |
1280 | ||
1281 | if (rs->md.chunk_sectors) | |
542f9038 | 1282 | max_io_len = rs->md.chunk_sectors; |
c1084561 | 1283 | else |
542f9038 | 1284 | max_io_len = region_size; |
c1084561 | 1285 | |
542f9038 MS |
1286 | if (dm_set_target_max_io_len(rs->ti, max_io_len)) |
1287 | return -EINVAL; | |
32737279 | 1288 | |
33e53f06 | 1289 | if (rt_is_raid10(rt)) { |
bd83a4c4 MS |
1290 | if (raid10_copies > rs->md.raid_disks) { |
1291 | rs->ti->error = "Not enough devices to satisfy specification"; | |
1292 | return -EINVAL; | |
1293 | } | |
63f33b8d | 1294 | |
33e53f06 | 1295 | rs->md.new_layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies); |
bd83a4c4 MS |
1296 | if (rs->md.new_layout < 0) { |
1297 | rs->ti->error = "Error getting raid10 format"; | |
1298 | return rs->md.new_layout; | |
1299 | } | |
33e53f06 HM |
1300 | |
1301 | rt = get_raid_type_by_ll(10, rs->md.new_layout); | |
bd83a4c4 MS |
1302 | if (!rt) { |
1303 | rs->ti->error = "Failed to recognize new raid10 layout"; | |
1304 | return -EINVAL; | |
1305 | } | |
33e53f06 HM |
1306 | |
1307 | if ((rt->algorithm == ALGORITHM_RAID10_DEFAULT || | |
1308 | rt->algorithm == ALGORITHM_RAID10_NEAR) && | |
bd83a4c4 MS |
1309 | _test_flag(CTR_FLAG_RAID10_USE_NEAR_SETS, rs->ctr_flags)) { |
1310 | rs->ti->error = "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible"; | |
1311 | return -EINVAL; | |
1312 | } | |
fe5d2f4a | 1313 | |
63f33b8d JB |
1314 | /* (Len * #mirrors) / #devices */ |
1315 | sectors_per_dev = rs->ti->len * raid10_copies; | |
1316 | sector_div(sectors_per_dev, rs->md.raid_disks); | |
1317 | ||
33e53f06 | 1318 | rs->md.layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies); |
63f33b8d | 1319 | rs->md.new_layout = rs->md.layout; |
33e53f06 | 1320 | } else if (!rt_is_raid1(rt) && |
bd83a4c4 MS |
1321 | sector_div(sectors_per_dev, (rs->md.raid_disks - rt->parity_devs))) { |
1322 | rs->ti->error = "Target length not divisible by number of data devices"; | |
1323 | return -EINVAL; | |
1324 | } | |
702108d1 | 1325 | |
33e53f06 | 1326 | rs->raid10_copies = raid10_copies; |
c039c332 JB |
1327 | rs->md.dev_sectors = sectors_per_dev; |
1328 | ||
9d09e663 N |
1329 | /* Assume there are no metadata devices until the drives are parsed */ |
1330 | rs->md.persistent = 0; | |
1331 | rs->md.external = 1; | |
1332 | ||
f090279e HM |
1333 | /* Check, if any invalid ctr arguments have been passed in for the raid level */ |
1334 | return rs_check_for_invalid_flags(rs); | |
9d09e663 N |
1335 | } |
1336 | ||
3a1c1ef2 HM |
1337 | /* Return # of data stripes as kept in mddev as of @rs (i.e. as of superblock) */ |
1338 | static unsigned int mddev_data_stripes(struct raid_set *rs) | |
1339 | { | |
1340 | return rs->md.raid_disks - rs->raid_type->parity_devs; | |
1341 | } | |
1342 | ||
9d09e663 N |
1343 | static void do_table_event(struct work_struct *ws) |
1344 | { | |
1345 | struct raid_set *rs = container_of(ws, struct raid_set, md.event_work); | |
1346 | ||
1347 | dm_table_event(rs->ti->table); | |
1348 | } | |
1349 | ||
1350 | static int raid_is_congested(struct dm_target_callbacks *cb, int bits) | |
1351 | { | |
1352 | struct raid_set *rs = container_of(cb, struct raid_set, callbacks); | |
1353 | ||
5c675f83 | 1354 | return mddev_congested(&rs->md, bits); |
9d09e663 N |
1355 | } |
1356 | ||
ecbfb9f1 HM |
1357 | /* |
1358 | * Make sure a valid takover (level switch) is being requested on @rs | |
1359 | * | |
1360 | * Conversions of raid sets from one MD personality to another | |
1361 | * have to conform to restrictions which are enforced here. | |
1362 | * | |
1363 | * Degration is already checked for in rs_check_conversion() below. | |
1364 | */ | |
1365 | static int rs_check_takeover(struct raid_set *rs) | |
1366 | { | |
1367 | struct mddev *mddev = &rs->md; | |
1368 | unsigned int near_copies; | |
1369 | ||
1370 | switch (mddev->level) { | |
1371 | case 0: | |
1372 | /* raid0 -> raid1/5 with one disk */ | |
1373 | if ((mddev->new_level == 1 || mddev->new_level == 5) && | |
1374 | mddev->raid_disks == 1) | |
1375 | return 0; | |
1376 | ||
1377 | /* raid0 -> raid10 */ | |
1378 | if (mddev->new_level == 10 && | |
1379 | !(rs->raid_disks % 2)) | |
1380 | return 0; | |
1381 | ||
1382 | /* raid0 with multiple disks -> raid4/5/6 */ | |
1383 | if (_in_range(mddev->new_level, 4, 6) && | |
1384 | mddev->new_layout == ALGORITHM_PARITY_N && | |
1385 | mddev->raid_disks > 1) | |
1386 | return 0; | |
1387 | ||
1388 | break; | |
1389 | ||
1390 | case 10: | |
1391 | /* Can't takeover raid10_offset! */ | |
1392 | if (_is_raid10_offset(mddev->layout)) | |
1393 | break; | |
1394 | ||
1395 | near_copies = _raid10_near_copies(mddev->layout); | |
1396 | ||
1397 | /* raid10* -> raid0 */ | |
1398 | if (mddev->new_level == 0) { | |
1399 | /* Can takeover raid10_near with raid disks divisable by data copies! */ | |
1400 | if (near_copies > 1 && | |
1401 | !(mddev->raid_disks % near_copies)) { | |
1402 | mddev->raid_disks /= near_copies; | |
1403 | mddev->delta_disks = mddev->raid_disks; | |
1404 | return 0; | |
1405 | } | |
1406 | ||
1407 | /* Can takeover raid10_far */ | |
1408 | if (near_copies == 1 && | |
1409 | _raid10_far_copies(mddev->layout) > 1) | |
1410 | return 0; | |
1411 | ||
1412 | break; | |
1413 | } | |
1414 | ||
1415 | /* raid10_{near,far} -> raid1 */ | |
1416 | if (mddev->new_level == 1 && | |
1417 | max(near_copies, _raid10_far_copies(mddev->layout)) == mddev->raid_disks) | |
1418 | return 0; | |
1419 | ||
1420 | /* raid10_{near,far} with 2 disks -> raid4/5 */ | |
1421 | if (_in_range(mddev->new_level, 4, 5) && | |
1422 | mddev->raid_disks == 2) | |
1423 | return 0; | |
1424 | break; | |
1425 | ||
1426 | case 1: | |
1427 | /* raid1 with 2 disks -> raid4/5 */ | |
1428 | if (_in_range(mddev->new_level, 4, 5) && | |
1429 | mddev->raid_disks == 2) { | |
1430 | mddev->degraded = 1; | |
1431 | return 0; | |
1432 | } | |
1433 | ||
1434 | /* raid1 -> raid0 */ | |
1435 | if (mddev->new_level == 0 && | |
1436 | mddev->raid_disks == 1) | |
1437 | return 0; | |
1438 | ||
1439 | /* raid1 -> raid10 */ | |
1440 | if (mddev->new_level == 10) | |
1441 | return 0; | |
1442 | ||
1443 | break; | |
1444 | ||
1445 | case 4: | |
1446 | /* raid4 -> raid0 */ | |
1447 | if (mddev->new_level == 0) | |
1448 | return 0; | |
1449 | ||
1450 | /* raid4 -> raid1/5 with 2 disks */ | |
1451 | if ((mddev->new_level == 1 || mddev->new_level == 5) && | |
1452 | mddev->raid_disks == 2) | |
1453 | return 0; | |
1454 | ||
1455 | /* raid4 -> raid5/6 with parity N */ | |
1456 | if (_in_range(mddev->new_level, 5, 6) && | |
1457 | mddev->layout == ALGORITHM_PARITY_N) | |
1458 | return 0; | |
1459 | break; | |
1460 | ||
1461 | case 5: | |
1462 | /* raid5 with parity N -> raid0 */ | |
1463 | if (mddev->new_level == 0 && | |
1464 | mddev->layout == ALGORITHM_PARITY_N) | |
1465 | return 0; | |
1466 | ||
1467 | /* raid5 with parity N -> raid4 */ | |
1468 | if (mddev->new_level == 4 && | |
1469 | mddev->layout == ALGORITHM_PARITY_N) | |
1470 | return 0; | |
1471 | ||
1472 | /* raid5 with 2 disks -> raid1/4/10 */ | |
1473 | if ((mddev->new_level == 1 || mddev->new_level == 4 || mddev->new_level == 10) && | |
1474 | mddev->raid_disks == 2) | |
1475 | return 0; | |
1476 | ||
1477 | /* raid5 with parity N -> raid6 with parity N */ | |
1478 | if (mddev->new_level == 6 && | |
1479 | ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) || | |
1480 | _in_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC_6, ALGORITHM_RIGHT_SYMMETRIC_6))) | |
1481 | return 0; | |
1482 | break; | |
1483 | ||
1484 | case 6: | |
1485 | /* raid6 with parity N -> raid0 */ | |
1486 | if (mddev->new_level == 0 && | |
1487 | mddev->layout == ALGORITHM_PARITY_N) | |
1488 | return 0; | |
1489 | ||
1490 | /* raid6 with parity N -> raid4 */ | |
1491 | if (mddev->new_level == 4 && | |
1492 | mddev->layout == ALGORITHM_PARITY_N) | |
1493 | return 0; | |
1494 | ||
1495 | /* raid6_*_n with parity N -> raid5_* */ | |
1496 | if (mddev->new_level == 5 && | |
1497 | ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) || | |
1498 | _in_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC))) | |
1499 | return 0; | |
1500 | ||
1501 | default: | |
1502 | break; | |
1503 | } | |
1504 | ||
bd83a4c4 MS |
1505 | rs->ti->error = "takeover not possible"; |
1506 | return -EINVAL; | |
ecbfb9f1 HM |
1507 | } |
1508 | ||
1509 | /* True if @rs requested to be taken over */ | |
1510 | static bool rs_takeover_requested(struct raid_set *rs) | |
1511 | { | |
1512 | return rs->md.new_level != rs->md.level; | |
1513 | } | |
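/*
 * Loosely speaking, new_level carries what the current table line asked for
 * while level is what the superblocks recorded (see super_init_validation()),
 * so a mismatch means a level change was requested and rs_check_takeover()
 * above decides whether that transition is allowed.
 */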
1514 | ||
33e53f06 | 1515 | /* Features */ |
ecbfb9f1 HM |
1516 | #define FEATURE_FLAG_SUPPORTS_V180 0x1 /* Supports v1.8.0 extended superblock */ |
1517 | #define FEATURE_FLAG_SUPPORTS_RESHAPE 0x2 /* Supports v1.8.0 reshaping functionality */ | |
33e53f06 HM |
1518 | |
1519 | /* State flags for sb->flags */ | |
1520 | #define SB_FLAG_RESHAPE_ACTIVE 0x1 | |
1521 | #define SB_FLAG_RESHAPE_BACKWARDS 0x2 | |
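/*
 * These state flags only matter while a reshape is in flight: super_sync()
 * below sets SB_FLAG_RESHAPE_ACTIVE (and possibly SB_FLAG_RESHAPE_BACKWARDS)
 * whenever reshape_position != MaxSector and clears both otherwise;
 * super_init_validation() uses them to pick up an interrupted reshape.
 */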
1522 | ||
b12d437b JB |
1523 | /* |
1524 | * This structure is never routinely used by userspace, unlike md superblocks. | |
1525 | * Devices with this superblock should only ever be accessed via device-mapper. | |
1526 | */ | |
1527 | #define DM_RAID_MAGIC 0x64526D44 | |
1528 | struct dm_raid_superblock { | |
1529 | __le32 magic; /* "DmRd" */ | |
33e53f06 | 1530 | __le32 compat_features; /* Used to indicate compatible features (like 1.8.0 ondisk metadata extension) */ |
b12d437b | 1531 | |
33e53f06 HM |
1532 | __le32 num_devices; /* Number of devices in this raid set. (Max 64) */ |
1533 | __le32 array_position; /* The position of this drive in the raid set */ | |
b12d437b JB |
1534 | |
1535 | __le64 events; /* Incremented by md when superblock updated */ | |
33e53f06 HM |
1536 | __le64 failed_devices; /* Pre 1.8.0 part of bit field of devices to */ |
1537 | /* indicate failures (see extension below) */ | |
b12d437b JB |
1538 | |
1539 | /* | |
1540 | * This offset tracks the progress of the repair or replacement of | |
1541 | * an individual drive. | |
1542 | */ | |
1543 | __le64 disk_recovery_offset; | |
1544 | ||
1545 | /* | |
33e53f06 | 1546 | * This offset tracks the progress of the initial raid set |
b12d437b JB |
1547 | * synchronisation/parity calculation. |
1548 | */ | |
1549 | __le64 array_resync_offset; | |
1550 | ||
1551 | /* | |
33e53f06 | 1552 | * raid characteristics |
b12d437b JB |
1553 | */ |
1554 | __le32 level; | |
1555 | __le32 layout; | |
1556 | __le32 stripe_sectors; | |
1557 | ||
33e53f06 HM |
1558 | /******************************************************************** |
1559 | * BELOW FOLLOW V1.8.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!! | |
1560 | * | |
ecbfb9f1 | 1561 | * FEATURE_FLAG_SUPPORTS_V180 in the features member indicates that those exist |
33e53f06 HM |
1562 | */ |
1563 | ||
1564 | __le32 flags; /* Flags defining array states for reshaping */ | |
1565 | ||
1566 | /* | |
1567 | * This offset tracks the progress of a raid | |
1568 | * set reshape in order to be able to restart it | |
1569 | */ | |
1570 | __le64 reshape_position; | |
1571 | ||
1572 | /* | |
1573 | * These define the properties of the array in case of an interrupted reshape | |
1574 | */ | |
1575 | __le32 new_level; | |
1576 | __le32 new_layout; | |
1577 | __le32 new_stripe_sectors; | |
1578 | __le32 delta_disks; | |
1579 | ||
1580 | __le64 array_sectors; /* Array size in sectors */ | |
1581 | ||
1582 | /* | |
1583 | * Sector offsets to data on devices (reshaping). | |
1584 | * Needed to support out of place reshaping, thus | |
1585 | * not writing over any stripes whilst converting | |
1586 | * them from old to new layout | |
1587 | */ | |
1588 | __le64 data_offset; | |
1589 | __le64 new_data_offset; | |
1590 | ||
1591 | __le64 sectors; /* Used device size in sectors */ | |
1592 | ||
1593 | /* | |
1594 | * Additional bit field of devices indicating failures to support | |
1595 | * up to 256 devices with the 1.8.0 on-disk metadata format | |
1596 | */ | |
1597 | __le64 extended_failed_devices[DISKS_ARRAY_ELEMS - 1]; | |
1598 | ||
1599 | __le32 incompat_features; /* Used to indicate any incompatible features */ | |
1600 | ||
1601 | /* Always set rest up to logical block size to 0 when writing (see get_metadata_device() below). */ | |
b12d437b JB |
1602 | } __packed; |
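/*
 * On disk this superblock starts at sector 0 of the metadata device and
 * occupies one logical block (see super_load() below); all fields are
 * little endian and super_sync() zeroes the rest of that block beyond
 * sizeof(struct dm_raid_superblock).
 */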
1603 | ||
3cb03002 | 1604 | static int read_disk_sb(struct md_rdev *rdev, int size) |
b12d437b JB |
1605 | { |
1606 | BUG_ON(!rdev->sb_page); | |
1607 | ||
1608 | if (rdev->sb_loaded) | |
1609 | return 0; | |
1610 | ||
796a5cf0 | 1611 | if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, 1)) { |
0447568f JB |
1612 | DMERR("Failed to read superblock of device at position %d", |
1613 | rdev->raid_disk); | |
c32fb9e7 | 1614 | md_error(rdev->mddev, rdev); |
b12d437b JB |
1615 | return -EINVAL; |
1616 | } | |
1617 | ||
1618 | rdev->sb_loaded = 1; | |
1619 | ||
1620 | return 0; | |
1621 | } | |
1622 | ||
33e53f06 HM |
1623 | static void sb_retrieve_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices) |
1624 | { | |
1625 | failed_devices[0] = le64_to_cpu(sb->failed_devices); | |
1626 | memset(failed_devices + 1, 0, sizeof(sb->extended_failed_devices)); | |
1627 | ||
ecbfb9f1 | 1628 | if (_test_flag(FEATURE_FLAG_SUPPORTS_V180, le32_to_cpu(sb->compat_features))) { |
33e53f06 HM |
1629 | int i = ARRAY_SIZE(sb->extended_failed_devices); |
1630 | ||
1631 | while (i--) | |
1632 | failed_devices[i+1] = le64_to_cpu(sb->extended_failed_devices[i]); | |
1633 | } | |
1634 | } | |
1635 | ||
7b34df74 HM |
1636 | static void sb_update_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices) |
1637 | { | |
1638 | int i = ARRAY_SIZE(sb->extended_failed_devices); | |
1639 | ||
1640 | sb->failed_devices = cpu_to_le64(failed_devices[0]); | |
1641 | while (i--) | |
1642 | sb->extended_failed_devices[i] = cpu_to_le64(failed_devices[i+1]); | |
1643 | } | |
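/*
 * The two helpers above split one logical bit field across the superblock:
 * bit i of 64-bit word (i / 64) marks raid device i as failed, with word 0
 * stored in sb->failed_devices and words 1.. in sb->extended_failed_devices[].
 * E.g. a failure of device 70 ends up as bit 6 of extended_failed_devices[0].
 */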
1644 | ||
1645 | /* | |
1646 | * Synchronize the superblock members with the raid set properties | |
1647 | * | |
1648 | * All superblock data is little endian. | |
1649 | */ | |
fd01b88c | 1650 | static void super_sync(struct mddev *mddev, struct md_rdev *rdev) |
b12d437b | 1651 | { |
7b34df74 HM |
1652 | bool update_failed_devices = false; |
1653 | unsigned int i; | |
1654 | uint64_t failed_devices[DISKS_ARRAY_ELEMS]; | |
b12d437b | 1655 | struct dm_raid_superblock *sb; |
81f382f9 | 1656 | struct raid_set *rs = container_of(mddev, struct raid_set, md); |
b12d437b | 1657 | |
7b34df74 HM |
1658 | /* No metadata device, no superblock */ |
1659 | if (!rdev->meta_bdev) | |
1660 | return; | |
1661 | ||
1662 | BUG_ON(!rdev->sb_page); | |
1663 | ||
b12d437b | 1664 | sb = page_address(rdev->sb_page); |
b12d437b | 1665 | |
7b34df74 | 1666 | sb_retrieve_failed_devices(sb, failed_devices); |
b12d437b | 1667 | |
7b34df74 HM |
1668 | for (i = 0; i < rs->raid_disks; i++) |
1669 | if (!rs->dev[i].data_dev || test_bit(Faulty, &rs->dev[i].rdev.flags)) { | |
1670 | update_failed_devices = true; | |
1671 | set_bit(i, (void *) failed_devices); | |
1672 | } | |
1673 | ||
1674 | if (update_failed_devices) | |
1675 | sb_update_failed_devices(sb, failed_devices); | |
b12d437b JB |
1676 | |
1677 | sb->magic = cpu_to_le32(DM_RAID_MAGIC); | |
ecbfb9f1 | 1678 | sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V180); /* Don't set reshape flag yet */ |
b12d437b JB |
1679 | |
1680 | sb->num_devices = cpu_to_le32(mddev->raid_disks); | |
1681 | sb->array_position = cpu_to_le32(rdev->raid_disk); | |
1682 | ||
1683 | sb->events = cpu_to_le64(mddev->events); | |
b12d437b JB |
1684 | |
1685 | sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset); | |
1686 | sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp); | |
1687 | ||
1688 | sb->level = cpu_to_le32(mddev->level); | |
1689 | sb->layout = cpu_to_le32(mddev->layout); | |
1690 | sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors); | |
7b34df74 HM |
1691 | |
1692 | sb->new_level = cpu_to_le32(mddev->new_level); | |
1693 | sb->new_layout = cpu_to_le32(mddev->new_layout); | |
1694 | sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors); | |
1695 | ||
1696 | sb->delta_disks = cpu_to_le32(mddev->delta_disks); | |
1697 | ||
1698 | smp_rmb(); /* Make sure we access most recent reshape position */ | |
1699 | sb->reshape_position = cpu_to_le64(mddev->reshape_position); | |
1700 | if (le64_to_cpu(sb->reshape_position) != MaxSector) { | |
1701 | /* Flag ongoing reshape */ | |
1702 | sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE); | |
1703 | ||
1704 | if (mddev->delta_disks < 0 || mddev->reshape_backwards) | |
1705 | sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_BACKWARDS); | |
1706 | } else | |
1707 | /* Flag no reshape */ | |
1708 | _clear_flags(cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE|SB_FLAG_RESHAPE_BACKWARDS), &sb->flags); | |
1709 | ||
1710 | sb->array_sectors = cpu_to_le64(mddev->array_sectors); | |
1711 | sb->data_offset = cpu_to_le64(rdev->data_offset); | |
1712 | sb->new_data_offset = cpu_to_le64(rdev->new_data_offset); | |
1713 | sb->sectors = cpu_to_le64(rdev->sectors); | |
1714 | ||
1715 | /* Zero out the rest of the payload after the size of the superblock */ | |
1716 | memset(sb + 1, 0, rdev->sb_size - sizeof(*sb)); | |
b12d437b JB |
1717 | } |
1718 | ||
1719 | /* | |
1720 | * super_load | |
1721 | * | |
1722 | * This function creates a superblock if one is not found on the device | |
1723 | * and will decide which superblock to use if there's a choice. | |
1724 | * | |
1725 | * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise | |
1726 | */ | |
3cb03002 | 1727 | static int super_load(struct md_rdev *rdev, struct md_rdev *refdev) |
b12d437b | 1728 | { |
73c6f239 | 1729 | int r; |
b12d437b JB |
1730 | struct dm_raid_superblock *sb; |
1731 | struct dm_raid_superblock *refsb; | |
1732 | uint64_t events_sb, events_refsb; | |
1733 | ||
1734 | rdev->sb_start = 0; | |
40d43c4b HM |
1735 | rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev); |
1736 | if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) { | |
1737 | DMERR("superblock size of a logical block is no longer valid"); | |
1738 | return -EINVAL; | |
1739 | } | |
b12d437b | 1740 | |
73c6f239 HM |
1741 | r = read_disk_sb(rdev, rdev->sb_size); |
1742 | if (r) | |
1743 | return r; | |
b12d437b JB |
1744 | |
1745 | sb = page_address(rdev->sb_page); | |
3aa3b2b2 JB |
1746 | |
1747 | /* | |
1748 | * Two cases in which we want to write new superblocks and rebuild: | |
1749 | * 1) New device (no matching magic number) | |
1750 | * 2) Device specified for rebuild (!In_sync w/ offset == 0) | |
1751 | */ | |
1752 | if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) || | |
1753 | (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) { | |
b12d437b JB |
1754 | super_sync(rdev->mddev, rdev); |
1755 | ||
1756 | set_bit(FirstUse, &rdev->flags); | |
ecbfb9f1 | 1757 | sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V180); /* Don't set reshape flag yet */ |
b12d437b JB |
1758 | |
1759 | /* Force writing of superblocks to disk */ | |
1760 | set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags); | |
1761 | ||
1762 | /* Any superblock is better than none, choose that if given */ | |
1763 | return refdev ? 0 : 1; | |
1764 | } | |
1765 | ||
1766 | if (!refdev) | |
1767 | return 1; | |
1768 | ||
1769 | events_sb = le64_to_cpu(sb->events); | |
1770 | ||
1771 | refsb = page_address(refdev->sb_page); | |
1772 | events_refsb = le64_to_cpu(refsb->events); | |
1773 | ||
1774 | return (events_sb > events_refsb) ? 1 : 0; | |
1775 | } | |
1776 | ||
33e53f06 | 1777 | static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev) |
b12d437b JB |
1778 | { |
1779 | int role; | |
33e53f06 HM |
1780 | unsigned int d; |
1781 | struct mddev *mddev = &rs->md; | |
b12d437b | 1782 | uint64_t events_sb; |
33e53f06 | 1783 | uint64_t failed_devices[DISKS_ARRAY_ELEMS]; |
b12d437b | 1784 | struct dm_raid_superblock *sb; |
33e53f06 | 1785 | uint32_t new_devs = 0, rebuild_and_new = 0, rebuilds = 0; |
dafb20fa | 1786 | struct md_rdev *r; |
b12d437b JB |
1787 | struct dm_raid_superblock *sb2; |
1788 | ||
1789 | sb = page_address(rdev->sb_page); | |
1790 | events_sb = le64_to_cpu(sb->events); | |
b12d437b JB |
1791 | |
1792 | /* | |
1793 | * Initialise to 1 if this is a new superblock. | |
1794 | */ | |
1795 | mddev->events = events_sb ? : 1; | |
1796 | ||
33e53f06 HM |
1797 | mddev->reshape_position = MaxSector; |
1798 | ||
b12d437b | 1799 | /* |
33e53f06 HM |
1800 | * Reshaping is supported, i.e. reshape_position is valid | |
1801 | * in the superblock and the superblock content is authoritative. | |
b12d437b | 1802 | */ |
ecbfb9f1 | 1803 | if (_test_flag(FEATURE_FLAG_SUPPORTS_V180, le32_to_cpu(sb->compat_features))) { |
33e53f06 HM |
1804 | /* Superblock is authoritative wrt given raid set layout! */ |
1805 | mddev->raid_disks = le32_to_cpu(sb->num_devices); | |
1806 | mddev->level = le32_to_cpu(sb->level); | |
1807 | mddev->layout = le32_to_cpu(sb->layout); | |
1808 | mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors); | |
1809 | mddev->new_level = le32_to_cpu(sb->new_level); | |
1810 | mddev->new_layout = le32_to_cpu(sb->new_layout); | |
1811 | mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors); | |
1812 | mddev->delta_disks = le32_to_cpu(sb->delta_disks); | |
1813 | mddev->array_sectors = le64_to_cpu(sb->array_sectors); | |
1814 | ||
1815 | /* raid was reshaping and got interrupted */ | |
1816 | if (_test_flag(SB_FLAG_RESHAPE_ACTIVE, le32_to_cpu(sb->flags))) { | |
1817 | if (_test_flag(CTR_FLAG_DELTA_DISKS, rs->ctr_flags)) { | |
1818 | DMERR("Reshape requested but raid set is still reshaping"); | |
1819 | return -EINVAL; | |
1820 | } | |
b12d437b | 1821 | |
33e53f06 HM |
1822 | if (mddev->delta_disks < 0 || |
1823 | (!mddev->delta_disks && _test_flag(SB_FLAG_RESHAPE_BACKWARDS, le32_to_cpu(sb->flags)))) | |
1824 | mddev->reshape_backwards = 1; | |
1825 | else | |
1826 | mddev->reshape_backwards = 0; | |
1827 | ||
1828 | mddev->reshape_position = le64_to_cpu(sb->reshape_position); | |
1829 | rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout); | |
1830 | } | |
1831 | ||
1832 | } else { | |
1833 | /* | |
3a1c1ef2 | 1834 | * No takeover/reshaping, because we don't have the extended v1.8.0 metadata |
33e53f06 HM |
1835 | */ |
1836 | if (le32_to_cpu(sb->level) != mddev->level) { | |
1837 | DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)"); | |
1838 | return -EINVAL; | |
1839 | } | |
1840 | if (le32_to_cpu(sb->layout) != mddev->layout) { | |
1841 | DMERR("Reshaping raid sets not yet supported. (raid layout change)"); | |
43157840 MS |
1842 | DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout); |
1843 | DMERR(" Old layout: %s w/ %d copies", | |
33e53f06 HM |
1844 | raid10_md_layout_to_format(le32_to_cpu(sb->layout)), |
1845 | raid10_md_layout_to_copies(le32_to_cpu(sb->layout))); | |
43157840 | 1846 | DMERR(" New layout: %s w/ %d copies", |
33e53f06 HM |
1847 | raid10_md_layout_to_format(mddev->layout), |
1848 | raid10_md_layout_to_copies(mddev->layout)); | |
1849 | return -EINVAL; | |
1850 | } | |
1851 | if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) { | |
1852 | DMERR("Reshaping raid sets not yet supported. (stripe sectors change)"); | |
1853 | return -EINVAL; | |
1854 | } | |
1855 | ||
1856 | /* We can only change the number of devices in raid1 with old (i.e. pre 1.0.7) metadata */ | |
1857 | if (!rt_is_raid1(rs->raid_type) && | |
1858 | (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) { | |
1859 | DMERR("Reshaping raid sets not yet supported. (device count change from %u to %u)", | |
1860 | sb->num_devices, mddev->raid_disks); | |
1861 | return -EINVAL; | |
1862 | } | |
1863 | ||
1864 | /* Table line is checked vs. authoritative superblock */ | |
1865 | rs_set_new(rs); | |
b12d437b JB |
1866 | } |
1867 | ||
33e53f06 | 1868 | if (!_test_flag(CTR_FLAG_NOSYNC, rs->ctr_flags)) |
b12d437b JB |
1869 | mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset); |
1870 | ||
1871 | /* | |
1872 | * During load, we set FirstUse if a new superblock was written. | |
1873 | * There are two reasons we might not have a superblock: | |
33e53f06 | 1874 | * 1) The raid set is brand new - in which case, all of the |
43157840 | 1875 | * devices must have their In_sync bit set. Also, |
b12d437b | 1876 | * recovery_cp must be 0, unless forced. |
33e53f06 | 1877 | * 2) This is a new device being added to an old raid set |
b12d437b JB |
1878 | * and the new device needs to be rebuilt - in which |
1879 | * case the In_sync bit will /not/ be set and | |
1880 | * recovery_cp must be MaxSector. | |
1881 | */ | |
33e53f06 | 1882 | d = 0; |
dafb20fa | 1883 | rdev_for_each(r, mddev) { |
33e53f06 HM |
1884 | if (test_bit(FirstUse, &r->flags)) |
1885 | new_devs++; | |
1886 | ||
b12d437b | 1887 | if (!test_bit(In_sync, &r->flags)) { |
33e53f06 HM |
1888 | DMINFO("Device %d specified for rebuild; clearing superblock", |
1889 | r->raid_disk); | |
b12d437b | 1890 | rebuilds++; |
33e53f06 HM |
1891 | |
1892 | if (test_bit(FirstUse, &r->flags)) | |
1893 | rebuild_and_new++; | |
1894 | } | |
1895 | ||
1896 | d++; | |
b12d437b JB |
1897 | } |
1898 | ||
33e53f06 HM |
1899 | if (new_devs == rs->raid_disks || !rebuilds) { |
1900 | /* Replace a broken device */ | |
1901 | if (new_devs == 1 && !rs->delta_disks) | |
1902 | ; | |
1903 | if (new_devs == rs->raid_disks) { | |
1904 | DMINFO("Superblocks created for new raid set"); | |
b12d437b | 1905 | set_bit(MD_ARRAY_FIRST_USE, &mddev->flags); |
ecbfb9f1 | 1906 | _set_flag(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); |
33e53f06 HM |
1907 | mddev->recovery_cp = 0; |
1908 | } else if (new_devs && new_devs != rs->raid_disks && !rebuilds) { | |
1909 | DMERR("New device injected into existing raid set without " | |
1910 | "'delta_disks' or 'rebuild' parameter specified"); | |
b12d437b JB |
1911 | return -EINVAL; |
1912 | } | |
33e53f06 HM |
1913 | } else if (new_devs && new_devs != rebuilds) { |
1914 | DMERR("%u 'rebuild' devices cannot be injected into" | |
1915 | " a raid set with %u other first-time devices", | |
1916 | rebuilds, new_devs); | |
b12d437b | 1917 | return -EINVAL; |
33e53f06 HM |
1918 | } else if (rebuilds) { |
1919 | if (rebuild_and_new && rebuilds != rebuild_and_new) { | |
1920 | DMERR("new device%s provided without 'rebuild'", | |
1921 | new_devs > 1 ? "s" : ""); | |
1922 | return -EINVAL; | |
1923 | } else if (mddev->recovery_cp != MaxSector) { | |
1924 | DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)", | |
1925 | (unsigned long long) mddev->recovery_cp); | |
1926 | return -EINVAL; | |
1927 | } else if (mddev->reshape_position != MaxSector) { | |
1928 | DMERR("'rebuild' specified while raid set is being reshaped"); | |
1929 | return -EINVAL; | |
1930 | } | |
b12d437b JB |
1931 | } |
1932 | ||
1933 | /* | |
1934 | * Now we set the Faulty bit for those devices that are | |
1935 | * recorded in the superblock as failed. | |
1936 | */ | |
33e53f06 | 1937 | sb_retrieve_failed_devices(sb, failed_devices); |
dafb20fa | 1938 | rdev_for_each(r, mddev) { |
b12d437b JB |
1939 | if (!r->sb_page) |
1940 | continue; | |
1941 | sb2 = page_address(r->sb_page); | |
1942 | sb2->failed_devices = 0; | |
33e53f06 | 1943 | memset(sb2->extended_failed_devices, 0, sizeof(sb2->extended_failed_devices)); |
b12d437b JB |
1944 | |
1945 | /* | |
1946 | * Check for any device re-ordering. | |
1947 | */ | |
1948 | if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) { | |
1949 | role = le32_to_cpu(sb2->array_position); | |
33e53f06 HM |
1950 | if (role < 0) |
1951 | continue; | |
1952 | ||
b12d437b | 1953 | if (role != r->raid_disk) { |
33e53f06 HM |
1954 | if (_is_raid10_near(mddev->layout)) { |
1955 | if (mddev->raid_disks % _raid10_near_copies(mddev->layout) || | |
bd83a4c4 MS |
1956 | rs->raid_disks % rs->raid10_copies) { |
1957 | rs->ti->error = | |
1958 | "Cannot change raid10 near set to odd # of devices!"; | |
1959 | return -EINVAL; | |
1960 | } | |
33e53f06 HM |
1961 | |
1962 | sb2->array_position = cpu_to_le32(r->raid_disk); | |
1963 | ||
1964 | } else if (!(rs_is_raid10(rs) && rt_is_raid0(rs->raid_type)) && | |
bd83a4c4 MS |
1965 | !(rs_is_raid0(rs) && rt_is_raid10(rs->raid_type)) && |
1966 | !rt_is_raid1(rs->raid_type)) { | |
1967 | rs->ti->error = "Cannot change device positions in raid set"; | |
1968 | return -EINVAL; | |
1969 | } | |
33e53f06 | 1970 | |
bd83a4c4 | 1971 | DMINFO("raid device #%d now at position #%d", role, r->raid_disk); |
b12d437b JB |
1972 | } |
1973 | ||
1974 | /* | |
1975 | * Partial recovery is performed on | |
1976 | * returning failed devices. | |
1977 | */ | |
33e53f06 | 1978 | if (test_bit(role, (void *) failed_devices)) |
b12d437b JB |
1979 | set_bit(Faulty, &r->flags); |
1980 | } | |
1981 | } | |
1982 | ||
1983 | return 0; | |
1984 | } | |
1985 | ||
0cf45031 | 1986 | static int super_validate(struct raid_set *rs, struct md_rdev *rdev) |
b12d437b | 1987 | { |
0cf45031 | 1988 | struct mddev *mddev = &rs->md; |
33e53f06 HM |
1989 | struct dm_raid_superblock *sb; |
1990 | ||
3a1c1ef2 | 1991 | if (rs_is_raid0(rs) || !rdev->sb_page) |
33e53f06 HM |
1992 | return 0; |
1993 | ||
1994 | sb = page_address(rdev->sb_page); | |
b12d437b JB |
1995 | |
1996 | /* | |
1997 | * If mddev->events is not set, we know we have not yet initialized | |
1998 | * the array. | |
1999 | */ | |
33e53f06 | 2000 | if (!mddev->events && super_init_validation(rs, rdev)) |
b12d437b JB |
2001 | return -EINVAL; |
2002 | ||
ecbfb9f1 HM |
2003 | if (le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V180 || |
2004 | sb->incompat_features) { | |
2005 | rs->ti->error = "Unable to assemble array: No incompatible feature flags supported yet"; | |
4c9971ca HM |
2006 | return -EINVAL; |
2007 | } | |
2008 | ||
0cf45031 | 2009 | /* Enable bitmap creation for RAID levels != 0 */ |
676fa5ad | 2010 | mddev->bitmap_info.offset = rt_is_raid0(rs->raid_type) ? 0 : to_sector(4096); |
0cf45031 HM |
2011 | rdev->mddev->bitmap_info.default_offset = mddev->bitmap_info.offset; |
2012 | ||
33e53f06 HM |
2013 | if (!test_and_clear_bit(FirstUse, &rdev->flags)) { |
2014 | /* Retrieve device size stored in superblock to be prepared for shrink */ | |
2015 | rdev->sectors = le64_to_cpu(sb->sectors); | |
b12d437b | 2016 | rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset); |
33e53f06 HM |
2017 | if (rdev->recovery_offset == MaxSector) |
2018 | set_bit(In_sync, &rdev->flags); | |
2019 | /* | |
2020 | * If no reshape in progress -> we're recovering single | |
2021 | * disk(s) and have to set the device(s) to out-of-sync | |
2022 | */ | |
2023 | else if (rs->md.reshape_position == MaxSector) | |
2024 | clear_bit(In_sync, &rdev->flags); /* Mandatory for recovery */ | |
b12d437b JB |
2025 | } |
2026 | ||
2027 | /* | |
2028 | * If a device comes back, set it as not In_sync and no longer faulty. | |
2029 | */ | |
33e53f06 HM |
2030 | if (test_and_clear_bit(Faulty, &rdev->flags)) { |
2031 | rdev->recovery_offset = 0; | |
b12d437b JB |
2032 | clear_bit(In_sync, &rdev->flags); |
2033 | rdev->saved_raid_disk = rdev->raid_disk; | |
b12d437b JB |
2034 | } |
2035 | ||
33e53f06 HM |
2036 | /* Reshape support -> restore respective data offsets */ | |
2037 | rdev->data_offset = le64_to_cpu(sb->data_offset); | |
2038 | rdev->new_data_offset = le64_to_cpu(sb->new_data_offset); | |
b12d437b JB |
2039 | |
2040 | return 0; | |
2041 | } | |
2042 | ||
2043 | /* | |
2044 | * Analyse superblocks and select the freshest. | |
2045 | */ | |
2046 | static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) | |
2047 | { | |
73c6f239 | 2048 | int r; |
0447568f | 2049 | struct raid_dev *dev; |
a9ad8526 | 2050 | struct md_rdev *rdev, *tmp, *freshest; |
fd01b88c | 2051 | struct mddev *mddev = &rs->md; |
b12d437b JB |
2052 | |
2053 | freshest = NULL; | |
a9ad8526 | 2054 | rdev_for_each_safe(rdev, tmp, mddev) { |
761becff | 2055 | /* |
c76d53f4 | 2056 | * Skipping super_load due to CTR_FLAG_SYNC will cause |
761becff | 2057 | * the array to undergo initialization again as |
43157840 | 2058 | * though it were new. This is the intended effect |
761becff JB |
2059 | * of the "sync" directive. |
2060 | * | |
2061 | * When reshaping capability is added, we must ensure | |
2062 | * that the "sync" directive is disallowed during the | |
2063 | * reshape. | |
2064 | */ | |
ad51d7f1 | 2065 | if (_test_flag(CTR_FLAG_SYNC, rs->ctr_flags)) |
761becff JB |
2066 | continue; |
2067 | ||
b12d437b JB |
2068 | if (!rdev->meta_bdev) |
2069 | continue; | |
2070 | ||
73c6f239 | 2071 | r = super_load(rdev, freshest); |
b12d437b | 2072 | |
73c6f239 | 2073 | switch (r) { |
b12d437b JB |
2074 | case 1: |
2075 | freshest = rdev; | |
2076 | break; | |
2077 | case 0: | |
2078 | break; | |
2079 | default: | |
0447568f | 2080 | dev = container_of(rdev, struct raid_dev, rdev); |
55ebbb59 JB |
2081 | if (dev->meta_dev) |
2082 | dm_put_device(ti, dev->meta_dev); | |
0447568f | 2083 | |
55ebbb59 JB |
2084 | dev->meta_dev = NULL; |
2085 | rdev->meta_bdev = NULL; | |
0447568f | 2086 | |
55ebbb59 JB |
2087 | if (rdev->sb_page) |
2088 | put_page(rdev->sb_page); | |
0447568f | 2089 | |
55ebbb59 | 2090 | rdev->sb_page = NULL; |
0447568f | 2091 | |
55ebbb59 | 2092 | rdev->sb_loaded = 0; |
0447568f | 2093 | |
55ebbb59 JB |
2094 | /* |
2095 | * We might be able to salvage the data device | |
2096 | * even though the meta device has failed. For | |
2097 | * now, we behave as though '- -' had been | |
2098 | * set for this device in the table. | |
2099 | */ | |
2100 | if (dev->data_dev) | |
2101 | dm_put_device(ti, dev->data_dev); | |
0447568f | 2102 | |
55ebbb59 JB |
2103 | dev->data_dev = NULL; |
2104 | rdev->bdev = NULL; | |
0447568f | 2105 | |
55ebbb59 | 2106 | list_del(&rdev->same_set); |
b12d437b JB |
2107 | } |
2108 | } | |
2109 | ||
2110 | if (!freshest) | |
2111 | return 0; | |
2112 | ||
bd83a4c4 MS |
2113 | if (validate_raid_redundancy(rs)) { |
2114 | rs->ti->error = "Insufficient redundancy to activate array"; | |
2115 | return -EINVAL; | |
2116 | } | |
55ebbb59 | 2117 | |
b12d437b JB |
2118 | /* |
2119 | * Validation of the freshest device provides the source of | |
2120 | * validation for the remaining devices. | |
2121 | */ | |
bd83a4c4 MS |
2122 | if (super_validate(rs, freshest)) { |
2123 | rs->ti->error = "Unable to assemble array: Invalid superblocks"; | |
2124 | return -EINVAL; | |
2125 | } | |
b12d437b | 2126 | |
dafb20fa | 2127 | rdev_for_each(rdev, mddev) |
0cf45031 | 2128 | if ((rdev != freshest) && super_validate(rs, rdev)) |
b12d437b JB |
2129 | return -EINVAL; |
2130 | ||
2131 | return 0; | |
2132 | } | |
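/*
 * In short: the rdev whose superblock carries the highest event count wins
 * (super_load() returns 1 for it), it is validated first, and all remaining
 * superblocks are then validated against the in-core state it established.
 */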
2133 | ||
ecbfb9f1 HM |
2134 | /* Userspace reordered disks -> adjust raid_disk indexes in @rs */ | |
2135 | static void _reorder_raid_disk_indexes(struct raid_set *rs) | |
2136 | { | |
2137 | int i = 0; | |
2138 | struct md_rdev *rdev; | |
2139 | ||
2140 | rdev_for_each(rdev, &rs->md) { | |
2141 | rdev->raid_disk = i++; | |
2142 | rdev->saved_raid_disk = rdev->new_raid_disk = -1; | |
2143 | } | |
2144 | } | |
2145 | ||
2146 | /* | |
2147 | * Setup @rs for takeover by a different raid level | |
2148 | */ | |
2149 | static int rs_setup_takeover(struct raid_set *rs) | |
2150 | { | |
2151 | struct mddev *mddev = &rs->md; | |
2152 | struct md_rdev *rdev; | |
2153 | unsigned int d = mddev->raid_disks = rs->raid_disks; | |
2154 | sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset; | |
2155 | ||
2156 | if (rt_is_raid10(rs->raid_type)) { | |
2157 | if (mddev->level == 0) { | |
2158 | /* Userspace reordered disks -> adjust raid_disk indexes */ | |
2159 | _reorder_raid_disk_indexes(rs); | |
2160 | ||
2161 | /* raid0 -> raid10_far layout */ | |
2162 | mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR, | |
2163 | rs->raid10_copies); | |
2164 | } else if (mddev->level == 1) | |
2165 | /* raid1 -> raid10_near layout */ | |
2166 | mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR, | |
2167 | rs->raid_disks); | |
2168 | else | |
2169 | return -EINVAL; | |
2170 | ||
2171 | } | |
2172 | ||
2173 | clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags); | |
2174 | mddev->recovery_cp = MaxSector; | |
2175 | ||
2176 | while (d--) { | |
2177 | rdev = &rs->dev[d].rdev; | |
2178 | ||
2179 | if (test_bit(d, (void *) rs->rebuild_disks)) { | |
2180 | clear_bit(In_sync, &rdev->flags); | |
2181 | clear_bit(Faulty, &rdev->flags); | |
2182 | mddev->recovery_cp = rdev->recovery_offset = 0; | |
2183 | /* Bitmap has to be created when we do an "up" takeover */ | |
2184 | set_bit(MD_ARRAY_FIRST_USE, &mddev->flags); | |
2185 | } | |
2186 | ||
2187 | rdev->new_data_offset = new_data_offset; | |
2188 | } | |
2189 | ||
ecbfb9f1 HM |
2190 | return 0; |
2191 | } | |
2192 | ||
75b8e04b | 2193 | /* |
48cf06bc HM |
2194 | * Enable/disable discard support on RAID set depending on |
2195 | * RAID level and discard properties of underlying RAID members. | |
75b8e04b | 2196 | */ |
ecbfb9f1 | 2197 | static void configure_discard_support(struct raid_set *rs) |
75b8e04b | 2198 | { |
48cf06bc HM |
2199 | int i; |
2200 | bool raid456; | |
ecbfb9f1 | 2201 | struct dm_target *ti = rs->ti; |
48cf06bc | 2202 | |
75b8e04b HM |
2203 | /* Assume discards not supported until after checks below. */ |
2204 | ti->discards_supported = false; | |
2205 | ||
2206 | /* RAID level 4,5,6 require discard_zeroes_data for data integrity! */ | |
48cf06bc | 2207 | raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6); |
75b8e04b | 2208 | |
48cf06bc | 2209 | for (i = 0; i < rs->md.raid_disks; i++) { |
d20c4b08 | 2210 | struct request_queue *q; |
48cf06bc | 2211 | |
d20c4b08 HM |
2212 | if (!rs->dev[i].rdev.bdev) |
2213 | continue; | |
2214 | ||
2215 | q = bdev_get_queue(rs->dev[i].rdev.bdev); | |
48cf06bc HM |
2216 | if (!q || !blk_queue_discard(q)) |
2217 | return; | |
2218 | ||
2219 | if (raid456) { | |
2220 | if (!q->limits.discard_zeroes_data) | |
2221 | return; | |
2222 | if (!devices_handle_discard_safely) { | |
2223 | DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty."); | |
2224 | DMERR("Set dm-raid.devices_handle_discard_safely=Y to override."); | |
2225 | return; | |
2226 | } | |
2227 | } | |
2228 | } | |
2229 | ||
2230 | /* All RAID members properly support discards */ | |
75b8e04b HM |
2231 | ti->discards_supported = true; |
2232 | ||
2233 | /* | |
2234 | * RAID1 and RAID10 personalities require bio splitting, | |
48cf06bc | 2235 | * RAID0/4/5/6 don't and process large discard bios properly. |
75b8e04b | 2236 | */ |
48cf06bc | 2237 | ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10); |
75b8e04b HM |
2238 | ti->num_discard_bios = 1; |
2239 | } | |
2240 | ||
9d09e663 | 2241 | /* |
73c6f239 | 2242 | * Construct a RAID0/1/10/4/5/6 mapping: |
9d09e663 | 2243 | * Args: |
43157840 MS |
2244 | * <raid_type> <#raid_params> <raid_params>{0,} \ |
2245 | * <#raid_devs> [<meta_dev1> <dev1>]{1,} | |
9d09e663 | 2246 | * |
43157840 | 2247 | * <raid_params> varies by <raid_type>. See 'parse_raid_params' for |
9d09e663 | 2248 | * details on possible <raid_params>. |
73c6f239 HM |
2249 | * |
2250 | * Userspace is free to initialize the metadata devices (and hence the superblocks) | |
2251 | * to enforce recreation based on the passed-in table parameters. | |
2252 | * | |
9d09e663 N |
2253 | */ |
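/*
 * Illustrative only (device names and sizes are made up): a 3-device raid5
 * set with 64-sector chunks and a 1024-sector region_size could be built
 * from a table line such as
 *
 *   0 2097152 raid raid5_ls 3 64 region_size 1024 \
 *       3 /dev/meta0 /dev/data0 /dev/meta1 /dev/data1 /dev/meta2 /dev/data2
 *
 * where <#raid_params> (here 3) counts the words between it and <#raid_devs>.
 */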
2254 | static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv) | |
2255 | { | |
73c6f239 | 2256 | int r; |
9d09e663 | 2257 | struct raid_type *rt; |
92c83d79 | 2258 | unsigned num_raid_params, num_raid_devs; |
9d09e663 | 2259 | struct raid_set *rs = NULL; |
92c83d79 HM |
2260 | const char *arg; |
2261 | struct dm_arg_set as = { argc, argv }, as_nrd; | |
2262 | struct dm_arg _args[] = { | |
2263 | { 0, as.argc, "Cannot understand number of raid parameters" }, | |
2264 | { 1, 254, "Cannot understand number of raid devices parameters" } | |
2265 | }; | |
2266 | ||
2267 | /* Must have <raid_type> */ | |
2268 | arg = dm_shift_arg(&as); | |
bd83a4c4 MS |
2269 | if (!arg) { |
2270 | ti->error = "No arguments"; | |
2271 | return -EINVAL; | |
2272 | } | |
9d09e663 | 2273 | |
92c83d79 | 2274 | rt = get_raid_type(arg); |
bd83a4c4 MS |
2275 | if (!rt) { |
2276 | ti->error = "Unrecognised raid_type"; | |
2277 | return -EINVAL; | |
2278 | } | |
9d09e663 | 2279 | |
92c83d79 HM |
2280 | /* Must have <#raid_params> */ |
2281 | if (dm_read_arg_group(_args, &as, &num_raid_params, &ti->error)) | |
43157840 | 2282 | return -EINVAL; |
9d09e663 | 2283 | |
92c83d79 HM |
2284 | /* Number of raid device tuples <meta_dev data_dev> */ | |
2285 | as_nrd = as; | |
2286 | dm_consume_args(&as_nrd, num_raid_params); | |
2287 | _args[1].max = (as_nrd.argc - 1) / 2; | |
2288 | if (dm_read_arg(_args + 1, &as_nrd, &num_raid_devs, &ti->error)) | |
43157840 | 2289 | return -EINVAL; |
9d09e663 | 2290 | |
bd83a4c4 MS |
2291 | if (!_in_range(num_raid_devs, 1, MAX_RAID_DEVICES)) { |
2292 | ti->error = "Invalid number of supplied raid devices"; | |
2293 | return -EINVAL; | |
2294 | } | |
3ca5a21a | 2295 | |
92c83d79 | 2296 | rs = context_alloc(ti, rt, num_raid_devs); |
9d09e663 N |
2297 | if (IS_ERR(rs)) |
2298 | return PTR_ERR(rs); | |
2299 | ||
92c83d79 | 2300 | r = parse_raid_params(rs, &as, num_raid_params); |
73c6f239 | 2301 | if (r) |
9d09e663 N |
2302 | goto bad; |
2303 | ||
702108d1 | 2304 | r = parse_dev_params(rs, &as); |
73c6f239 | 2305 | if (r) |
9d09e663 N |
2306 | goto bad; |
2307 | ||
b12d437b | 2308 | rs->md.sync_super = super_sync; |
ecbfb9f1 HM |
2309 | |
2310 | /* | |
2311 | * Backup any new raid set level, layout, ... | |
2312 | * requested to be able to compare to superblock | |
2313 | * members for conversion decisions. | |
2314 | */ | |
2315 | rs_config_backup(rs); | |
2316 | ||
73c6f239 HM |
2317 | r = analyse_superblocks(ti, rs); |
2318 | if (r) | |
b12d437b JB |
2319 | goto bad; |
2320 | ||
9d09e663 | 2321 | INIT_WORK(&rs->md.event_work, do_table_event); |
9d09e663 | 2322 | ti->private = rs; |
55a62eef | 2323 | ti->num_flush_bios = 1; |
9d09e663 | 2324 | |
ecbfb9f1 HM |
2325 | /* Restore any requested new layout for conversion decision */ |
2326 | rs_config_restore(rs); | |
2327 | ||
75b8e04b | 2328 | /* |
ecbfb9f1 HM |
2329 | * If a takeover is needed, just set the level to |
2330 | * the new requested one and allow the raid set to run. | |
75b8e04b | 2331 | */ |
ecbfb9f1 HM |
2332 | if (rs_takeover_requested(rs)) { |
2333 | r = rs_check_takeover(rs); | |
2334 | if (r) | |
2335 | return r; | |
2336 | ||
2337 | r = rs_setup_takeover(rs); | |
2338 | if (r) | |
2339 | return r; | |
2340 | ||
3a1c1ef2 | 2341 | /* Tell preresume to update superblocks with new layout */ |
ecbfb9f1 | 2342 | _set_flag(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); |
3a1c1ef2 HM |
2343 | rs_set_new(rs); |
2344 | } else | |
2345 | rs_set_cur(rs); | |
ecbfb9f1 HM |
2346 | |
2347 | /* Start raid set read-only and assumed clean to change in raid_resume() */ | |
2348 | rs->md.ro = 1; | |
2349 | rs->md.in_sync = 1; | |
2350 | set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); | |
75b8e04b | 2351 | |
0cf45031 HM |
2352 | /* The mddev lock has to be held while running the array */ | |
2353 | mddev_lock_nointr(&rs->md); | |
73c6f239 | 2354 | r = md_run(&rs->md); |
9d09e663 | 2355 | rs->md.in_sync = 0; /* Assume already marked dirty */ |
0cf45031 | 2356 | mddev_unlock(&rs->md); |
9d09e663 | 2357 | |
73c6f239 | 2358 | if (r) { |
9d09e663 N |
2359 | ti->error = "Failed to run raid array"; | |
2360 | goto bad; | |
2361 | } | |
2362 | ||
63f33b8d | 2363 | if (ti->len != rs->md.array_sectors) { |
bd83a4c4 MS |
2364 | ti->error = "Array size does not match requested target length"; |
2365 | r = -EINVAL; | |
63f33b8d JB |
2366 | goto size_mismatch; |
2367 | } | |
9d09e663 | 2368 | rs->callbacks.congested_fn = raid_is_congested; |
9d09e663 N |
2369 | dm_table_add_target_callbacks(ti->table, &rs->callbacks); |
2370 | ||
32737279 | 2371 | mddev_suspend(&rs->md); |
9d09e663 N |
2372 | return 0; |
2373 | ||
63f33b8d JB |
2374 | size_mismatch: |
2375 | md_stop(&rs->md); | |
9d09e663 N |
2376 | bad: |
2377 | context_free(rs); | |
2378 | ||
73c6f239 | 2379 | return r; |
9d09e663 N |
2380 | } |
2381 | ||
2382 | static void raid_dtr(struct dm_target *ti) | |
2383 | { | |
2384 | struct raid_set *rs = ti->private; | |
2385 | ||
2386 | list_del_init(&rs->callbacks.list); | |
2387 | md_stop(&rs->md); | |
2388 | context_free(rs); | |
2389 | } | |
2390 | ||
7de3ee57 | 2391 | static int raid_map(struct dm_target *ti, struct bio *bio) |
9d09e663 N |
2392 | { |
2393 | struct raid_set *rs = ti->private; | |
fd01b88c | 2394 | struct mddev *mddev = &rs->md; |
9d09e663 N |
2395 | |
2396 | mddev->pers->make_request(mddev, bio); | |
2397 | ||
2398 | return DM_MAPIO_SUBMITTED; | |
2399 | } | |
2400 | ||
3a1c1ef2 | 2401 | /* Return string describing the current sync action of @mddev */ |
be83651f JB |
2402 | static const char *decipher_sync_action(struct mddev *mddev) |
2403 | { | |
2404 | if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) | |
2405 | return "frozen"; | |
2406 | ||
2407 | if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || | |
2408 | (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) { | |
2409 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) | |
2410 | return "reshape"; | |
2411 | ||
2412 | if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { | |
2413 | if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) | |
2414 | return "resync"; | |
2415 | else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) | |
2416 | return "check"; | |
2417 | return "repair"; | |
2418 | } | |
2419 | ||
2420 | if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) | |
2421 | return "recover"; | |
2422 | } | |
2423 | ||
2424 | return "idle"; | |
2425 | } | |
2426 | ||
3a1c1ef2 HM |
2427 | /* |
2428 | * Return status string @rdev | |
2429 | * | |
2430 | * Status characters: | |
2431 | * | |
2432 | * 'D' = Dead/Failed device | |
2433 | * 'a' = Alive but not in-sync | |
2434 | * 'A' = Alive and in-sync | |
2435 | */ | |
2436 | static const char *_raid_dev_status(struct md_rdev *rdev, bool array_in_sync) | |
9d09e663 | 2437 | { |
3a1c1ef2 HM |
2438 | if (test_bit(Faulty, &rdev->flags)) |
2439 | return "D"; | |
2440 | else if (!array_in_sync || !test_bit(In_sync, &rdev->flags)) | |
2441 | return "a"; | |
2442 | else | |
2443 | return "A"; | |
2444 | } | |
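/*
 * Together with raid_status() below these characters open the
 * STATUSTYPE_INFO line; a fully synced two-device raid1 set might report
 * something like (values illustrative only):
 *
 *   raid1 2 AA 262144/262144 idle 0 0
 *
 * i.e. raid type, #devices, per-device health, in-sync ratio, sync action,
 * mismatch count and data offset.
 */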
9d09e663 | 2445 | |
3a1c1ef2 HM |
2446 | /* Helper to return resync/reshape progress for @rs and @array_in_sync */ |
2447 | static sector_t rs_get_progress(struct raid_set *rs, | |
2448 | sector_t resync_max_sectors, bool *array_in_sync) | |
2449 | { | |
2450 | sector_t r, recovery_cp, curr_resync_completed; | |
2451 | struct mddev *mddev = &rs->md; | |
9d09e663 | 2452 | |
3a1c1ef2 HM |
2453 | curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp; |
2454 | recovery_cp = mddev->recovery_cp; | |
2455 | *array_in_sync = false; | |
2456 | ||
2457 | if (rs_is_raid0(rs)) { | |
2458 | r = resync_max_sectors; | |
2459 | *array_in_sync = true; | |
2460 | ||
2461 | } else { | |
2462 | r = mddev->reshape_position; | |
2463 | ||
2464 | /* Reshape is relative to the array size */ | |
2465 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) || | |
2466 | r != MaxSector) { | |
2467 | if (r == MaxSector) { | |
2468 | *array_in_sync = true; | |
2469 | r = resync_max_sectors; | |
0cf45031 | 2470 | } else { |
3a1c1ef2 HM |
2471 | /* Got to reverse on backward reshape */ |
2472 | if (mddev->reshape_backwards) | |
2473 | r = mddev->array_sectors - r; | |
2474 | ||
2475 | /* Divide by # of data stripes */ | |
2476 | sector_div(r, mddev_data_stripes(rs)); | |
0cf45031 | 2477 | } |
3a1c1ef2 HM |
2478 | |
2479 | /* Sync is relative to the component device size */ | |
2480 | } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) | |
2481 | r = curr_resync_completed; | |
2482 | else | |
2483 | r = recovery_cp; | |
2484 | ||
2485 | if (r == MaxSector) { | |
2486 | /* | |
2487 | * Sync complete. | |
2488 | */ | |
2489 | *array_in_sync = true; | |
2490 | r = resync_max_sectors; | |
2491 | } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { | |
2492 | /* | |
2493 | * If "check" or "repair" is occurring, the raid set has | |
2494 | * undergone an initial sync and the health characters | |
2495 | * should not be 'a' anymore. | |
2496 | */ | |
2497 | *array_in_sync = true; | |
0cf45031 | 2498 | } else { |
3a1c1ef2 | 2499 | struct md_rdev *rdev; |
be83651f | 2500 | |
3a1c1ef2 HM |
2501 | /* |
2502 | * The raid set may be doing an initial sync, or it may | |
43157840 | 2503 | * be rebuilding individual components. If all the |
3a1c1ef2 HM |
2504 | * devices are In_sync, then it is the raid set that is |
2505 | * being initialized. | |
2506 | */ | |
2507 | rdev_for_each(rdev, mddev) | |
2508 | if (!test_bit(In_sync, &rdev->flags)) | |
2509 | *array_in_sync = true; | |
2510 | #if 0 | |
2511 | r = 0; /* HM FIXME: TESTME: https://bugzilla.redhat.com/show_bug.cgi?id=1210637 ? */ | |
2512 | #endif | |
2e727c3c | 2513 | } |
3a1c1ef2 HM |
2514 | } |
2515 | ||
2516 | return r; | |
2517 | } | |
2518 | ||
2519 | /* Helper to return @dev name or "-" if !@dev */ | |
2520 | static const char *_get_dev_name(struct dm_dev *dev) | |
2521 | { | |
2522 | return dev ? dev->name : "-"; | |
2523 | } | |
2524 | ||
2525 | static void raid_status(struct dm_target *ti, status_type_t type, | |
2526 | unsigned int status_flags, char *result, unsigned int maxlen) | |
2527 | { | |
2528 | struct raid_set *rs = ti->private; | |
2529 | struct mddev *mddev = &rs->md; | |
2530 | struct r5conf *conf = mddev->private; | |
2531 | int max_nr_stripes = conf ? conf->max_nr_stripes : 0; | |
2532 | bool array_in_sync; | |
2533 | unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */ | |
2534 | unsigned int sz = 0; | |
2535 | unsigned int write_mostly_params = 0; | |
2536 | sector_t progress, resync_max_sectors, resync_mismatches; | |
2537 | const char *sync_action; | |
2538 | struct raid_type *rt; | |
2539 | struct md_rdev *rdev; | |
2540 | ||
2541 | switch (type) { | |
2542 | case STATUSTYPE_INFO: | |
2543 | /* *Should* always succeed */ | |
2544 | rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout); | |
2545 | if (!rt) | |
2546 | return; | |
2547 | ||
2548 | DMEMIT("%s %d ", rt ? rt->name : "unknown", mddev->raid_disks); | |
2549 | ||
2550 | /* Access most recent mddev properties for status output */ | |
2551 | smp_rmb(); | |
2552 | /* Get sensible max sectors even if raid set not yet started */ | |
2553 | resync_max_sectors = _test_flag(RT_FLAG_RS_PRERESUMED, rs->runtime_flags) ? | |
2554 | mddev->resync_max_sectors : mddev->dev_sectors; | |
2555 | progress = rs_get_progress(rs, resync_max_sectors, &array_in_sync); | |
2556 | resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ? | |
2557 | (unsigned int) atomic64_read(&mddev->resync_mismatches) : 0; | |
2558 | sync_action = decipher_sync_action(&rs->md); | |
2559 | ||
2560 | /* HM FIXME: do we want another state char for raid0? It shows 'D' or 'A' now */ | |
2561 | rdev_for_each(rdev, mddev) | |
2562 | DMEMIT(_raid_dev_status(rdev, array_in_sync)); | |
9d09e663 | 2563 | |
2e727c3c | 2564 | /* |
3a1c1ef2 | 2565 | * In-sync/Reshape ratio: |
2e727c3c | 2566 | * The in-sync ratio shows the progress of: |
3a1c1ef2 HM |
2567 | * - Initializing the raid set |
2568 | * - Rebuilding a subset of devices of the raid set | |
2e727c3c JB |
2569 | * The user can distinguish between the two by referring |
2570 | * to the status characters. | |
3a1c1ef2 HM |
2571 | * |
2572 | * The reshape ratio shows the progress of | |
2573 | * changing the raid layout or the number of | |
2574 | * disks of a raid set | |
2e727c3c | 2575 | */ |
3a1c1ef2 HM |
2576 | DMEMIT(" %llu/%llu", (unsigned long long) progress, |
2577 | (unsigned long long) resync_max_sectors); | |
9d09e663 | 2578 | |
be83651f | 2579 | /* |
3a1c1ef2 HM |
2580 | * v1.5.0+: |
2581 | * | |
be83651f | 2582 | * Sync action: |
3a1c1ef2 | 2583 | * See Documentation/device-mapper/dm-raid.txt for |
be83651f JB |
2584 | * information on each of these states. |
2585 | */ | |
3a1c1ef2 | 2586 | DMEMIT(" %s", sync_action); |
be83651f JB |
2587 | |
2588 | /* | |
3a1c1ef2 HM |
2589 | * v1.5.0+: |
2590 | * | |
be83651f JB |
2591 | * resync_mismatches/mismatch_cnt |
2592 | * This field shows the number of discrepancies found when | |
3a1c1ef2 | 2593 | * performing a "check" of the raid set. |
be83651f | 2594 | */ |
3a1c1ef2 | 2595 | DMEMIT(" %llu", (unsigned long long) resync_mismatches); |
9d09e663 | 2596 | |
3a1c1ef2 HM |
2597 | /* |
2598 | * v1.8.0+: | |
2599 | * | |
2600 | * data_offset (needed for out of space reshaping) | |
2601 | * This field shows the data offset into the data | |
2602 | * image LV where the first stripes data starts. | |
2603 | * | |
2604 | * We keep data_offset equal on all raid disks of the set, | |
2605 | * so retrieving it from the first raid disk is sufficient. | |
2606 | */ | |
2607 | DMEMIT(" %llu", (unsigned long long) rs->dev[0].rdev.data_offset); | |
2608 | break; | |
9d09e663 | 2609 | |
3a1c1ef2 HM |
2610 | case STATUSTYPE_TABLE: |
2611 | /* Report the table line string you would use to construct this raid set */ | |
2612 | ||
2613 | /* Calculate raid parameter count */ | |
2614 | rdev_for_each(rdev, mddev) | |
2615 | if (test_bit(WriteMostly, &rdev->flags)) | |
2616 | write_mostly_params += 2; | |
2617 | raid_param_cnt += memweight(rs->rebuild_disks, | |
2618 | DISKS_ARRAY_ELEMS * sizeof(*rs->rebuild_disks)) * 2 + | |
2619 | write_mostly_params + | |
2620 | hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) + | |
2621 | hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2; | |
2622 | /* Emit table line */ | |
2623 | DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors); | |
2624 | if (_test_flag(CTR_FLAG_RAID10_FORMAT, rs->ctr_flags)) | |
2625 | DMEMIT(" %s %s", _argname_by_flag(CTR_FLAG_RAID10_FORMAT), | |
2626 | raid10_md_layout_to_format(mddev->layout)); | |
2627 | if (_test_flag(CTR_FLAG_RAID10_COPIES, rs->ctr_flags)) | |
2628 | DMEMIT(" %s %d", _argname_by_flag(CTR_FLAG_RAID10_COPIES), | |
2629 | raid10_md_layout_to_copies(mddev->layout)); | |
ad51d7f1 | 2630 | if (_test_flag(CTR_FLAG_NOSYNC, rs->ctr_flags)) |
3a1c1ef2 HM |
2631 | DMEMIT(" %s", _argname_by_flag(CTR_FLAG_NOSYNC)); |
2632 | if (_test_flag(CTR_FLAG_SYNC, rs->ctr_flags)) | |
2633 | DMEMIT(" %s", _argname_by_flag(CTR_FLAG_SYNC)); | |
2634 | if (_test_flag(CTR_FLAG_REGION_SIZE, rs->ctr_flags)) | |
2635 | DMEMIT(" %s %llu", _argname_by_flag(CTR_FLAG_REGION_SIZE), | |
2636 | (unsigned long long) to_sector(mddev->bitmap_info.chunksize)); | |
2637 | if (_test_flag(CTR_FLAG_DATA_OFFSET, rs->ctr_flags)) | |
2638 | DMEMIT(" %s %llu", _argname_by_flag(CTR_FLAG_DATA_OFFSET), | |
2639 | (unsigned long long) rs->data_offset); | |
ad51d7f1 | 2640 | if (_test_flag(CTR_FLAG_DAEMON_SLEEP, rs->ctr_flags)) |
3a1c1ef2 HM |
2641 | DMEMIT(" %s %lu", _argname_by_flag(CTR_FLAG_DAEMON_SLEEP), |
2642 | mddev->bitmap_info.daemon_sleep); | |
2643 | if (_test_flag(CTR_FLAG_DELTA_DISKS, rs->ctr_flags)) | |
2644 | DMEMIT(" %s %d", _argname_by_flag(CTR_FLAG_DELTA_DISKS), | |
2645 | mddev->delta_disks); | |
2646 | if (_test_flag(CTR_FLAG_STRIPE_CACHE, rs->ctr_flags)) | |
2647 | DMEMIT(" %s %d", _argname_by_flag(CTR_FLAG_STRIPE_CACHE), | |
2648 | max_nr_stripes); | |
2649 | rdev_for_each(rdev, mddev) | |
2650 | if (test_bit(rdev->raid_disk, (void *) rs->rebuild_disks)) | |
2651 | DMEMIT(" %s %u", _argname_by_flag(CTR_FLAG_REBUILD), | |
2652 | rdev->raid_disk); | |
2653 | rdev_for_each(rdev, mddev) | |
2654 | if (test_bit(WriteMostly, &rdev->flags)) | |
2655 | DMEMIT(" %s %d", _argname_by_flag(CTR_FLAG_WRITE_MOSTLY), | |
2656 | rdev->raid_disk); | |
ad51d7f1 | 2657 | if (_test_flag(CTR_FLAG_MAX_WRITE_BEHIND, rs->ctr_flags)) |
3a1c1ef2 HM |
2658 | DMEMIT(" %s %lu", _argname_by_flag(CTR_FLAG_MAX_WRITE_BEHIND), |
2659 | mddev->bitmap_info.max_write_behind); | |
2660 | if (_test_flag(CTR_FLAG_MAX_RECOVERY_RATE, rs->ctr_flags)) | |
2661 | DMEMIT(" %s %d", _argname_by_flag(CTR_FLAG_MAX_RECOVERY_RATE), | |
2662 | mddev->sync_speed_max); | |
2663 | if (_test_flag(CTR_FLAG_MIN_RECOVERY_RATE, rs->ctr_flags)) | |
2664 | DMEMIT(" %s %d", _argname_by_flag(CTR_FLAG_MIN_RECOVERY_RATE), | |
2665 | mddev->sync_speed_min); | |
2666 | DMEMIT(" %d", rs->raid_disks); | |
2667 | rdev_for_each(rdev, mddev) { | |
2668 | struct raid_dev *rd = container_of(rdev, struct raid_dev, rdev); | |
2669 | ||
2670 | DMEMIT(" %s %s", _get_dev_name(rd->meta_dev), | |
2671 | _get_dev_name(rd->data_dev)); | |
9d09e663 N |
2672 | } |
2673 | } | |
9d09e663 N |
2674 | } |
2675 | ||
be83651f JB |
2676 | static int raid_message(struct dm_target *ti, unsigned argc, char **argv) |
2677 | { | |
2678 | struct raid_set *rs = ti->private; | |
2679 | struct mddev *mddev = &rs->md; | |
2680 | ||
2681 | if (!strcasecmp(argv[0], "reshape")) { | |
2682 | DMERR("Reshape not supported."); | |
2683 | return -EINVAL; | |
2684 | } | |
2685 | ||
2686 | if (!mddev->pers || !mddev->pers->sync_request) | |
2687 | return -EINVAL; | |
2688 | ||
2689 | if (!strcasecmp(argv[0], "frozen")) | |
2690 | set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | |
2691 | else | |
2692 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | |
2693 | ||
2694 | if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) { | |
2695 | if (mddev->sync_thread) { | |
2696 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); | |
2697 | md_reap_sync_thread(mddev); | |
2698 | } | |
2699 | } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || | |
2700 | test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) | |
2701 | return -EBUSY; | |
2702 | else if (!strcasecmp(argv[0], "resync")) | |
3a1c1ef2 HM |
2703 | ; /* MD_RECOVERY_NEEDED set below */ |
2704 | else if (!strcasecmp(argv[0], "recover")) | |
be83651f | 2705 | set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); |
3a1c1ef2 | 2706 | else { |
be83651f JB |
2707 | if (!strcasecmp(argv[0], "check")) |
2708 | set_bit(MD_RECOVERY_CHECK, &mddev->recovery); | |
2709 | else if (!!strcasecmp(argv[0], "repair")) | |
2710 | return -EINVAL; | |
2711 | set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); | |
2712 | set_bit(MD_RECOVERY_SYNC, &mddev->recovery); | |
2713 | } | |
2714 | if (mddev->ro == 2) { | |
2715 | /* A write to sync_action is enough to justify | |
2716 | * canceling read-auto mode | |
2717 | */ | |
2718 | mddev->ro = 0; | |
3a1c1ef2 | 2719 | if (!mddev->suspended && mddev->sync_thread) |
be83651f JB |
2720 | md_wakeup_thread(mddev->sync_thread); |
2721 | } | |
2722 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | |
3a1c1ef2 | 2723 | if (!mddev->suspended && mddev->thread) |
be83651f JB |
2724 | md_wakeup_thread(mddev->thread); |
2725 | ||
2726 | return 0; | |
2727 | } | |
2728 | ||
2729 | static int raid_iterate_devices(struct dm_target *ti, | |
2730 | iterate_devices_callout_fn fn, void *data) | |
9d09e663 N |
2731 | { |
2732 | struct raid_set *rs = ti->private; | |
2733 | unsigned i; | |
73c6f239 | 2734 | int r = 0; |
9d09e663 | 2735 | |
73c6f239 | 2736 | for (i = 0; !r && i < rs->md.raid_disks; i++) |
9d09e663 | 2737 | if (rs->dev[i].data_dev) |
73c6f239 | 2738 | r = fn(ti, |
9d09e663 N |
2739 | rs->dev[i].data_dev, |
2740 | 0, /* No offset on data devs */ | |
2741 | rs->md.dev_sectors, | |
2742 | data); | |
2743 | ||
73c6f239 | 2744 | return r; |
9d09e663 N |
2745 | } |
2746 | ||
2747 | static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits) | |
2748 | { | |
2749 | struct raid_set *rs = ti->private; | |
2750 | unsigned chunk_size = rs->md.chunk_sectors << 9; | |
d1688a6d | 2751 | struct r5conf *conf = rs->md.private; |
9d09e663 N |
2752 | |
2753 | blk_limits_io_min(limits, chunk_size); | |
2754 | blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded)); | |
2755 | } | |
2756 | ||
2757 | static void raid_presuspend(struct dm_target *ti) | |
2758 | { | |
2759 | struct raid_set *rs = ti->private; | |
2760 | ||
2761 | md_stop_writes(&rs->md); | |
2762 | } | |
2763 | ||
2764 | static void raid_postsuspend(struct dm_target *ti) | |
2765 | { | |
2766 | struct raid_set *rs = ti->private; | |
2767 | ||
2768 | mddev_suspend(&rs->md); | |
2769 | } | |
2770 | ||
f381e71b | 2771 | static void attempt_restore_of_faulty_devices(struct raid_set *rs) |
9d09e663 | 2772 | { |
9092c02d JB |
2773 | int i; |
2774 | uint64_t failed_devices, cleared_failed_devices = 0; | |
2775 | unsigned long flags; | |
2776 | struct dm_raid_superblock *sb; | |
9092c02d | 2777 | struct md_rdev *r; |
9d09e663 | 2778 | |
f381e71b JB |
2779 | for (i = 0; i < rs->md.raid_disks; i++) { |
2780 | r = &rs->dev[i].rdev; | |
2781 | if (test_bit(Faulty, &r->flags) && r->sb_page && | |
796a5cf0 MC |
2782 | sync_page_io(r, 0, r->sb_size, r->sb_page, REQ_OP_READ, 0, |
2783 | 1)) { | |
f381e71b JB |
2784 | DMINFO("Faulty %s device #%d has readable super block." |
2785 | " Attempting to revive it.", | |
2786 | rs->raid_type->name, i); | |
a4dc163a JB |
2787 | |
2788 | /* | |
2789 | * Faulty bit may be set, but sometimes the array can | |
2790 | * be suspended before the personalities can respond | |
2791 | * by removing the device from the array (i.e. calling | |
43157840 | 2792 | * 'hot_remove_disk'). If they haven't yet removed |
a4dc163a JB |
2793 | * the failed device, its 'raid_disk' number will be |
2794 | * '>= 0' - meaning we must call this function | |
2795 | * ourselves. | |
2796 | */ | |
2797 | if ((r->raid_disk >= 0) && | |
2798 | (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0)) | |
2799 | /* Failed to revive this device, try next */ | |
2800 | continue; | |
2801 | ||
f381e71b JB |
2802 | r->raid_disk = i; |
2803 | r->saved_raid_disk = i; | |
2804 | flags = r->flags; | |
2805 | clear_bit(Faulty, &r->flags); | |
2806 | clear_bit(WriteErrorSeen, &r->flags); | |
2807 | clear_bit(In_sync, &r->flags); | |
2808 | if (r->mddev->pers->hot_add_disk(r->mddev, r)) { | |
2809 | r->raid_disk = -1; | |
2810 | r->saved_raid_disk = -1; | |
2811 | r->flags = flags; | |
2812 | } else { | |
2813 | r->recovery_offset = 0; | |
2814 | cleared_failed_devices |= 1ULL << i; | 
2815 | } | |
2816 | } | |
2817 | } | |
2818 | if (cleared_failed_devices) { | |
2819 | rdev_for_each(r, &rs->md) { | |
2820 | sb = page_address(r->sb_page); | |
2821 | failed_devices = le64_to_cpu(sb->failed_devices); | |
2822 | failed_devices &= ~cleared_failed_devices; | |
2823 | sb->failed_devices = cpu_to_le64(failed_devices); | |
2824 | } | |
2825 | } | |
2826 | } | |
2827 | ||
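A note on the bookkeeping above: sb->failed_devices is a 64-bit mask, so the per-device bit has to be built with a 64-bit shift. A minimal sketch of the pitfall, using a hypothetical helper:

/*
 * Hypothetical helper, for illustration only.  With a plain int, "1 << i"
 * is undefined once i reaches 31, so a revived device with a high index
 * could never be cleared from the on-disk failed_devices mask.
 */
static inline uint64_t failed_device_bit(int i)
{
	return 1ULL << i;	/* 64-bit shift, valid for 0 <= i <= 63 */
}
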
ecbfb9f1 HM |
2828 | /* Load the dirty region bitmap */ |
2829 | static int _bitmap_load(struct raid_set *rs) | |
2830 | { | |
2831 | int r = 0; | |
2832 | ||
2833 | /* Try loading the bitmap unless "raid0", which does not have one */ | |
2834 | if (!rs_is_raid0(rs) && | |
2835 | !_test_and_set_flag(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) { | |
2836 | r = bitmap_load(&rs->md); | |
2837 | if (r) | |
2838 | DMERR("Failed to load bitmap"); | |
2839 | } | |
2840 | ||
2841 | return r; | |
2842 | } | |
2843 | ||
2844 | static int raid_preresume(struct dm_target *ti) | |
2845 | { | |
2846 | struct raid_set *rs = ti->private; | |
2847 | struct mddev *mddev = &rs->md; | |
2848 | ||
2849 | /* This is a resume after a suspend of the set -> it's already started */ | |
2850 | if (_test_and_set_flag(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags)) | |
2851 | return 0; | |
2852 | ||
2853 | /* | |
2854 | * The superblocks need to be updated on disk if the | |
2855 | * array is new, or else _bitmap_load will overwrite them | 
2856 | * in core with stale on-disk data. | 
2857 | * | |
2858 | * In case the array got modified (takeover/reshape/resize) | |
2859 | * or the data offsets on the component devices changed, they | |
2860 | * have to be updated as well. | |
2861 | * | |
2862 | * Have to switch to readwrite and back in order to | |
2863 | * allow for the superblock updates. | |
2864 | */ | |
2865 | if (_test_and_clear_flag(RT_FLAG_UPDATE_SBS, &rs->runtime_flags)) { | |
2866 | set_bit(MD_CHANGE_DEVS, &mddev->flags); | |
2867 | mddev->ro = 0; | |
2868 | md_update_sb(mddev, 1); | |
2869 | mddev->ro = 1; | |
2870 | } | |
2871 | ||
2872 | /* | |
2873 | * Disable/enable discard support on raid set after any | |
2874 | * conversion, because devices may have been added | 
2875 | */ | |
2876 | configure_discard_support(rs); | |
2877 | ||
2878 | /* Load the bitmap from disk unless raid0 */ | |
2879 | return _bitmap_load(rs); | |
2880 | } | |
2881 | ||
f381e71b JB |
2882 | static void raid_resume(struct dm_target *ti) |
2883 | { | |
2884 | struct raid_set *rs = ti->private; | |
ecbfb9f1 | 2885 | struct mddev *mddev = &rs->md; |
f381e71b | 2886 | |
ecbfb9f1 HM |
2887 | if (_test_and_set_flag(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) { |
2888 | /* | |
2889 | * A secondary resume while the device is active. | |
2890 | * Take this opportunity to check whether any failed | |
2891 | * devices are reachable again. | |
2892 | */ | |
2893 | attempt_restore_of_faulty_devices(rs); | |
47525e59 | 2894 | } |
34f8ac6d | 2895 | |
ecbfb9f1 | 2896 | mddev->ro = 0; |
3a1c1ef2 HM |
2897 | mddev->in_sync = 0; |
2898 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | |
2899 | ||
ecbfb9f1 HM |
2900 | if (mddev->suspended) |
2901 | mddev_resume(mddev); | |
9d09e663 N |
2902 | } |
2903 | ||
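For orientation, a summary of how device-mapper core drives the four suspend/resume hooks above (DM core behaviour, condensed here as a comment):

/*
 * "dmsetup suspend"  ->  raid_presuspend()    md_stop_writes()
 *                        raid_postsuspend()   mddev_suspend()
 *
 * "dmsetup resume"   ->  raid_preresume()     superblock/bitmap preparation
 *                                             (first resume of the set only)
 *                        raid_resume()        unfreeze recovery, mddev_resume()
 */
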
2904 | static struct target_type raid_target = { | |
2905 | .name = "raid", | |
702108d1 | 2906 | .version = {1, 8, 1}, |
9d09e663 N |
2907 | .module = THIS_MODULE, |
2908 | .ctr = raid_ctr, | |
2909 | .dtr = raid_dtr, | |
2910 | .map = raid_map, | |
2911 | .status = raid_status, | |
be83651f | 2912 | .message = raid_message, |
9d09e663 N |
2913 | .iterate_devices = raid_iterate_devices, |
2914 | .io_hints = raid_io_hints, | |
2915 | .presuspend = raid_presuspend, | |
2916 | .postsuspend = raid_postsuspend, | |
ecbfb9f1 | 2917 | .preresume = raid_preresume, |
9d09e663 N |
2918 | .resume = raid_resume, |
2919 | }; | |
2920 | ||
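For context, a hedged example of a table line this target accepts, following the <start> <length> raid <raid_type> <#raid_params> <raid_params> <#raid_devs> <meta_dev> <data_dev> ... layout from Documentation/device-mapper/dm-raid.txt; the device numbers and sizes below are made up:

/*
 * Example only (hypothetical devices):
 *
 *   # raid5 (left-symmetric) over 4 devices, 128-sector (64 KiB) chunks,
 *   # no metadata devices ("-" placeholders)
 *   0 3907029168 raid raid5_ls 3 128 region_size 8192 \
 *       4 - 8:16 - 8:32 - 8:48 - 8:64
 */
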
2921 | static int __init dm_raid_init(void) | |
2922 | { | |
fe5d2f4a JB |
2923 | DMINFO("Loading target version %u.%u.%u", |
2924 | raid_target.version[0], | |
2925 | raid_target.version[1], | |
2926 | raid_target.version[2]); | |
9d09e663 N |
2927 | return dm_register_target(&raid_target); |
2928 | } | |
2929 | ||
2930 | static void __exit dm_raid_exit(void) | |
2931 | { | |
2932 | dm_unregister_target(&raid_target); | |
2933 | } | |
2934 | ||
2935 | module_init(dm_raid_init); | |
2936 | module_exit(dm_raid_exit); | |
2937 | ||
48cf06bc HM |
2938 | module_param(devices_handle_discard_safely, bool, 0644); |
2939 | MODULE_PARM_DESC(devices_handle_discard_safely, | |
2940 | "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions"); | |
2941 | ||
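Because the parameter above is registered with mode 0644, it can be set at load time or flipped later through sysfs. A usage sketch (the paths assume the target is built as the dm_raid module):

/*
 * Usage sketch, illustration only:
 *
 *   modprobe dm_raid devices_handle_discard_safely=Y
 *   echo Y > /sys/module/dm_raid/parameters/devices_handle_discard_safely
 */
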
9d09e663 | 2942 | MODULE_DESCRIPTION(DM_NAME " raid0/1/10/4/5/6 target"); | 
63f33b8d JB |
2943 | MODULE_ALIAS("dm-raid1"); |
2944 | MODULE_ALIAS("dm-raid10"); | |
9d09e663 N |
2945 | MODULE_ALIAS("dm-raid4"); |
2946 | MODULE_ALIAS("dm-raid5"); | |
2947 | MODULE_ALIAS("dm-raid6"); | |
2948 | MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>"); | |
3a1c1ef2 | 2949 | MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>"); |
9d09e663 | 2950 | MODULE_LICENSE("GPL"); |