Commit | Line | Data |
---|---|---|
9d09e663 N |
1 | /* |
2 | * Copyright (C) 2010-2011 Neil Brown | |
702108d1 | 3 | * Copyright (C) 2010-2016 Red Hat, Inc. All rights reserved. |
9d09e663 N |
4 | * |
5 | * This file is released under the GPL. | |
6 | */ | |
7 | ||
8 | #include <linux/slab.h> | |
056075c7 | 9 | #include <linux/module.h> |
9d09e663 N |
10 | |
11 | #include "md.h" | |
32737279 | 12 | #include "raid1.h" |
9d09e663 | 13 | #include "raid5.h" |
63f33b8d | 14 | #include "raid10.h" |
9d09e663 N |
15 | #include "bitmap.h" |
16 | ||
3e8dbb7f AK |
17 | #include <linux/device-mapper.h> |
18 | ||
9d09e663 | 19 | #define DM_MSG_PREFIX "raid" |
92c83d79 | 20 | #define MAX_RAID_DEVICES 253 /* md-raid kernel limit */ |
9d09e663 | 21 | |
48cf06bc HM |
22 | static bool devices_handle_discard_safely = false; |
23 | ||
9d09e663 | 24 | /* |
b12d437b JB |
25 | * The following flags are used by dm-raid.c to set up the array state. |
26 | * They must be cleared before md_run is called. | |
9d09e663 | 27 | */ |
43157840 | 28 | #define FirstUse 10 /* rdev flag */ |
9d09e663 N |
29 | |
30 | struct raid_dev { | |
31 | /* | |
32 | * Two DM devices, one to hold metadata and one to hold the | |
43157840 | 33 | * actual data/parity. The reason for this is to not confuse |
9d09e663 N |
34 | * ti->len and give more flexibility in altering size and |
35 | * characteristics. | |
36 | * | |
37 | * While it is possible for this device to be associated | |
38 | * with a different physical device than the data_dev, it | |
39 | * is intended for it to be the same. | |
40 | * |--------- Physical Device ---------| | |
41 | * |- meta_dev -|------ data_dev ------| | |
42 | */ | |
43 | struct dm_dev *meta_dev; | |
44 | struct dm_dev *data_dev; | |
3cb03002 | 45 | struct md_rdev rdev; |
9d09e663 N |
46 | }; |
47 | ||
48 | /* | |
4286325b | 49 | * Bits for establishing rs->ctr_flags |
702108d1 HM |
50 | * |
51 | * 1 = no flag value | |
52 | * 2 = flag with value | |
9d09e663 | 53 | */ |
4286325b MS |
54 | #define __CTR_FLAG_SYNC 0 /* 1 */ /* Not with raid0! */ |
55 | #define __CTR_FLAG_NOSYNC 1 /* 1 */ /* Not with raid0! */ | |
56 | #define __CTR_FLAG_REBUILD 2 /* 2 */ /* Not with raid0! */ | |
57 | #define __CTR_FLAG_DAEMON_SLEEP 3 /* 2 */ /* Not with raid0! */ | |
58 | #define __CTR_FLAG_MIN_RECOVERY_RATE 4 /* 2 */ /* Not with raid0! */ | |
59 | #define __CTR_FLAG_MAX_RECOVERY_RATE 5 /* 2 */ /* Not with raid0! */ | |
60 | #define __CTR_FLAG_MAX_WRITE_BEHIND 6 /* 2 */ /* Only with raid1! */ | |
61 | #define __CTR_FLAG_WRITE_MOSTLY 7 /* 2 */ /* Only with raid1! */ | |
62 | #define __CTR_FLAG_STRIPE_CACHE 8 /* 2 */ /* Only with raid4/5/6! */ | |
63 | #define __CTR_FLAG_REGION_SIZE 9 /* 2 */ /* Not with raid0! */ | |
64 | #define __CTR_FLAG_RAID10_COPIES 10 /* 2 */ /* Only with raid10 */ | |
65 | #define __CTR_FLAG_RAID10_FORMAT 11 /* 2 */ /* Only with raid10 */ | |
9b6e5423 | 66 | /* New for v1.9.0 */ |
4286325b MS |
67 | #define __CTR_FLAG_DELTA_DISKS 12 /* 2 */ /* Only with reshapable raid4/5/6/10! */ |
68 | #define __CTR_FLAG_DATA_OFFSET 13 /* 2 */ /* Only with reshapable raid4/5/6/10! */ | |
69 | #define __CTR_FLAG_RAID10_USE_NEAR_SETS 14 /* 2 */ /* Only with raid10! */ | |
70 | ||
71 | /* | |
72 | * Flags for rs->ctr_flags field. | |
73 | */ | |
74 | #define CTR_FLAG_SYNC (1 << __CTR_FLAG_SYNC) | |
75 | #define CTR_FLAG_NOSYNC (1 << __CTR_FLAG_NOSYNC) | |
76 | #define CTR_FLAG_REBUILD (1 << __CTR_FLAG_REBUILD) | |
77 | #define CTR_FLAG_DAEMON_SLEEP (1 << __CTR_FLAG_DAEMON_SLEEP) | |
78 | #define CTR_FLAG_MIN_RECOVERY_RATE (1 << __CTR_FLAG_MIN_RECOVERY_RATE) | |
79 | #define CTR_FLAG_MAX_RECOVERY_RATE (1 << __CTR_FLAG_MAX_RECOVERY_RATE) | |
80 | #define CTR_FLAG_MAX_WRITE_BEHIND (1 << __CTR_FLAG_MAX_WRITE_BEHIND) | |
81 | #define CTR_FLAG_WRITE_MOSTLY (1 << __CTR_FLAG_WRITE_MOSTLY) | |
82 | #define CTR_FLAG_STRIPE_CACHE (1 << __CTR_FLAG_STRIPE_CACHE) | |
83 | #define CTR_FLAG_REGION_SIZE (1 << __CTR_FLAG_REGION_SIZE) | |
84 | #define CTR_FLAG_RAID10_COPIES (1 << __CTR_FLAG_RAID10_COPIES) | |
85 | #define CTR_FLAG_RAID10_FORMAT (1 << __CTR_FLAG_RAID10_FORMAT) | |
86 | #define CTR_FLAG_DELTA_DISKS (1 << __CTR_FLAG_DELTA_DISKS) | |
87 | #define CTR_FLAG_DATA_OFFSET (1 << __CTR_FLAG_DATA_OFFSET) | |
88 | #define CTR_FLAG_RAID10_USE_NEAR_SETS (1 << __CTR_FLAG_RAID10_USE_NEAR_SETS) | |
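/*
 * Editorial note (not part of the upstream file): the two groups of
 * definitions above are used differently.  The __CTR_FLAG_* values are
 * bit indexes for the atomic bitops on rs->ctr_flags, whereas the
 * CTR_FLAG_* masks are used for bulk membership tests.  A minimal
 * sketch mirroring calls that appear later in this file:
 *
 *	set_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags);
 *	if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
 *		... nosync was requested ...
 *	if (rs->ctr_flags & CTR_FLAGS_ANY_SYNC)
 *		... either sync or nosync was given ...
 */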
63f33b8d | 89 | |
f090279e HM |
90 | /* |
91 | * Definitions of various constructor flags to | |
92 | * be used in checks of valid / invalid flags | |
93 | * per raid level. | |
94 | */ | |
95 | /* Define any sync flags */ |
96 | #define CTR_FLAGS_ANY_SYNC (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC) | |
97 | ||
98 | /* Define flags for options without argument (e.g. 'nosync') */ | |
33e53f06 HM |
99 | #define CTR_FLAG_OPTIONS_NO_ARGS (CTR_FLAGS_ANY_SYNC | \ |
100 | CTR_FLAG_RAID10_USE_NEAR_SETS) | |
f090279e HM |
101 | |
102 | /* Define flags for options with one argument (e.g. 'delta_disks +2') */ | |
103 | #define CTR_FLAG_OPTIONS_ONE_ARG (CTR_FLAG_REBUILD | \ | |
104 | CTR_FLAG_WRITE_MOSTLY | \ | |
105 | CTR_FLAG_DAEMON_SLEEP | \ | |
106 | CTR_FLAG_MIN_RECOVERY_RATE | \ | |
107 | CTR_FLAG_MAX_RECOVERY_RATE | \ | |
108 | CTR_FLAG_MAX_WRITE_BEHIND | \ | |
109 | CTR_FLAG_STRIPE_CACHE | \ | |
110 | CTR_FLAG_REGION_SIZE | \ | |
111 | CTR_FLAG_RAID10_COPIES | \ | |
33e53f06 HM |
112 | CTR_FLAG_RAID10_FORMAT | \ |
113 | CTR_FLAG_DELTA_DISKS | \ | |
114 | CTR_FLAG_DATA_OFFSET) | |
f090279e HM |
115 | |
116 | /* All ctr optional arguments */ | |
117 | #define ALL_CTR_FLAGS (CTR_FLAG_OPTIONS_NO_ARGS | \ | |
118 | CTR_FLAG_OPTIONS_ONE_ARG) | |
119 | ||
120 | /* Invalid options definitions per raid level... */ | |
121 | ||
122 | /* "raid0" does not accept any options */ | |
123 | #define RAID0_INVALID_FLAGS ALL_CTR_FLAGS | |
124 | ||
125 | /* "raid1" does not accept stripe cache or any raid10 options */ | |
126 | #define RAID1_INVALID_FLAGS (CTR_FLAG_STRIPE_CACHE | \ | |
127 | CTR_FLAG_RAID10_COPIES | \ | |
33e53f06 HM |
128 | CTR_FLAG_RAID10_FORMAT | \ |
129 | CTR_FLAG_DELTA_DISKS | \ | |
130 | CTR_FLAG_DATA_OFFSET) | |
f090279e HM |
131 | |
132 | /* "raid10" does not accept any raid1 or stripe cache options */ | |
133 | #define RAID10_INVALID_FLAGS (CTR_FLAG_WRITE_MOSTLY | \ | |
134 | CTR_FLAG_MAX_WRITE_BEHIND | \ | |
135 | CTR_FLAG_STRIPE_CACHE) | |
136 | /* | |
137 | * "raid4/5/6" do not accept any raid1 or raid10 specific options | |
138 | * | |
139 | * "raid6" does not accept "nosync", because it is not guaranteed | |
140 | * that both parity and q-syndrome are being written properly with | |
141 | * any writes | |
142 | */ | |
143 | #define RAID45_INVALID_FLAGS (CTR_FLAG_WRITE_MOSTLY | \ | |
144 | CTR_FLAG_MAX_WRITE_BEHIND | \ | |
145 | CTR_FLAG_RAID10_FORMAT | \ | |
33e53f06 HM |
146 | CTR_FLAG_RAID10_COPIES | \ |
147 | CTR_FLAG_RAID10_USE_NEAR_SETS) | |
f090279e HM |
148 | #define RAID6_INVALID_FLAGS (CTR_FLAG_NOSYNC | RAID45_INVALID_FLAGS) |
149 | /* ...invalid options definitions per raid level */ | |
150 | ||
ecbfb9f1 HM |
151 | /* |
152 | * Flags for rs->runtime_flags field | |
153 | * (RT_FLAG prefix meaning "runtime flag") | |
154 | * | |
155 | * These are all internal and used to define runtime state, | |
156 | * e.g. to prevent another resume from preresume processing | |
157 | * the raid set all over again. | |
158 | */ | |
159 | #define RT_FLAG_RS_PRERESUMED 0x1 | |
160 | #define RT_FLAG_RS_RESUMED 0x2 | |
161 | #define RT_FLAG_RS_BITMAP_LOADED 0x4 | |
162 | #define RT_FLAG_UPDATE_SBS 0x8 | |
163 | ||
33e53f06 HM |
164 | /* Number of 64-bit array elements needed to hold rebuild/write_mostly bits */ |
165 | #define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8) | |
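/*
 * Editorial worked example: with MAX_RAID_DEVICES = 253 and 64 usable
 * bits per uint64_t, ceil(253 / 64) = 4 elements are required.  The
 * macro above yields the same value:
 *	(253 + (8 * 8 - 1)) / 8 / 8  =  316 / 8 / 8  =  39 / 8  =  4
 * A compile-time sanity check could assert the relation, e.g.
 *	BUILD_BUG_ON(DISKS_ARRAY_ELEMS * sizeof(uint64_t) * 8 < MAX_RAID_DEVICES);
 * (illustrative addition, not in the original source).
 */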
166 | ||
ecbfb9f1 HM |
167 | /* |
168 | * raid set level, layout and chunk sectors backup/restore | |
169 | */ | |
170 | struct rs_layout { | |
171 | int new_level; | |
172 | int new_layout; | |
173 | int new_chunk_sectors; | |
174 | }; | |
175 | ||
9d09e663 N |
176 | struct raid_set { |
177 | struct dm_target *ti; | |
178 | ||
34f8ac6d | 179 | uint32_t bitmap_loaded; |
4286325b MS |
180 | unsigned long ctr_flags; |
181 | unsigned long runtime_flags; | |
ecbfb9f1 HM |
182 | |
183 | uint64_t rebuild_disks[DISKS_ARRAY_ELEMS]; | |
9d09e663 | 184 | |
33e53f06 HM |
185 | int raid_disks; |
186 | int delta_disks; | |
4763e543 | 187 | int data_offset; |
33e53f06 HM |
188 | int raid10_copies; |
189 | ||
fd01b88c | 190 | struct mddev md; |
9d09e663 N |
191 | struct raid_type *raid_type; |
192 | struct dm_target_callbacks callbacks; | |
ecbfb9f1 | 193 | struct rs_layout rs_layout; |
9d09e663 N |
194 | |
195 | struct raid_dev dev[0]; | |
196 | }; | |
197 | ||
ecbfb9f1 HM |
198 | /* Backup/restore raid set configuration helpers */ |
199 | static void _rs_config_backup(struct raid_set *rs, struct rs_layout *l) | |
200 | { | |
201 | struct mddev *mddev = &rs->md; | |
202 | ||
203 | l->new_level = mddev->new_level; | |
204 | l->new_layout = mddev->new_layout; | |
205 | l->new_chunk_sectors = mddev->new_chunk_sectors; | |
206 | } | |
207 | ||
208 | static void rs_config_backup(struct raid_set *rs) | |
209 | { | |
210 | return _rs_config_backup(rs, &rs->rs_layout); | |
211 | } | |
212 | ||
213 | static void _rs_config_restore(struct raid_set *rs, struct rs_layout *l) | |
214 | { | |
215 | struct mddev *mddev = &rs->md; | |
216 | ||
217 | mddev->new_level = l->new_level; | |
218 | mddev->new_layout = l->new_layout; | |
219 | mddev->new_chunk_sectors = l->new_chunk_sectors; | |
220 | } | |
221 | ||
222 | static void rs_config_restore(struct raid_set *rs) | |
223 | { | |
224 | return _rs_config_restore(rs, &rs->rs_layout); | |
225 | } | |
226 | /* END: backup/restore raid set configuration helpers */ | |
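/*
 * Editorial sketch (assumption, the callers are not shown in this
 * excerpt): the helpers above let a caller temporarily modify the
 * mddev's new_level/new_layout/new_chunk_sectors and then put the
 * original values back, e.g.:
 *
 *	rs_config_backup(rs);
 *	... tentatively adjust rs->md.new_level / new_layout ...
 *	rs_config_restore(rs);
 */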
227 | ||
33e53f06 HM |
228 | /* raid10 algorithms (i.e. formats) */ |
229 | #define ALGORITHM_RAID10_DEFAULT 0 | |
230 | #define ALGORITHM_RAID10_NEAR 1 | |
231 | #define ALGORITHM_RAID10_OFFSET 2 | |
232 | #define ALGORITHM_RAID10_FAR 3 | |
233 | ||
9d09e663 N |
234 | /* Supported raid types and properties. */ |
235 | static struct raid_type { | |
236 | const char *name; /* RAID algorithm. */ | |
237 | const char *descr; /* Descriptor text for logging. */ | |
238 | const unsigned parity_devs; /* # of parity devices. */ | |
239 | const unsigned minimal_devs; /* minimal # of devices in set. */ | |
240 | const unsigned level; /* RAID level. */ | |
241 | const unsigned algorithm; /* RAID algorithm. */ | |
242 | } raid_types[] = { | |
43157840 MS |
243 | {"raid0", "raid0 (striping)", 0, 2, 0, 0 /* NONE */}, |
244 | {"raid1", "raid1 (mirroring)", 0, 2, 1, 0 /* NONE */}, | |
245 | {"raid10_far", "raid10 far (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_FAR}, | |
33e53f06 | 246 | {"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET}, |
43157840 MS |
247 | {"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR}, |
248 | {"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT}, | |
249 | {"raid4", "raid4 (dedicated last parity disk)", 1, 2, 4, ALGORITHM_PARITY_N}, /* raid4 layout = raid5_n */ | |
250 | {"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N}, | |
251 | {"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC}, | |
252 | {"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC}, | |
253 | {"raid5_la", "raid5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC}, | |
254 | {"raid5_ra", "raid5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC}, | |
255 | {"raid6_zr", "raid6 (zero restart)", 2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART}, | |
256 | {"raid6_nr", "raid6 (N restart)", 2, 4, 6, ALGORITHM_ROTATING_N_RESTART}, | |
257 | {"raid6_nc", "raid6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}, | |
258 | {"raid6_n_6", "raid6 (dedicated parity/Q n/6)", 2, 4, 6, ALGORITHM_PARITY_N_6}, | |
259 | {"raid6_ls_6", "raid6 (left symmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_LEFT_SYMMETRIC_6}, | |
260 | {"raid6_rs_6", "raid6 (right symmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_RIGHT_SYMMETRIC_6}, | |
261 | {"raid6_la_6", "raid6 (left asymmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_LEFT_ASYMMETRIC_6}, | |
262 | {"raid6_ra_6", "raid6 (right asymmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_RIGHT_ASYMMETRIC_6} | |
9d09e663 N |
263 | }; |
264 | ||
92c83d79 | 265 | /* True, if @v is in inclusive range [@min, @max] */ |
bb91a63f | 266 | static bool __within_range(long v, long min, long max) |
92c83d79 HM |
267 | { |
268 | return v >= min && v <= max; | |
269 | } | |
270 | ||
702108d1 HM |
271 | /* All table line arguments are defined here */ |
272 | static struct arg_name_flag { | |
4286325b | 273 | const unsigned long flag; |
702108d1 HM |
274 | const char *name; |
275 | } _arg_name_flags[] = { | |
276 | { CTR_FLAG_SYNC, "sync"}, | |
277 | { CTR_FLAG_NOSYNC, "nosync"}, | |
278 | { CTR_FLAG_REBUILD, "rebuild"}, | |
279 | { CTR_FLAG_DAEMON_SLEEP, "daemon_sleep"}, | |
280 | { CTR_FLAG_MIN_RECOVERY_RATE, "min_recovery_rate"}, | |
281 | { CTR_FLAG_MAX_RECOVERY_RATE, "max_recovery_rate"}, | |
282 | { CTR_FLAG_MAX_WRITE_BEHIND, "max_write_behind"}, | |
283 | { CTR_FLAG_WRITE_MOSTLY, "writemostly"}, | |
284 | { CTR_FLAG_STRIPE_CACHE, "stripe_cache"}, | |
285 | { CTR_FLAG_REGION_SIZE, "region_size"}, | |
286 | { CTR_FLAG_RAID10_COPIES, "raid10_copies"}, | |
287 | { CTR_FLAG_RAID10_FORMAT, "raid10_format"}, | |
4763e543 HM |
288 | { CTR_FLAG_DATA_OFFSET, "data_offset"}, |
289 | { CTR_FLAG_DELTA_DISKS, "delta_disks"}, | |
290 | { CTR_FLAG_RAID10_USE_NEAR_SETS, "raid10_use_near_sets"}, | |
702108d1 HM |
291 | }; |
292 | ||
293 | /* Return argument name string for given @flag */ | |
3fa6cf38 | 294 | static const char *dm_raid_arg_name_by_flag(const uint32_t flag) |
702108d1 HM |
295 | { |
296 | if (hweight32(flag) == 1) { | |
297 | struct arg_name_flag *anf = _arg_name_flags + ARRAY_SIZE(_arg_name_flags); | |
298 | ||
299 | while (anf-- > _arg_name_flags) | |
4286325b | 300 | if (flag & anf->flag) |
702108d1 HM |
301 | return anf->name; |
302 | ||
303 | } else | |
304 | DMERR("%s called with more than one flag!", __func__); | |
305 | ||
306 | return NULL; | |
307 | } | |
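/*
 * Editorial usage example: given the table above,
 *	dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD)
 * returns "rebuild", whereas a multi-bit argument such as
 *	dm_raid_arg_name_by_flag(CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)
 * logs an error and returns NULL, because exactly one flag is expected.
 */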
308 | ||
33e53f06 HM |
309 | /* |
310 | * bool helpers to test for various raid levels of a raid set, | |
311 | * i.e. its level as reported by the superblock rather than |
312 | * the requested raid_type passed to the constructor. | |
313 | */ | |
314 | /* Return true, if raid set in @rs is raid0 */ | |
315 | static bool rs_is_raid0(struct raid_set *rs) | |
316 | { | |
317 | return !rs->md.level; | |
318 | } | |
319 | ||
320 | /* Return true, if raid set in @rs is raid10 */ | |
321 | static bool rs_is_raid10(struct raid_set *rs) | |
322 | { | |
323 | return rs->md.level == 10; | |
324 | } | |
325 | ||
f090279e HM |
326 | /* |
327 | * bool helpers to test for various raid levels of a raid type | |
328 | */ | |
329 | ||
330 | /* Return true, if raid type in @rt is raid0 */ | |
331 | static bool rt_is_raid0(struct raid_type *rt) | |
332 | { | |
333 | return !rt->level; | |
334 | } | |
335 | ||
336 | /* Return true, if raid type in @rt is raid1 */ | |
337 | static bool rt_is_raid1(struct raid_type *rt) | |
338 | { | |
339 | return rt->level == 1; | |
340 | } | |
341 | ||
342 | /* Return true, if raid type in @rt is raid10 */ | |
343 | static bool rt_is_raid10(struct raid_type *rt) | |
344 | { | |
345 | return rt->level == 10; | |
346 | } | |
347 | ||
348 | /* Return true, if raid type in @rt is raid4/5 */ | |
349 | static bool rt_is_raid45(struct raid_type *rt) | |
350 | { | |
bb91a63f | 351 | return __within_range(rt->level, 4, 5); |
f090279e HM |
352 | } |
353 | ||
354 | /* Return true, if raid type in @rt is raid6 */ | |
355 | static bool rt_is_raid6(struct raid_type *rt) | |
356 | { | |
357 | return rt->level == 6; | |
358 | } | |
676fa5ad HM |
359 | |
360 | /* Return true, if raid type in @rt is raid4/5/6 */ | |
361 | static bool rt_is_raid456(struct raid_type *rt) | |
362 | { | |
bb91a63f | 363 | return __within_range(rt->level, 4, 6); |
676fa5ad | 364 | } |
f090279e HM |
365 | /* END: raid level bools */ |
366 | ||
f090279e HM |
367 | /* Return invalid ctr flags for the raid level of @rs */ |
368 | static uint32_t _invalid_flags(struct raid_set *rs) | |
369 | { | |
370 | if (rt_is_raid0(rs->raid_type)) | |
371 | return RAID0_INVALID_FLAGS; | |
372 | else if (rt_is_raid1(rs->raid_type)) | |
373 | return RAID1_INVALID_FLAGS; | |
374 | else if (rt_is_raid10(rs->raid_type)) | |
375 | return RAID10_INVALID_FLAGS; | |
376 | else if (rt_is_raid45(rs->raid_type)) | |
377 | return RAID45_INVALID_FLAGS; | |
378 | else if (rt_is_raid6(rs->raid_type)) | |
379 | return RAID6_INVALID_FLAGS; | |
380 | ||
381 | return ~0; | |
382 | } | |
383 | ||
384 | /* | |
385 | * Check for any invalid flags set on @rs defined by bitset @invalid_flags | |
386 | * | |
387 | * Has to be called after parsing of the ctr flags! | |
388 | */ | |
389 | static int rs_check_for_invalid_flags(struct raid_set *rs) | |
390 | { | |
4286325b MS |
391 | if (rs->ctr_flags & _invalid_flags(rs)) { |
392 | rs->ti->error = "Invalid flags combination"; | |
bd83a4c4 MS |
393 | return -EINVAL; |
394 | } | |
f090279e HM |
395 | |
396 | return 0; | |
397 | } | |
398 | ||
33e53f06 HM |
399 | |
400 | /* MD raid10 bit definitions and helpers */ | |
401 | #define RAID10_OFFSET (1 << 16) /* stripes with data copies are adjacent on devices */ |
402 | #define RAID10_BROCKEN_USE_FAR_SETS (1 << 17) /* Broken in raid10.c: use sets instead of whole stripe rotation */ | |
403 | #define RAID10_USE_FAR_SETS (1 << 18) /* Use sets instead of whole stripe rotation */ | |
404 | #define RAID10_FAR_COPIES_SHIFT 8 /* raid10 # far copies shift (2nd byte of layout) */ | |
405 | ||
406 | /* Return md raid10 near copies for @layout */ | |
407 | static unsigned int _raid10_near_copies(int layout) | |
408 | { | |
409 | return layout & 0xFF; | |
410 | } | |
411 | ||
412 | /* Return md raid10 far copies for @layout */ | |
413 | static unsigned int _raid10_far_copies(int layout) | |
414 | { | |
415 | return _raid10_near_copies(layout >> RAID10_FAR_COPIES_SHIFT); | |
416 | } | |
417 | ||
418 | /* Return true if md raid10 offset for @layout */ | |
419 | static unsigned int _is_raid10_offset(int layout) | |
420 | { | |
421 | return layout & RAID10_OFFSET; | |
422 | } | |
423 | ||
424 | /* Return true if md raid10 near for @layout */ | |
425 | static unsigned int _is_raid10_near(int layout) | |
426 | { | |
427 | return !_is_raid10_offset(layout) && _raid10_near_copies(layout) > 1; | |
428 | } | |
429 | ||
430 | /* Return true if md raid10 far for @layout */ | |
431 | static unsigned int _is_raid10_far(int layout) | |
432 | { | |
433 | return !_is_raid10_offset(layout) && _raid10_far_copies(layout) > 1; | |
434 | } | |
435 | ||
436 | /* Return md raid10 layout string for @layout */ | |
437 | static const char *raid10_md_layout_to_format(int layout) | |
fe5d2f4a JB |
438 | { |
439 | /* | |
33e53f06 HM |
440 | * Bit 16 stands for "offset" |
441 | * (i.e. adjacent stripes hold copies) | |
442 | * | |
fe5d2f4a JB |
443 | * Refer to MD's raid10.c for details |
444 | */ | |
33e53f06 | 445 | if (_is_raid10_offset(layout)) |
fe5d2f4a JB |
446 | return "offset"; |
447 | ||
33e53f06 | 448 | if (_raid10_near_copies(layout) > 1) |
fe5d2f4a JB |
449 | return "near"; |
450 | ||
33e53f06 HM |
451 | WARN_ON(_raid10_far_copies(layout) < 2); |
452 | ||
fe5d2f4a JB |
453 | return "far"; |
454 | } | |
455 | ||
33e53f06 HM |
456 | /* Return md raid10 algorithm for @name */ |
457 | static const int raid10_name_to_format(const char *name) | |
458 | { | |
459 | if (!strcasecmp(name, "near")) | |
460 | return ALGORITHM_RAID10_NEAR; | |
461 | else if (!strcasecmp(name, "offset")) | |
462 | return ALGORITHM_RAID10_OFFSET; | |
463 | else if (!strcasecmp(name, "far")) | |
464 | return ALGORITHM_RAID10_FAR; | |
465 | ||
466 | return -EINVAL; | |
467 | } | |
468 | ||
469 | ||
470 | /* Return md raid10 copies for @layout */ | |
471 | static unsigned int raid10_md_layout_to_copies(int layout) | |
63f33b8d | 472 | { |
33e53f06 HM |
473 | return _raid10_near_copies(layout) > 1 ? |
474 | _raid10_near_copies(layout) : _raid10_far_copies(layout); | |
63f33b8d JB |
475 | } |
476 | ||
33e53f06 HM |
477 | /* Return md raid10 layout for @algorithm and @copies */ |
478 | static int raid10_format_to_md_layout(struct raid_set *rs, | |
479 | unsigned int algorithm, | |
480 | unsigned int copies) | |
63f33b8d | 481 | { |
33e53f06 | 482 | unsigned int n = 1, f = 1, r = 0; |
fe5d2f4a | 483 | |
33e53f06 HM |
484 | /* |
485 | * MD resilience flaw: |
486 | * | |
487 | * enabling use_far_sets for far/offset formats causes copies | |
488 | * to be colocated on the same devs together with their origins! | |
489 | * | |
490 | * -> disable it for now in the definition above | |
491 | */ | |
492 | if (algorithm == ALGORITHM_RAID10_DEFAULT || | |
493 | algorithm == ALGORITHM_RAID10_NEAR) | |
fe5d2f4a | 494 | n = copies; |
33e53f06 HM |
495 | |
496 | else if (algorithm == ALGORITHM_RAID10_OFFSET) { | |
497 | f = copies; | |
498 | r = RAID10_OFFSET; | |
4286325b | 499 | if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) |
33e53f06 HM |
500 | r |= RAID10_USE_FAR_SETS; |
501 | ||
502 | } else if (algorithm == ALGORITHM_RAID10_FAR) { | |
fe5d2f4a | 503 | f = copies; |
33e53f06 | 504 | r = !RAID10_OFFSET; |
4286325b | 505 | if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) |
33e53f06 | 506 | r |= RAID10_USE_FAR_SETS; |
fe5d2f4a | 507 | |
33e53f06 HM |
508 | } else |
509 | return -EINVAL; | |
510 | ||
511 | return r | (f << RAID10_FAR_COPIES_SHIFT) | n; | |
512 | } | |
513 | /* END: MD raid10 bit definitions and helpers */ | |
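/*
 * Editorial worked examples of the layout encoding used above (near
 * copies in byte 0, far copies in byte 1, flags from bit 16 upwards):
 *
 *	raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR, 2)
 *		= (1 << RAID10_FAR_COPIES_SHIFT) | 2 = 0x102
 *		-> raid10_md_layout_to_copies() = 2, format "near"
 *
 *	raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR, 2)
 *		(without raid10_use_near_sets)
 *		= RAID10_USE_FAR_SETS | (2 << RAID10_FAR_COPIES_SHIFT) | 1
 *		= 0x40201
 *		-> raid10_md_layout_to_copies() = 2, format "far"
 */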
fe5d2f4a | 514 | |
33e53f06 HM |
515 | /* Check for any of the raid10 algorithms */ |
516 | static int _got_raid10(struct raid_type *rtp, const int layout) | |
517 | { | |
518 | if (rtp->level == 10) { | |
519 | switch (rtp->algorithm) { | |
520 | case ALGORITHM_RAID10_DEFAULT: | |
521 | case ALGORITHM_RAID10_NEAR: | |
522 | return _is_raid10_near(layout); | |
523 | case ALGORITHM_RAID10_OFFSET: | |
524 | return _is_raid10_offset(layout); | |
525 | case ALGORITHM_RAID10_FAR: | |
526 | return _is_raid10_far(layout); | |
527 | default: | |
528 | break; | |
529 | } | |
530 | } | |
fe5d2f4a | 531 | |
33e53f06 | 532 | return 0; |
63f33b8d JB |
533 | } |
534 | ||
33e53f06 | 535 | /* Return raid_type for @name */ |
92c83d79 | 536 | static struct raid_type *get_raid_type(const char *name) |
9d09e663 | 537 | { |
33e53f06 | 538 | struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types); |
9d09e663 | 539 | |
33e53f06 HM |
540 | while (rtp-- > raid_types) |
541 | if (!strcasecmp(rtp->name, name)) | |
542 | return rtp; | |
9d09e663 N |
543 | |
544 | return NULL; | |
545 | } | |
546 | ||
33e53f06 HM |
547 | /* Return raid_type derived from @level and @layout */ |
548 | static struct raid_type *get_raid_type_by_ll(const int level, const int layout) | |
549 | { | |
550 | struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types); | |
551 | ||
552 | while (rtp-- > raid_types) { | |
553 | /* RAID10 special checks based on @layout flags/properties */ | |
554 | if (rtp->level == level && | |
555 | (_got_raid10(rtp, layout) || rtp->algorithm == layout)) | |
556 | return rtp; | |
557 | } | |
558 | ||
559 | return NULL; | |
560 | } | |
561 | ||
3a1c1ef2 HM |
562 | /* |
563 | * Set the mddev properties in @rs to the current | |
564 | * ones retrieved from the freshest superblock | |
565 | */ | |
566 | static void rs_set_cur(struct raid_set *rs) | |
567 | { | |
568 | struct mddev *mddev = &rs->md; | |
569 | ||
570 | mddev->new_level = mddev->level; | |
571 | mddev->new_layout = mddev->layout; | |
572 | mddev->new_chunk_sectors = mddev->chunk_sectors; | |
573 | } | |
574 | ||
33e53f06 HM |
575 | /* |
576 | * Set the mddev properties in @rs to the new | |
577 | * ones requested by the ctr | |
578 | */ | |
579 | static void rs_set_new(struct raid_set *rs) | |
580 | { | |
581 | struct mddev *mddev = &rs->md; | |
582 | ||
583 | mddev->level = mddev->new_level; | |
584 | mddev->layout = mddev->new_layout; | |
585 | mddev->chunk_sectors = mddev->new_chunk_sectors; | |
3a1c1ef2 | 586 | mddev->raid_disks = rs->raid_disks; |
33e53f06 HM |
587 | mddev->delta_disks = 0; |
588 | } | |
589 | ||
590 | ||
9d09e663 N |
591 | static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *raid_type, unsigned raid_devs) |
592 | { | |
593 | unsigned i; | |
594 | struct raid_set *rs; | |
9d09e663 | 595 | |
bd83a4c4 MS |
596 | if (raid_devs <= raid_type->parity_devs) { |
597 | ti->error = "Insufficient number of devices"; | |
598 | return ERR_PTR(-EINVAL); | |
599 | } | |
9d09e663 | 600 | |
9d09e663 | 601 | rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL); |
bd83a4c4 MS |
602 | if (!rs) { |
603 | ti->error = "Cannot allocate raid context"; | |
604 | return ERR_PTR(-ENOMEM); | |
605 | } | |
9d09e663 N |
606 | |
607 | mddev_init(&rs->md); | |
608 | ||
33e53f06 HM |
609 | rs->raid_disks = raid_devs; |
610 | rs->delta_disks = 0; | |
611 | ||
9d09e663 N |
612 | rs->ti = ti; |
613 | rs->raid_type = raid_type; | |
614 | rs->md.raid_disks = raid_devs; | |
615 | rs->md.level = raid_type->level; | |
616 | rs->md.new_level = rs->md.level; | |
9d09e663 N |
617 | rs->md.layout = raid_type->algorithm; |
618 | rs->md.new_layout = rs->md.layout; | |
619 | rs->md.delta_disks = 0; | |
ecbfb9f1 | 620 | rs->md.recovery_cp = rs_is_raid0(rs) ? MaxSector : 0; |
9d09e663 N |
621 | |
622 | for (i = 0; i < raid_devs; i++) | |
623 | md_rdev_init(&rs->dev[i].rdev); | |
624 | ||
625 | /* | |
626 | * Remaining items to be initialized by further RAID params: | |
627 | * rs->md.persistent | |
628 | * rs->md.external | |
629 | * rs->md.chunk_sectors | |
630 | * rs->md.new_chunk_sectors | |
c039c332 | 631 | * rs->md.dev_sectors |
9d09e663 N |
632 | */ |
633 | ||
634 | return rs; | |
635 | } | |
636 | ||
637 | static void context_free(struct raid_set *rs) | |
638 | { | |
639 | int i; | |
640 | ||
b12d437b JB |
641 | for (i = 0; i < rs->md.raid_disks; i++) { |
642 | if (rs->dev[i].meta_dev) | |
643 | dm_put_device(rs->ti, rs->dev[i].meta_dev); | |
545c8795 | 644 | md_rdev_clear(&rs->dev[i].rdev); |
9d09e663 N |
645 | if (rs->dev[i].data_dev) |
646 | dm_put_device(rs->ti, rs->dev[i].data_dev); | |
b12d437b | 647 | } |
9d09e663 N |
648 | |
649 | kfree(rs); | |
650 | } | |
651 | ||
652 | /* | |
653 | * For every device we have two words | |
654 | * <meta_dev>: meta device name or '-' if missing | |
655 | * <data_dev>: data device name or '-' if missing | |
656 | * | |
b12d437b JB |
657 | * The following are permitted: |
658 | * - - | |
659 | * - <data_dev> | |
660 | * <meta_dev> <data_dev> | |
661 | * | |
662 | * The following is not allowed: | |
663 | * <meta_dev> - | |
664 | * | |
665 | * This code parses those words. If there is a failure, | |
666 | * the caller must use context_free to unwind the operations. | |
9d09e663 | 667 | */ |
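/*
 * Editorial example (illustrative device names): for a two-device set
 * this portion of the table is the device count followed by one
 * <meta_dev> <data_dev> pair per device, e.g.
 *	2 /dev/meta0 /dev/data0 /dev/meta1 /dev/data1
 * or, without metadata devices,
 *	2 - /dev/data0 - /dev/data1
 */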
702108d1 | 668 | static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as) |
9d09e663 N |
669 | { |
670 | int i; | |
671 | int rebuild = 0; | |
672 | int metadata_available = 0; | |
73c6f239 | 673 | int r = 0; |
92c83d79 | 674 | const char *arg; |
9d09e663 | 675 | |
92c83d79 HM |
676 | /* Skip over the number-of-raid-devices argument to get to the dev pairs */ |
677 | arg = dm_shift_arg(as); | |
678 | if (!arg) | |
679 | return -EINVAL; | |
680 | ||
681 | for (i = 0; i < rs->md.raid_disks; i++) { | |
9d09e663 N |
682 | rs->dev[i].rdev.raid_disk = i; |
683 | ||
684 | rs->dev[i].meta_dev = NULL; | |
685 | rs->dev[i].data_dev = NULL; | |
686 | ||
687 | /* | |
688 | * There are no offsets, since there is a separate device | |
689 | * for data and metadata. | |
690 | */ | |
691 | rs->dev[i].rdev.data_offset = 0; | |
692 | rs->dev[i].rdev.mddev = &rs->md; | |
693 | ||
92c83d79 HM |
694 | arg = dm_shift_arg(as); |
695 | if (!arg) | |
696 | return -EINVAL; | |
697 | ||
698 | if (strcmp(arg, "-")) { | |
bd83a4c4 MS |
699 | r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table), |
700 | &rs->dev[i].meta_dev); | |
701 | if (r) { | |
702 | rs->ti->error = "RAID metadata device lookup failure"; | |
703 | return r; | |
704 | } | |
b12d437b JB |
705 | |
706 | rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL); | |
bd83a4c4 MS |
707 | if (!rs->dev[i].rdev.sb_page) { |
708 | rs->ti->error = "Failed to allocate superblock page"; | |
709 | return -ENOMEM; | |
710 | } | |
9d09e663 N |
711 | } |
712 | ||
92c83d79 HM |
713 | arg = dm_shift_arg(as); |
714 | if (!arg) | |
715 | return -EINVAL; | |
716 | ||
717 | if (!strcmp(arg, "-")) { | |
9d09e663 | 718 | if (!test_bit(In_sync, &rs->dev[i].rdev.flags) && |
bd83a4c4 MS |
719 | (!rs->dev[i].rdev.recovery_offset)) { |
720 | rs->ti->error = "Drive designated for rebuild not specified"; | |
721 | return -EINVAL; | |
722 | } | |
9d09e663 | 723 | |
bd83a4c4 MS |
724 | if (rs->dev[i].meta_dev) { |
725 | rs->ti->error = "No data device supplied with metadata device"; | |
726 | return -EINVAL; | |
727 | } | |
b12d437b | 728 | |
9d09e663 N |
729 | continue; |
730 | } | |
731 | ||
bd83a4c4 MS |
732 | r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table), |
733 | &rs->dev[i].data_dev); | |
734 | if (r) { | |
735 | rs->ti->error = "RAID device lookup failure"; | |
736 | return r; | |
737 | } | |
9d09e663 | 738 | |
b12d437b JB |
739 | if (rs->dev[i].meta_dev) { |
740 | metadata_available = 1; | |
741 | rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev; | |
742 | } | |
9d09e663 | 743 | rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev; |
3a1c1ef2 | 744 | list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks); |
9d09e663 N |
745 | if (!test_bit(In_sync, &rs->dev[i].rdev.flags)) |
746 | rebuild++; | |
747 | } | |
748 | ||
749 | if (metadata_available) { | |
750 | rs->md.external = 0; | |
751 | rs->md.persistent = 1; | |
752 | rs->md.major_version = 2; | |
753 | } else if (rebuild && !rs->md.recovery_cp) { | |
754 | /* | |
755 | * Without metadata, we will not be able to tell if the array | |
756 | * is in-sync or not - we must assume it is not. Therefore, | |
757 | * it is impossible to rebuild a drive. | |
758 | * | |
759 | * Even if there is metadata, the on-disk information may | |
760 | * indicate that the array is not in-sync and it will then | |
761 | * fail at that time. | |
762 | * | |
763 | * User could specify 'nosync' option if desperate. | |
764 | */ | |
bd83a4c4 MS |
765 | rs->ti->error = "Unable to rebuild drive while array is not in-sync"; |
766 | return -EINVAL; | |
9d09e663 N |
767 | } |
768 | ||
769 | return 0; | |
770 | } | |
771 | ||
c1084561 JB |
772 | /* |
773 | * validate_region_size | |
774 | * @rs | |
775 | * @region_size: region size in sectors. If 0, pick a size (4MiB default). | |
776 | * | |
777 | * Set rs->md.bitmap_info.chunksize (which really refers to 'region size'). | |
778 | * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap. | |
779 | * | |
780 | * Returns: 0 on success, -EINVAL on failure. | |
781 | */ | |
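/*
 * Editorial worked example: for a 1 TiB target, ti->len = 2^31 sectors,
 * so min_region_size = 2^31 / 2^21 = 1024 sectors.  That is below the
 * 2^13 threshold, so the 4MiB default (8192 sectors) is used.  For a
 * huge target of 2^45 sectors, min_region_size = 2^24 sectors and the
 * default becomes roundup_pow_of_two(2^24) = 2^24 sectors.
 */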
782 | static int validate_region_size(struct raid_set *rs, unsigned long region_size) | |
783 | { | |
784 | unsigned long min_region_size = rs->ti->len / (1 << 21); | |
785 | ||
786 | if (!region_size) { | |
787 | /* | |
43157840 | 788 | * Choose a reasonable default. All figures in sectors. |
c1084561 JB |
789 | */ |
790 | if (min_region_size > (1 << 13)) { | |
3a0f9aae | 791 | /* If not a power of 2, make it the next power of 2 */ |
042745ee | 792 | region_size = roundup_pow_of_two(min_region_size); |
c1084561 JB |
793 | DMINFO("Choosing default region size of %lu sectors", |
794 | region_size); | |
c1084561 JB |
795 | } else { |
796 | DMINFO("Choosing default region size of 4MiB"); | |
797 | region_size = 1 << 13; /* sectors */ | |
798 | } | |
799 | } else { | |
800 | /* | |
801 | * Validate user-supplied value. | |
802 | */ | |
bd83a4c4 MS |
803 | if (region_size > rs->ti->len) { |
804 | rs->ti->error = "Supplied region size is too large"; | |
805 | return -EINVAL; | |
806 | } | |
c1084561 JB |
807 | |
808 | if (region_size < min_region_size) { | |
809 | DMERR("Supplied region_size (%lu sectors) below minimum (%lu)", | |
810 | region_size, min_region_size); | |
bd83a4c4 MS |
811 | rs->ti->error = "Supplied region size is too small"; |
812 | return -EINVAL; | |
c1084561 JB |
813 | } |
814 | ||
bd83a4c4 MS |
815 | if (!is_power_of_2(region_size)) { |
816 | rs->ti->error = "Region size is not a power of 2"; | |
817 | return -EINVAL; | |
818 | } | |
c1084561 | 819 | |
bd83a4c4 MS |
820 | if (region_size < rs->md.chunk_sectors) { |
821 | rs->ti->error = "Region size is smaller than the chunk size"; | |
822 | return -EINVAL; | |
823 | } | |
c1084561 JB |
824 | } |
825 | ||
826 | /* | |
827 | * Convert sectors to bytes. | |
828 | */ | |
829 | rs->md.bitmap_info.chunksize = (region_size << 9); | |
830 | ||
831 | return 0; | |
832 | } | |
833 | ||
eb649123 | 834 | /* |
55ebbb59 | 835 | * validate_raid_redundancy |
eb649123 JB |
836 | * @rs |
837 | * | |
55ebbb59 JB |
838 | * Determine if there are enough devices in the array that haven't |
839 | * failed (or are being rebuilt) to form a usable array. | |
eb649123 JB |
840 | * |
841 | * Returns: 0 on success, -EINVAL on failure. | |
842 | */ | |
55ebbb59 | 843 | static int validate_raid_redundancy(struct raid_set *rs) |
eb649123 JB |
844 | { |
845 | unsigned i, rebuild_cnt = 0; | |
3f6bbd3f | 846 | unsigned rebuilds_per_group = 0, copies, d; |
fe5d2f4a | 847 | unsigned group_size, last_group_start; |
eb649123 | 848 | |
eb649123 | 849 | for (i = 0; i < rs->md.raid_disks; i++) |
55ebbb59 JB |
850 | if (!test_bit(In_sync, &rs->dev[i].rdev.flags) || |
851 | !rs->dev[i].rdev.sb_page) | |
eb649123 JB |
852 | rebuild_cnt++; |
853 | ||
854 | switch (rs->raid_type->level) { | |
855 | case 1: | |
856 | if (rebuild_cnt >= rs->md.raid_disks) | |
857 | goto too_many; | |
858 | break; | |
859 | case 4: | |
860 | case 5: | |
861 | case 6: | |
862 | if (rebuild_cnt > rs->raid_type->parity_devs) | |
863 | goto too_many; | |
864 | break; | |
865 | case 10: | |
4ec1e369 JB |
866 | copies = raid10_md_layout_to_copies(rs->md.layout); |
867 | if (rebuild_cnt < copies) | |
868 | break; | |
869 | ||
870 | /* | |
871 | * It is possible to have a higher rebuild count for RAID10, | |
872 | * as long as the failed devices occur in different mirror | |
873 | * groups (i.e. different stripes). | |
874 | * | |
4ec1e369 JB |
875 | * When checking "near" format, make sure no adjacent devices |
876 | * have failed beyond what can be handled. In addition to the | |
877 | * simple case where the number of devices is a multiple of the | |
878 | * number of copies, we must also handle cases where the number | |
879 | * of devices is not a multiple of the number of copies. | |
43157840 MS |
880 | * E.g. dev1 dev2 dev3 dev4 dev5 |
881 | * A A B B C | |
882 | * C D D E E | |
4ec1e369 | 883 | */ |
fe5d2f4a JB |
884 | if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) { |
885 | for (i = 0; i < rs->md.raid_disks * copies; i++) { | |
886 | if (!(i % copies)) | |
887 | rebuilds_per_group = 0; | |
888 | d = i % rs->md.raid_disks; | |
889 | if ((!rs->dev[d].rdev.sb_page || | |
890 | !test_bit(In_sync, &rs->dev[d].rdev.flags)) && | |
891 | (++rebuilds_per_group >= copies)) | |
892 | goto too_many; | |
893 | } | |
894 | break; | |
895 | } | |
896 | ||
897 | /* | |
898 | * When checking "far" and "offset" formats, we need to ensure | |
899 | * that the device that holds its copy is not also dead or | |
900 | * being rebuilt. (Note that "far" and "offset" formats only | |
901 | * support two copies right now. These formats also only ever | |
902 | * use the 'use_far_sets' variant.) | |
903 | * | |
904 | * This check is somewhat complicated by the need to account | |
43157840 | 905 | * for arrays that are not a multiple of (far) copies. This |
fe5d2f4a JB |
906 | * results in the need to treat the last (potentially larger) |
907 | * set differently. | |
908 | */ | |
909 | group_size = (rs->md.raid_disks / copies); | |
910 | last_group_start = (rs->md.raid_disks / group_size) - 1; | |
911 | last_group_start *= group_size; | |
912 | for (i = 0; i < rs->md.raid_disks; i++) { | |
913 | if (!(i % copies) && !(i > last_group_start)) | |
55ebbb59 | 914 | rebuilds_per_group = 0; |
fe5d2f4a JB |
915 | if ((!rs->dev[i].rdev.sb_page || |
916 | !test_bit(In_sync, &rs->dev[i].rdev.flags)) && | |
4ec1e369 | 917 | (++rebuilds_per_group >= copies)) |
fe5d2f4a | 918 | goto too_many; |
4ec1e369 JB |
919 | } |
920 | break; | |
eb649123 | 921 | default: |
55ebbb59 JB |
922 | if (rebuild_cnt) |
923 | return -EINVAL; | |
eb649123 JB |
924 | } |
925 | ||
926 | return 0; | |
927 | ||
928 | too_many: | |
eb649123 JB |
929 | return -EINVAL; |
930 | } | |
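/*
 * Editorial worked example for the raid10 "near" check above: with
 * 5 devices and 2 copies the data is laid out as in the comment
 * (A A B B C / C D D E E).  Losing dev1 and dev3 is tolerated because
 * they never hold the same copy group, but losing dev1 and dev2 takes
 * out both copies of A and the loop branches to 'too_many'.
 */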
931 | ||
9d09e663 N |
932 | /* |
933 | * Possible arguments are... | |
9d09e663 N |
934 | * <chunk_size> [optional_args] |
935 | * | |
32737279 JB |
936 | * Argument definitions |
937 | * <chunk_size> The number of sectors per disk that | |
43157840 | 938 | * will form the "stripe" |
32737279 | 939 | * [[no]sync] Force or prevent recovery of the |
43157840 | 940 | * entire array |
9d09e663 | 941 | * [rebuild <idx>] Rebuild the drive indicated by the index |
32737279 | 942 | * [daemon_sleep <ms>] Time between bitmap daemon work to |
43157840 | 943 | * clear bits |
9d09e663 N |
944 | * [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization |
945 | * [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization | |
46bed2b5 | 946 | * [write_mostly <idx>] Indicate a write mostly drive via index |
9d09e663 N |
947 | * [max_write_behind <sectors>] See '-write-behind=' (man mdadm) |
948 | * [stripe_cache <sectors>] Stripe cache size for higher RAIDs | |
43157840 | 949 | * [region_size <sectors>] Defines granularity of bitmap |
63f33b8d JB |
950 | * |
951 | * RAID10-only options: | |
43157840 | 952 | * [raid10_copies <# copies>] Number of copies. (Default: 2) |
fe5d2f4a | 953 | * [raid10_format <near|far|offset>] Layout algorithm. (Default: near) |
9d09e663 | 954 | */ |
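/*
 * Editorial example of a complete target line using the parameters
 * documented above (device numbers and length are illustrative; the
 * dm-raid documentation is authoritative for the exact format):
 *
 *	0 1960893648 raid raid4 1 2048 \
 *	    5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81
 *
 * i.e. a raid4 set with one raid parameter (chunk_size of 2048 sectors)
 * and five data devices, none of which has a metadata device.
 */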
92c83d79 | 955 | static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as, |
9d09e663 N |
956 | unsigned num_raid_params) |
957 | { | |
33e53f06 | 958 | int raid10_format = ALGORITHM_RAID10_DEFAULT; |
63f33b8d | 959 | unsigned raid10_copies = 2; |
eb649123 | 960 | unsigned i; |
92c83d79 | 961 | unsigned value, region_size = 0; |
c039c332 | 962 | sector_t sectors_per_dev = rs->ti->len; |
542f9038 | 963 | sector_t max_io_len; |
92c83d79 | 964 | const char *arg, *key; |
702108d1 | 965 | struct raid_dev *rd; |
33e53f06 | 966 | struct raid_type *rt = rs->raid_type; |
92c83d79 HM |
967 | |
968 | arg = dm_shift_arg(as); | |
969 | num_raid_params--; /* Account for chunk_size argument */ | |
970 | ||
bd83a4c4 MS |
971 | if (kstrtouint(arg, 10, &value) < 0) { |
972 | rs->ti->error = "Bad numerical argument given for chunk_size"; | |
973 | return -EINVAL; | |
974 | } | |
9d09e663 N |
975 | |
976 | /* | |
977 | * First, parse the in-order required arguments | |
32737279 | 978 | * "chunk_size" is the only argument of this type. |
9d09e663 | 979 | */ |
33e53f06 | 980 | if (rt_is_raid1(rt)) { |
32737279 JB |
981 | if (value) |
982 | DMERR("Ignoring chunk size parameter for RAID 1"); | |
983 | value = 0; | |
bd83a4c4 MS |
984 | } else if (!is_power_of_2(value)) { |
985 | rs->ti->error = "Chunk size must be a power of 2"; | |
986 | return -EINVAL; | |
987 | } else if (value < 8) { | |
988 | rs->ti->error = "Chunk size value is too small"; | |
989 | return -EINVAL; | |
990 | } | |
9d09e663 N |
991 | |
992 | rs->md.new_chunk_sectors = rs->md.chunk_sectors = value; | |
9d09e663 N |
993 | |
994 | /* | |
b12d437b JB |
995 | * We set each individual device as In_sync with a completed |
996 | * 'recovery_offset'. If there has been a device failure or | |
997 | * replacement then one of the following cases applies: | |
998 | * | |
999 | * 1) User specifies 'rebuild'. | |
43157840 | 1000 | * - Device is reset when param is read. |
b12d437b | 1001 | * 2) A new device is supplied. |
43157840 | 1002 | * - No matching superblock found, resets device. |
b12d437b | 1003 | * 3) Device failure was transient and returns on reload. |
43157840 | 1004 | * - Failure noticed, resets device for bitmap replay. |
b12d437b | 1005 | * 4) Device hadn't completed recovery after previous failure. |
43157840 | 1006 | * - Superblock is read and overrides recovery_offset. |
b12d437b JB |
1007 | * |
1008 | * What is found in the superblocks of the devices is always | |
1009 | * authoritative, unless 'rebuild' or '[no]sync' was specified. | |
9d09e663 | 1010 | */ |
b12d437b | 1011 | for (i = 0; i < rs->md.raid_disks; i++) { |
9d09e663 | 1012 | set_bit(In_sync, &rs->dev[i].rdev.flags); |
b12d437b JB |
1013 | rs->dev[i].rdev.recovery_offset = MaxSector; |
1014 | } | |
9d09e663 | 1015 | |
b12d437b JB |
1016 | /* |
1017 | * Second, parse the unordered optional arguments | |
1018 | */ | |
9d09e663 | 1019 | for (i = 0; i < num_raid_params; i++) { |
4763e543 | 1020 | key = dm_shift_arg(as); |
bd83a4c4 MS |
1021 | if (!key) { |
1022 | rs->ti->error = "Not enough raid parameters given"; | |
1023 | return -EINVAL; | |
1024 | } | |
92c83d79 | 1025 | |
3fa6cf38 | 1026 | if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC))) { |
4286325b | 1027 | if (test_and_set_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) { |
bd83a4c4 MS |
1028 | rs->ti->error = "Only one 'nosync' argument allowed"; |
1029 | return -EINVAL; | |
1030 | } | |
9d09e663 | 1031 | rs->md.recovery_cp = MaxSector; |
9d09e663 N |
1032 | continue; |
1033 | } | |
3fa6cf38 | 1034 | if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_SYNC))) { |
4286325b | 1035 | if (test_and_set_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) { |
bd83a4c4 MS |
1036 | rs->ti->error = "Only one 'sync' argument allowed"; |
1037 | return -EINVAL; | |
1038 | } | |
9d09e663 | 1039 | rs->md.recovery_cp = 0; |
4763e543 HM |
1040 | continue; |
1041 | } | |
3fa6cf38 | 1042 | if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_USE_NEAR_SETS))) { |
4286325b | 1043 | if (test_and_set_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) { |
bd83a4c4 MS |
1044 | rs->ti->error = "Only one 'raid10_use_near_sets' argument allowed"; |
1045 | return -EINVAL; | |
1046 | } | |
9d09e663 N |
1047 | continue; |
1048 | } | |
1049 | ||
92c83d79 HM |
1050 | arg = dm_shift_arg(as); |
1051 | i++; /* Account for the argument pairs */ | |
bd83a4c4 MS |
1052 | if (!arg) { |
1053 | rs->ti->error = "Wrong number of raid parameters given"; | |
1054 | return -EINVAL; | |
1055 | } | |
63f33b8d | 1056 | |
702108d1 HM |
1057 | /* |
1058 | * Parameters that take a string value are checked here. | |
1059 | */ | |
1060 | ||
3fa6cf38 | 1061 | if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT))) { |
4286325b | 1062 | if (test_and_set_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) { |
bd83a4c4 MS |
1063 | rs->ti->error = "Only one 'raid10_format' argument pair allowed"; |
1064 | return -EINVAL; | |
1065 | } | |
1066 | if (!rt_is_raid10(rt)) { | |
1067 | rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type"; | |
1068 | return -EINVAL; | |
1069 | } | |
33e53f06 | 1070 | raid10_format = raid10_name_to_format(arg); |
bd83a4c4 MS |
1071 | if (raid10_format < 0) { |
1072 | rs->ti->error = "Invalid 'raid10_format' value given"; | |
1073 | return raid10_format; | |
1074 | } | |
63f33b8d JB |
1075 | continue; |
1076 | } | |
1077 | ||
bd83a4c4 MS |
1078 | if (kstrtouint(arg, 10, &value) < 0) { |
1079 | rs->ti->error = "Bad numerical argument given in raid params"; | |
1080 | return -EINVAL; | |
1081 | } | |
702108d1 | 1082 | |
3fa6cf38 | 1083 | if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD))) { |
702108d1 HM |
1084 | /* |
1085 | * "rebuild" is being passed in by userspace to provide | |
1086 | * indexes of replaced devices and to set up additional | |
1087 | * devices on raid level takeover. | |
43157840 | 1088 | */ |
bb91a63f | 1089 | if (!__within_range(value, 0, rs->raid_disks - 1)) { |
bd83a4c4 MS |
1090 | rs->ti->error = "Invalid rebuild index given"; |
1091 | return -EINVAL; | |
1092 | } | |
702108d1 | 1093 | |
bd83a4c4 MS |
1094 | if (test_and_set_bit(value, (void *) rs->rebuild_disks)) { |
1095 | rs->ti->error = "rebuild for this index already given"; | |
1096 | return -EINVAL; | |
1097 | } | |
ecbfb9f1 | 1098 | |
702108d1 HM |
1099 | rd = rs->dev + value; |
1100 | clear_bit(In_sync, &rd->rdev.flags); | |
1101 | clear_bit(Faulty, &rd->rdev.flags); | |
1102 | rd->rdev.recovery_offset = 0; | |
4286325b | 1103 | set_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags); |
3fa6cf38 | 1104 | } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY))) { |
bd83a4c4 MS |
1105 | if (!rt_is_raid1(rt)) { |
1106 | rs->ti->error = "write_mostly option is only valid for RAID1"; | |
1107 | return -EINVAL; | |
1108 | } | |
702108d1 | 1109 | |
bb91a63f | 1110 | if (!__within_range(value, 0, rs->md.raid_disks - 1)) { |
bd83a4c4 MS |
1111 | rs->ti->error = "Invalid write_mostly index given"; |
1112 | return -EINVAL; | |
1113 | } | |
9d09e663 | 1114 | |
46bed2b5 | 1115 | set_bit(WriteMostly, &rs->dev[value].rdev.flags); |
4286325b | 1116 | set_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags); |
3fa6cf38 | 1117 | } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND))) { |
bd83a4c4 MS |
1118 | if (!rt_is_raid1(rt)) { |
1119 | rs->ti->error = "max_write_behind option is only valid for RAID1"; | |
1120 | return -EINVAL; | |
1121 | } | |
702108d1 | 1122 | |
4286325b | 1123 | if (test_and_set_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) { |
bd83a4c4 MS |
1124 | rs->ti->error = "Only one max_write_behind argument pair allowed"; |
1125 | return -EINVAL; | |
1126 | } | |
9d09e663 N |
1127 | |
1128 | /* | |
1129 | * In device-mapper, we specify things in sectors, but | |
1130 | * MD records this value in kB | |
1131 | */ | |
1132 | value /= 2; | |
bd83a4c4 MS |
1133 | if (value > COUNTER_MAX) { |
1134 | rs->ti->error = "Max write-behind limit out of range"; | |
1135 | return -EINVAL; | |
1136 | } | |
702108d1 | 1137 | |
9d09e663 | 1138 | rs->md.bitmap_info.max_write_behind = value; |
3fa6cf38 | 1139 | } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP))) { |
4286325b | 1140 | if (test_and_set_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) { |
bd83a4c4 MS |
1141 | rs->ti->error = "Only one daemon_sleep argument pair allowed"; |
1142 | return -EINVAL; | |
1143 | } | |
1144 | if (!value || (value > MAX_SCHEDULE_TIMEOUT)) { | |
1145 | rs->ti->error = "daemon sleep period out of range"; | |
1146 | return -EINVAL; | |
1147 | } | |
9d09e663 | 1148 | rs->md.bitmap_info.daemon_sleep = value; |
3fa6cf38 | 1149 | } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET))) { |
4763e543 | 1150 | /* Userspace passes new data_offset after having extended the data image LV */ |
4286325b | 1151 | if (test_and_set_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) { |
bd83a4c4 MS |
1152 | rs->ti->error = "Only one data_offset argument pair allowed"; |
1153 | return -EINVAL; | |
1154 | } | |
4763e543 | 1155 | /* Ensure sensible data offset */ |
bd83a4c4 MS |
1156 | if (value < 0) { |
1157 | rs->ti->error = "Bogus data_offset value"; | |
1158 | return -EINVAL; | |
1159 | } | |
4763e543 | 1160 | rs->data_offset = value; |
3fa6cf38 | 1161 | } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS))) { |
4763e543 | 1162 | /* Define the +/-# of disks to add to/remove from the given raid set */ |
4286325b | 1163 | if (test_and_set_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) { |
bd83a4c4 MS |
1164 | rs->ti->error = "Only one delta_disks argument pair allowed"; |
1165 | return -EINVAL; | |
1166 | } | |
4763e543 | 1167 | /* Ensure MAX_RAID_DEVICES and raid type minimal_devs! */ |
bb91a63f | 1168 | if (!__within_range(abs(value), 1, MAX_RAID_DEVICES - rt->minimal_devs)) { |
bd83a4c4 MS |
1169 | rs->ti->error = "Too many delta_disk requested"; |
1170 | return -EINVAL; | |
1171 | } | |
4763e543 HM |
1172 | |
1173 | rs->delta_disks = value; | |
3fa6cf38 | 1174 | } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE))) { |
4286325b | 1175 | if (test_and_set_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) { |
bd83a4c4 MS |
1176 | rs->ti->error = "Only one stripe_cache argument pair allowed"; |
1177 | return -EINVAL; | |
1178 | } | |
1179 | ||
9d09e663 N |
1180 | /* |
1181 | * In device-mapper, we specify things in sectors, but | |
1182 | * MD records this value in kB | |
1183 | */ | |
1184 | value /= 2; | |
1185 | ||
bd83a4c4 MS |
1186 | if (!rt_is_raid456(rt)) { |
1187 | rs->ti->error = "Inappropriate argument: stripe_cache"; | |
1188 | return -EINVAL; | |
1189 | } | |
1190 | if (raid5_set_cache_size(&rs->md, (int)value)) { | |
1191 | rs->ti->error = "Bad stripe_cache size"; | |
1192 | return -EINVAL; | |
1193 | } | |
702108d1 | 1194 | |
3fa6cf38 | 1195 | } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE))) { |
4286325b | 1196 | if (test_and_set_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) { |
bd83a4c4 MS |
1197 | rs->ti->error = "Only one min_recovery_rate argument pair allowed"; |
1198 | return -EINVAL; | |
1199 | } | |
1200 | if (value > INT_MAX) { | |
1201 | rs->ti->error = "min_recovery_rate out of range"; | |
1202 | return -EINVAL; | |
1203 | } | |
9d09e663 | 1204 | rs->md.sync_speed_min = (int)value; |
3fa6cf38 | 1205 | } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) { |
4286325b | 1206 | if (test_and_set_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) { |
bd83a4c4 MS |
1207 | rs->ti->error = "Only one max_recovery_rate argument pair allowed"; |
1208 | return -EINVAL; | |
1209 | } | |
1210 | if (value > INT_MAX) { | |
1211 | rs->ti->error = "max_recovery_rate out of range"; | |
1212 | return -EINVAL; | |
1213 | } | |
9d09e663 | 1214 | rs->md.sync_speed_max = (int)value; |
3fa6cf38 | 1215 | } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE))) { |
4286325b | 1216 | if (test_and_set_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) { |
bd83a4c4 MS |
1217 | rs->ti->error = "Only one region_size argument pair allowed"; |
1218 | return -EINVAL; | |
1219 | } | |
702108d1 | 1220 | |
c1084561 | 1221 | region_size = value; |
3fa6cf38 | 1222 | } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES))) { |
4286325b | 1223 | if (test_and_set_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) { |
bd83a4c4 MS |
1224 | rs->ti->error = "Only one raid10_copies argument pair allowed"; |
1225 | return -EINVAL; | |
1226 | } | |
702108d1 | 1227 | |
bb91a63f | 1228 | if (!__within_range(value, 2, rs->md.raid_disks)) { |
bd83a4c4 MS |
1229 | rs->ti->error = "Bad value for 'raid10_copies'"; |
1230 | return -EINVAL; | |
1231 | } | |
702108d1 | 1232 | |
63f33b8d | 1233 | raid10_copies = value; |
9d09e663 N |
1234 | } else { |
1235 | DMERR("Unable to parse RAID parameter: %s", key); | |
bd83a4c4 MS |
1236 | rs->ti->error = "Unable to parse RAID parameter"; |
1237 | return -EINVAL; | |
9d09e663 N |
1238 | } |
1239 | } | |
1240 | ||
c1084561 JB |
1241 | if (validate_region_size(rs, region_size)) |
1242 | return -EINVAL; | |
1243 | ||
1244 | if (rs->md.chunk_sectors) | |
542f9038 | 1245 | max_io_len = rs->md.chunk_sectors; |
c1084561 | 1246 | else |
542f9038 | 1247 | max_io_len = region_size; |
c1084561 | 1248 | |
542f9038 MS |
1249 | if (dm_set_target_max_io_len(rs->ti, max_io_len)) |
1250 | return -EINVAL; | |
32737279 | 1251 | |
33e53f06 | 1252 | if (rt_is_raid10(rt)) { |
bd83a4c4 MS |
1253 | if (raid10_copies > rs->md.raid_disks) { |
1254 | rs->ti->error = "Not enough devices to satisfy specification"; | |
1255 | return -EINVAL; | |
1256 | } | |
63f33b8d | 1257 | |
33e53f06 | 1258 | rs->md.new_layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies); |
bd83a4c4 MS |
1259 | if (rs->md.new_layout < 0) { |
1260 | rs->ti->error = "Error getting raid10 format"; | |
1261 | return rs->md.new_layout; | |
1262 | } | |
33e53f06 HM |
1263 | |
1264 | rt = get_raid_type_by_ll(10, rs->md.new_layout); | |
bd83a4c4 MS |
1265 | if (!rt) { |
1266 | rs->ti->error = "Failed to recognize new raid10 layout"; | |
1267 | return -EINVAL; | |
1268 | } | |
33e53f06 HM |
1269 | |
1270 | if ((rt->algorithm == ALGORITHM_RAID10_DEFAULT || | |
1271 | rt->algorithm == ALGORITHM_RAID10_NEAR) && | |
4286325b | 1272 | test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) { |
bd83a4c4 MS |
1273 | rs->ti->error = "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible"; |
1274 | return -EINVAL; | |
1275 | } | |
fe5d2f4a | 1276 | |
63f33b8d JB |
1277 | /* (Len * #mirrors) / #devices */ |
1278 | sectors_per_dev = rs->ti->len * raid10_copies; | |
1279 | sector_div(sectors_per_dev, rs->md.raid_disks); | |
1280 | ||
33e53f06 | 1281 | rs->md.layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies); |
63f33b8d | 1282 | rs->md.new_layout = rs->md.layout; |
33e53f06 | 1283 | } else if (!rt_is_raid1(rt) && |
bd83a4c4 MS |
1284 | sector_div(sectors_per_dev, (rs->md.raid_disks - rt->parity_devs))) { |
1285 | rs->ti->error = "Target length not divisible by number of data devices"; | |
1286 | return -EINVAL; | |
1287 | } | |
702108d1 | 1288 | |
33e53f06 | 1289 | rs->raid10_copies = raid10_copies; |
c039c332 JB |
1290 | rs->md.dev_sectors = sectors_per_dev; |
1291 | ||
9d09e663 N |
1292 | /* Assume there are no metadata devices until the drives are parsed */ |
1293 | rs->md.persistent = 0; | |
1294 | rs->md.external = 1; | |
1295 | ||
f090279e HM |
1296 | /* Check, if any invalid ctr arguments have been passed in for the raid level */ |
1297 | return rs_check_for_invalid_flags(rs); | |
9d09e663 N |
1298 | } |
1299 | ||
3a1c1ef2 HM |
1300 | /* Return # of data stripes as kept in mddev as of @rs (i.e. as of superblock) */ |
1301 | static unsigned int mddev_data_stripes(struct raid_set *rs) | |
1302 | { | |
1303 | return rs->md.raid_disks - rs->raid_type->parity_devs; | |
1304 | } | |
1305 | ||
9d09e663 N |
1306 | static void do_table_event(struct work_struct *ws) |
1307 | { | |
1308 | struct raid_set *rs = container_of(ws, struct raid_set, md.event_work); | |
1309 | ||
1310 | dm_table_event(rs->ti->table); | |
1311 | } | |
1312 | ||
1313 | static int raid_is_congested(struct dm_target_callbacks *cb, int bits) | |
1314 | { | |
1315 | struct raid_set *rs = container_of(cb, struct raid_set, callbacks); | |
1316 | ||
5c675f83 | 1317 | return mddev_congested(&rs->md, bits); |
9d09e663 N |
1318 | } |
1319 | ||
ecbfb9f1 HM |
1320 | /* |
1321 | * Make sure a valid takeover (level switch) is being requested on @rs |
1322 | * | |
1323 | * Conversions of raid sets from one MD personality to another | |
1324 | * have to conform to restrictions which are enforced here. | |
1325 | * | |
1326 | * Degradation is already checked for in rs_check_conversion() below. |
1327 | */ | |
1328 | static int rs_check_takeover(struct raid_set *rs) | |
1329 | { | |
1330 | struct mddev *mddev = &rs->md; | |
1331 | unsigned int near_copies; | |
1332 | ||
1333 | switch (mddev->level) { | |
1334 | case 0: | |
1335 | /* raid0 -> raid1/5 with one disk */ | |
1336 | if ((mddev->new_level == 1 || mddev->new_level == 5) && | |
1337 | mddev->raid_disks == 1) | |
1338 | return 0; | |
1339 | ||
1340 | /* raid0 -> raid10 */ | |
1341 | if (mddev->new_level == 10 && | |
1342 | !(rs->raid_disks % 2)) | |
1343 | return 0; | |
1344 | ||
1345 | /* raid0 with multiple disks -> raid4/5/6 */ | |
bb91a63f | 1346 | if (__within_range(mddev->new_level, 4, 6) && |
ecbfb9f1 HM |
1347 | mddev->new_layout == ALGORITHM_PARITY_N && |
1348 | mddev->raid_disks > 1) | |
1349 | return 0; | |
1350 | ||
1351 | break; | |
1352 | ||
1353 | case 10: | |
1354 | /* Can't takeover raid10_offset! */ | |
1355 | if (_is_raid10_offset(mddev->layout)) | |
1356 | break; | |
1357 | ||
1358 | near_copies = _raid10_near_copies(mddev->layout); | |
1359 | ||
1360 | /* raid10* -> raid0 */ | |
1361 | if (mddev->new_level == 0) { | |
1362 | /* Can takeover raid10_near with raid disks divisible by data copies! */ |
1363 | if (near_copies > 1 && | |
1364 | !(mddev->raid_disks % near_copies)) { | |
1365 | mddev->raid_disks /= near_copies; | |
1366 | mddev->delta_disks = mddev->raid_disks; | |
1367 | return 0; | |
1368 | } | |
1369 | ||
1370 | /* Can takeover raid10_far */ | |
1371 | if (near_copies == 1 && | |
1372 | _raid10_far_copies(mddev->layout) > 1) | |
1373 | return 0; | |
1374 | ||
1375 | break; | |
1376 | } | |
1377 | ||
1378 | /* raid10_{near,far} -> raid1 */ | |
1379 | if (mddev->new_level == 1 && | |
1380 | max(near_copies, _raid10_far_copies(mddev->layout)) == mddev->raid_disks) | |
1381 | return 0; | |
1382 | ||
1383 | /* raid10_{near,far} with 2 disks -> raid4/5 */ | |
bb91a63f | 1384 | if (__within_range(mddev->new_level, 4, 5) && |
ecbfb9f1 HM |
1385 | mddev->raid_disks == 2) |
1386 | return 0; | |
1387 | break; | |
1388 | ||
1389 | case 1: | |
1390 | /* raid1 with 2 disks -> raid4/5 */ | |
bb91a63f | 1391 | if (__within_range(mddev->new_level, 4, 5) && |
ecbfb9f1 HM |
1392 | mddev->raid_disks == 2) { |
1393 | mddev->degraded = 1; | |
1394 | return 0; | |
1395 | } | |
1396 | ||
1397 | /* raid1 -> raid0 */ | |
1398 | if (mddev->new_level == 0 && | |
1399 | mddev->raid_disks == 1) | |
1400 | return 0; | |
1401 | ||
1402 | /* raid1 -> raid10 */ | |
1403 | if (mddev->new_level == 10) | |
1404 | return 0; | |
1405 | ||
1406 | break; | |
1407 | ||
1408 | case 4: | |
1409 | /* raid4 -> raid0 */ | |
1410 | if (mddev->new_level == 0) | |
1411 | return 0; | |
1412 | ||
1413 | /* raid4 -> raid1/5 with 2 disks */ | |
1414 | if ((mddev->new_level == 1 || mddev->new_level == 5) && | |
1415 | mddev->raid_disks == 2) | |
1416 | return 0; | |
1417 | ||
1418 | /* raid4 -> raid5/6 with parity N */ | |
bb91a63f | 1419 | if (__within_range(mddev->new_level, 5, 6) && |
ecbfb9f1 HM |
1420 | mddev->layout == ALGORITHM_PARITY_N) |
1421 | return 0; | |
1422 | break; | |
1423 | ||
1424 | case 5: | |
1425 | /* raid5 with parity N -> raid0 */ | |
1426 | if (mddev->new_level == 0 && | |
1427 | mddev->layout == ALGORITHM_PARITY_N) | |
1428 | return 0; | |
1429 | ||
1430 | /* raid5 with parity N -> raid4 */ | |
1431 | if (mddev->new_level == 4 && | |
1432 | mddev->layout == ALGORITHM_PARITY_N) | |
1433 | return 0; | |
1434 | ||
1435 | /* raid5 with 2 disks -> raid1/4/10 */ | |
1436 | if ((mddev->new_level == 1 || mddev->new_level == 4 || mddev->new_level == 10) && | |
1437 | mddev->raid_disks == 2) | |
1438 | return 0; | |
1439 | ||
1440 | /* raid5 with parity N -> raid6 with parity N */ | |
1441 | if (mddev->new_level == 6 && | |
1442 | ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) || | |
bb91a63f | 1443 | __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC_6, ALGORITHM_RIGHT_SYMMETRIC_6))) |
ecbfb9f1 HM |
1444 | return 0; |
1445 | break; | |
1446 | ||
1447 | case 6: | |
1448 | /* raid6 with parity N -> raid0 */ | |
1449 | if (mddev->new_level == 0 && | |
1450 | mddev->layout == ALGORITHM_PARITY_N) | |
1451 | return 0; | |
1452 | ||
1453 | /* raid6 with parity N -> raid4 */ | |
1454 | if (mddev->new_level == 4 && | |
1455 | mddev->layout == ALGORITHM_PARITY_N) | |
1456 | return 0; | |
1457 | ||
1458 | /* raid6_*_n with parity N -> raid5_* */ | |
1459 | if (mddev->new_level == 5 && | |
1460 | ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) || | |
bb91a63f | 1461 | __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC))) |
ecbfb9f1 HM |
1462 | return 0; |
1463 | ||
1464 | default: | |
1465 | break; | |
1466 | } | |
1467 | ||
bd83a4c4 MS |
1468 | rs->ti->error = "takeover not possible"; |
1469 | return -EINVAL; | |
ecbfb9f1 HM |
1470 | } |
1471 | ||
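Two of the rules encoded in the switch above, restated as a minimal standalone C sketch (hypothetical helper names, not part of the driver): raid0 -> raid10 needs an even number of raid disks, and raid10_near -> raid0 shrinks the disk count by the number of near copies when it divides evenly.

#include <stdbool.h>
#include <stdio.h>

/* raid0 -> raid10 is only allowed with an even number of raid disks */
static bool raid0_to_raid10_ok(unsigned int raid_disks)
{
	return !(raid_disks % 2);
}

/* raid10_near -> raid0 works when the disk count divides by the near copies */
static bool raid10_near_to_raid0_ok(unsigned int raid_disks, unsigned int near_copies,
				    unsigned int *new_raid_disks)
{
	if (near_copies > 1 && !(raid_disks % near_copies)) {
		*new_raid_disks = raid_disks / near_copies;
		return true;
	}
	return false;
}

int main(void)
{
	unsigned int n;

	printf("raid0(6 disks) -> raid10: %s\n", raid0_to_raid10_ok(6) ? "ok" : "rejected");
	if (raid10_near_to_raid0_ok(6, 2, &n))
		printf("raid10_near(6 disks, 2 copies) -> raid0 with %u disks\n", n);
	return 0;
}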
1472 | /* True if @rs requested to be taken over */ | |
1473 | static bool rs_takeover_requested(struct raid_set *rs) | |
1474 | { | |
1475 | return rs->md.new_level != rs->md.level; | |
1476 | } | |
1477 | ||
33e53f06 | 1478 | /* Features */ |
9b6e5423 | 1479 | #define FEATURE_FLAG_SUPPORTS_V190 0x1 /* Supports extended superblock */ |
33e53f06 HM |
1480 | |
1481 | /* State flags for sb->flags */ | |
1482 | #define SB_FLAG_RESHAPE_ACTIVE 0x1 | |
1483 | #define SB_FLAG_RESHAPE_BACKWARDS 0x2 | |
1484 | ||
b12d437b JB |
1485 | /* |
1486 | * This structure is never routinely used by userspace, unlike md superblocks. | |
1487 | * Devices with this superblock should only ever be accessed via device-mapper. | |
1488 | */ | |
1489 | #define DM_RAID_MAGIC 0x64526D44 | |
1490 | struct dm_raid_superblock { | |
1491 | __le32 magic; /* "DmRd" */ | |
9b6e5423 | 1492 | __le32 compat_features; /* Used to indicate compatible features (like 1.9.0 ondisk metadata extension) */ |
b12d437b | 1493 | |
33e53f06 HM |
1494 | __le32 num_devices; /* Number of devices in this raid set. (Max 64) */ |
1495 | __le32 array_position; /* The position of this drive in the raid set */ | |
b12d437b JB |
1496 | |
1497 | __le64 events; /* Incremented by md when superblock updated */ | |
9b6e5423 | 1498 | __le64 failed_devices; /* Pre 1.9.0 part of bit field of devices to */ |
33e53f06 | 1499 | /* indicate failures (see extension below) */ |
b12d437b JB |
1500 | |
1501 | /* | |
1502 | * This offset tracks the progress of the repair or replacement of | |
1503 | * an individual drive. | |
1504 | */ | |
1505 | __le64 disk_recovery_offset; | |
1506 | ||
1507 | /* | |
33e53f06 | 1508 | * This offset tracks the progress of the initial raid set |
b12d437b JB |
1509 | * synchronisation/parity calculation. |
1510 | */ | |
1511 | __le64 array_resync_offset; | |
1512 | ||
1513 | /* | |
33e53f06 | 1514 | * raid characteristics |
b12d437b JB |
1515 | */ |
1516 | __le32 level; | |
1517 | __le32 layout; | |
1518 | __le32 stripe_sectors; | |
1519 | ||
33e53f06 | 1520 | /******************************************************************** |
9b6e5423 | 1521 | * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!! |
33e53f06 | 1522 | * |
9b6e5423 | 1523 | * FEATURE_FLAG_SUPPORTS_V190 in the features member indicates that those exist |
33e53f06 HM |
1524 | */ |
1525 | ||
1526 | __le32 flags; /* Flags defining array states for reshaping */ | |
1527 | ||
1528 | /* | |
1529 | * This offset tracks the progress of a raid | |
1530 | * set reshape in order to be able to restart it | |
1531 | */ | |
1532 | __le64 reshape_position; | |
1533 | ||
1534 | /* | |
1535 | * These define the properties of the array in case of an interrupted reshape | |
1536 | */ | |
1537 | __le32 new_level; | |
1538 | __le32 new_layout; | |
1539 | __le32 new_stripe_sectors; | |
1540 | __le32 delta_disks; | |
1541 | ||
1542 | __le64 array_sectors; /* Array size in sectors */ | |
1543 | ||
1544 | /* | |
1545 | * Sector offsets to data on devices (reshaping). | |
1546 | * Needed to support out of place reshaping, thus | |
1547 | * not writing over any stripes whilst converting | |
1548 | * them from old to new layout | |
1549 | */ | |
1550 | __le64 data_offset; | |
1551 | __le64 new_data_offset; | |
1552 | ||
1553 | __le64 sectors; /* Used device size in sectors */ | |
1554 | ||
1555 | /* | |
1556 | * Additonal Bit field of devices indicating failures to support | |
9b6e5423 | 1557 | * up to 256 devices with the 1.9.0 on-disk metadata format |
33e53f06 HM |
1558 | */ |
1559 | __le64 extended_failed_devices[DISKS_ARRAY_ELEMS - 1]; | |
1560 | ||
1561 | __le32 incompat_features; /* Used to indicate any incompatible features */ | |
1562 | ||
1563 | /* Always set rest up to logical block size to 0 when writing (see get_metadata_device() below). */ | |
b12d437b JB |
1564 | } __packed; |
1565 | ||
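For reference, the "DmRd" annotation on the magic field works out because all superblock members are little endian: 0x64526D44 stored as a __le32 lands on disk as the bytes 'D' 'm' 'R' 'd'. A quick host-side check (assumes a little-endian build; nothing here is driver API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t magic = 0x64526D44;	/* DM_RAID_MAGIC */
	unsigned char b[4];

	memcpy(b, &magic, sizeof(b));	/* byte order as written by a little-endian CPU */
	printf("%c%c%c%c\n", b[0], b[1], b[2], b[3]);	/* prints "DmRd" */
	return 0;
}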
3cb03002 | 1566 | static int read_disk_sb(struct md_rdev *rdev, int size) |
b12d437b JB |
1567 | { |
1568 | BUG_ON(!rdev->sb_page); | |
1569 | ||
1570 | if (rdev->sb_loaded) | |
1571 | return 0; | |
1572 | ||
796a5cf0 | 1573 | if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, 1)) { |
0447568f JB |
1574 | DMERR("Failed to read superblock of device at position %d", |
1575 | rdev->raid_disk); | |
c32fb9e7 | 1576 | md_error(rdev->mddev, rdev); |
b12d437b JB |
1577 | return -EINVAL; |
1578 | } | |
1579 | ||
1580 | rdev->sb_loaded = 1; | |
1581 | ||
1582 | return 0; | |
1583 | } | |
1584 | ||
33e53f06 HM |
1585 | static void sb_retrieve_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices) |
1586 | { | |
1587 | failed_devices[0] = le64_to_cpu(sb->failed_devices); | |
1588 | memset(failed_devices + 1, 0, sizeof(sb->extended_failed_devices)); | |
1589 | ||
4286325b | 1590 | if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) { |
33e53f06 HM |
1591 | int i = ARRAY_SIZE(sb->extended_failed_devices); |
1592 | ||
1593 | while (i--) | |
1594 | failed_devices[i+1] = le64_to_cpu(sb->extended_failed_devices[i]); | |
1595 | } | |
1596 | } | |
1597 | ||
7b34df74 HM |
1598 | static void sb_update_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices) |
1599 | { | |
1600 | int i = ARRAY_SIZE(sb->extended_failed_devices); | |
1601 | ||
1602 | sb->failed_devices = cpu_to_le64(failed_devices[0]); | |
1603 | while (i--) | |
1604 | sb->extended_failed_devices[i] = cpu_to_le64(failed_devices[i+1]); | |
1605 | } | |
1606 | ||
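The two helpers above split one logical bit field across the legacy failed_devices word and the v1.9.0 extended_failed_devices[] array. The index math is the usual word/bit split, sketched below for userspace; the four-word size is an assumption matching the 256-device limit mentioned in the superblock comment (the real count is DISKS_ARRAY_ELEMS, defined earlier in the file).

#include <stdint.h>
#include <stdio.h>

#define WORDS 4	/* assumed: 4 x 64 bits covers 256 devices */

static void mark_failed(uint64_t *bits, unsigned int dev)
{
	bits[dev / 64] |= UINT64_C(1) << (dev % 64);	/* word = dev / 64, bit = dev % 64 */
}

int main(void)
{
	uint64_t failed[WORDS] = { 0 };

	mark_failed(failed, 3);		/* ends up in the legacy sb->failed_devices word */
	mark_failed(failed, 70);	/* ends up in extended_failed_devices[0], bit 6 */
	printf("%#llx %#llx\n",
	       (unsigned long long)failed[0], (unsigned long long)failed[1]);
	return 0;
}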
1607 | /* | |
1608 | * Synchronize the superblock members with the raid set properties | |
1609 | * | |
1610 | * All superblock data is little endian. | |
1611 | */ | |
fd01b88c | 1612 | static void super_sync(struct mddev *mddev, struct md_rdev *rdev) |
b12d437b | 1613 | { |
7b34df74 HM |
1614 | bool update_failed_devices = false; |
1615 | unsigned int i; | |
1616 | uint64_t failed_devices[DISKS_ARRAY_ELEMS]; | |
b12d437b | 1617 | struct dm_raid_superblock *sb; |
81f382f9 | 1618 | struct raid_set *rs = container_of(mddev, struct raid_set, md); |
b12d437b | 1619 | |
7b34df74 HM |
1620 | /* No metadata device, no superblock */ |
1621 | if (!rdev->meta_bdev) | |
1622 | return; | |
1623 | ||
1624 | BUG_ON(!rdev->sb_page); | |
1625 | ||
b12d437b | 1626 | sb = page_address(rdev->sb_page); |
b12d437b | 1627 | |
7b34df74 | 1628 | sb_retrieve_failed_devices(sb, failed_devices); |
b12d437b | 1629 | |
7b34df74 HM |
1630 | for (i = 0; i < rs->raid_disks; i++) |
1631 | if (!rs->dev[i].data_dev || test_bit(Faulty, &rs->dev[i].rdev.flags)) { | |
1632 | update_failed_devices = true; | |
1633 | set_bit(i, (void *) failed_devices); | |
1634 | } | |
1635 | ||
1636 | if (update_failed_devices) | |
1637 | sb_update_failed_devices(sb, failed_devices); | |
b12d437b JB |
1638 | |
1639 | sb->magic = cpu_to_le32(DM_RAID_MAGIC); | |
9b6e5423 | 1640 | sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190); |
b12d437b JB |
1641 | |
1642 | sb->num_devices = cpu_to_le32(mddev->raid_disks); | |
1643 | sb->array_position = cpu_to_le32(rdev->raid_disk); | |
1644 | ||
1645 | sb->events = cpu_to_le64(mddev->events); | |
b12d437b JB |
1646 | |
1647 | sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset); | |
1648 | sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp); | |
1649 | ||
1650 | sb->level = cpu_to_le32(mddev->level); | |
1651 | sb->layout = cpu_to_le32(mddev->layout); | |
1652 | sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors); | |
7b34df74 HM |
1653 | |
1654 | sb->new_level = cpu_to_le32(mddev->new_level); | |
1655 | sb->new_layout = cpu_to_le32(mddev->new_layout); | |
1656 | sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors); | |
1657 | ||
1658 | sb->delta_disks = cpu_to_le32(mddev->delta_disks); | |
1659 | ||
1660 | smp_rmb(); /* Make sure we access most recent reshape position */ | |
1661 | sb->reshape_position = cpu_to_le64(mddev->reshape_position); | |
1662 | if (le64_to_cpu(sb->reshape_position) != MaxSector) { | |
1663 | /* Flag ongoing reshape */ | |
1664 | sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE); | |
1665 | ||
1666 | if (mddev->delta_disks < 0 || mddev->reshape_backwards) | |
1667 | sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_BACKWARDS); | |
4286325b MS |
1668 | } else { |
1669 | /* Clear reshape flags */ | |
1670 | sb->flags &= ~(cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE|SB_FLAG_RESHAPE_BACKWARDS)); | |
1671 | } | |
7b34df74 HM |
1672 | |
1673 | sb->array_sectors = cpu_to_le64(mddev->array_sectors); | |
1674 | sb->data_offset = cpu_to_le64(rdev->data_offset); | |
1675 | sb->new_data_offset = cpu_to_le64(rdev->new_data_offset); | |
1676 | sb->sectors = cpu_to_le64(rdev->sectors); | |
1677 | ||
1678 | /* Zero out the rest of the payload after the size of the superblock */ | |
1679 | memset(sb + 1, 0, rdev->sb_size - sizeof(*sb)); | |
b12d437b JB |
1680 | } |
1681 | ||
1682 | /* | |
1683 | * super_load | |
1684 | * | |
1685 | * This function creates a superblock if one is not found on the device | |
1686 | * and will decide which superblock to use if there's a choice. | |
1687 | * | |
1688 | * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise | |
1689 | */ | |
3cb03002 | 1690 | static int super_load(struct md_rdev *rdev, struct md_rdev *refdev) |
b12d437b | 1691 | { |
73c6f239 | 1692 | int r; |
b12d437b JB |
1693 | struct dm_raid_superblock *sb; |
1694 | struct dm_raid_superblock *refsb; | |
1695 | uint64_t events_sb, events_refsb; | |
1696 | ||
1697 | rdev->sb_start = 0; | |
40d43c4b HM |
1698 | rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev); |
1699 | if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) { | |
1700 | DMERR("superblock size of a logical block is no longer valid"); | |
1701 | return -EINVAL; | |
1702 | } | |
b12d437b | 1703 | |
73c6f239 HM |
1704 | r = read_disk_sb(rdev, rdev->sb_size); |
1705 | if (r) | |
1706 | return r; | |
b12d437b JB |
1707 | |
1708 | sb = page_address(rdev->sb_page); | |
3aa3b2b2 JB |
1709 | |
1710 | /* | |
1711 | * Two cases that we want to write new superblocks and rebuild: | |
1712 | * 1) New device (no matching magic number) | |
1713 | * 2) Device specified for rebuild (!In_sync w/ offset == 0) | |
1714 | */ | |
1715 | if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) || | |
1716 | (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) { | |
b12d437b JB |
1717 | super_sync(rdev->mddev, rdev); |
1718 | ||
1719 | set_bit(FirstUse, &rdev->flags); | |
9b6e5423 | 1720 | sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190); |
b12d437b JB |
1721 | |
1722 | /* Force writing of superblocks to disk */ | |
1723 | set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags); | |
1724 | ||
1725 | /* Any superblock is better than none, choose that if given */ | |
1726 | return refdev ? 0 : 1; | |
1727 | } | |
1728 | ||
1729 | if (!refdev) | |
1730 | return 1; | |
1731 | ||
1732 | events_sb = le64_to_cpu(sb->events); | |
1733 | ||
1734 | refsb = page_address(refdev->sb_page); | |
1735 | events_refsb = le64_to_cpu(refsb->events); | |
1736 | ||
1737 | return (events_sb > events_refsb) ? 1 : 0; | |
1738 | } | |
1739 | ||
33e53f06 | 1740 | static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev) |
b12d437b JB |
1741 | { |
1742 | int role; | |
33e53f06 HM |
1743 | unsigned int d; |
1744 | struct mddev *mddev = &rs->md; | |
b12d437b | 1745 | uint64_t events_sb; |
33e53f06 | 1746 | uint64_t failed_devices[DISKS_ARRAY_ELEMS]; |
b12d437b | 1747 | struct dm_raid_superblock *sb; |
33e53f06 | 1748 | uint32_t new_devs = 0, rebuild_and_new = 0, rebuilds = 0; |
dafb20fa | 1749 | struct md_rdev *r; |
b12d437b JB |
1750 | struct dm_raid_superblock *sb2; |
1751 | ||
1752 | sb = page_address(rdev->sb_page); | |
1753 | events_sb = le64_to_cpu(sb->events); | |
b12d437b JB |
1754 | |
1755 | /* | |
1756 | * Initialise to 1 if this is a new superblock. | |
1757 | */ | |
1758 | mddev->events = events_sb ? : 1; | |
1759 | ||
33e53f06 HM |
1760 | mddev->reshape_position = MaxSector; |
1761 | ||
b12d437b | 1762 | /* |
33e53f06 HM |
1763 | * Reshaping is supported, i.e. reshape_position is valid | |
1764 | * in superblock and superblock content is authoritative. | |
b12d437b | 1765 | */ |
4286325b | 1766 | if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) { |
33e53f06 HM |
1767 | /* Superblock is authoritative wrt given raid set layout! */ |
1768 | mddev->raid_disks = le32_to_cpu(sb->num_devices); | |
1769 | mddev->level = le32_to_cpu(sb->level); | |
1770 | mddev->layout = le32_to_cpu(sb->layout); | |
1771 | mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors); | |
1772 | mddev->new_level = le32_to_cpu(sb->new_level); | |
1773 | mddev->new_layout = le32_to_cpu(sb->new_layout); | |
1774 | mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors); | |
1775 | mddev->delta_disks = le32_to_cpu(sb->delta_disks); | |
1776 | mddev->array_sectors = le64_to_cpu(sb->array_sectors); | |
1777 | ||
1778 | /* raid was reshaping and got interrupted */ | |
4286325b MS |
1779 | if (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_ACTIVE) { |
1780 | if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) { | |
33e53f06 HM |
1781 | DMERR("Reshape requested but raid set is still reshaping"); |
1782 | return -EINVAL; | |
1783 | } | |
b12d437b | 1784 | |
33e53f06 | 1785 | if (mddev->delta_disks < 0 || |
4286325b | 1786 | (!mddev->delta_disks && (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_BACKWARDS))) |
33e53f06 HM |
1787 | mddev->reshape_backwards = 1; |
1788 | else | |
1789 | mddev->reshape_backwards = 0; | |
1790 | ||
1791 | mddev->reshape_position = le64_to_cpu(sb->reshape_position); | |
1792 | rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout); | |
1793 | } | |
1794 | ||
1795 | } else { | |
1796 | /* | |
9b6e5423 | 1797 | * No takeover/reshaping, because we don't have the extended v1.9.0 metadata |
33e53f06 HM |
1798 | */ |
1799 | if (le32_to_cpu(sb->level) != mddev->level) { | |
1800 | DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)"); | |
1801 | return -EINVAL; | |
1802 | } | |
1803 | if (le32_to_cpu(sb->layout) != mddev->layout) { | |
1804 | DMERR("Reshaping raid sets not yet supported. (raid layout change)"); | |
43157840 MS |
1805 | DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout); |
1806 | DMERR(" Old layout: %s w/ %d copies", | |
33e53f06 HM |
1807 | raid10_md_layout_to_format(le32_to_cpu(sb->layout)), |
1808 | raid10_md_layout_to_copies(le32_to_cpu(sb->layout))); | |
43157840 | 1809 | DMERR(" New layout: %s w/ %d copies", |
33e53f06 HM |
1810 | raid10_md_layout_to_format(mddev->layout), |
1811 | raid10_md_layout_to_copies(mddev->layout)); | |
1812 | return -EINVAL; | |
1813 | } | |
1814 | if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) { | |
1815 | DMERR("Reshaping raid sets not yet supported. (stripe sectors change)"); | |
1816 | return -EINVAL; | |
1817 | } | |
1818 | ||
1819 | /* We can only change the number of devices in raid1 with old (i.e. pre 1.0.7) metadata */ | |
1820 | if (!rt_is_raid1(rs->raid_type) && | |
1821 | (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) { | |
1822 | DMERR("Reshaping raid sets not yet supported. (device count change from %u to %u)", | |
1823 | sb->num_devices, mddev->raid_disks); | |
1824 | return -EINVAL; | |
1825 | } | |
1826 | ||
1827 | /* Table line is checked vs. authoritative superblock */ | |
1828 | rs_set_new(rs); | |
b12d437b JB |
1829 | } |
1830 | ||
4286325b | 1831 | if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) |
b12d437b JB |
1832 | mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset); |
1833 | ||
1834 | /* | |
1835 | * During load, we set FirstUse if a new superblock was written. | |
1836 | * There are two reasons we might not have a superblock: | |
33e53f06 | 1837 | * 1) The raid set is brand new - in which case, all of the |
43157840 | 1838 | * devices must have their In_sync bit set. Also, |
b12d437b | 1839 | * recovery_cp must be 0, unless forced. |
33e53f06 | 1840 | * 2) This is a new device being added to an old raid set |
b12d437b JB |
1841 | * and the new device needs to be rebuilt - in which |
1842 | * case the In_sync bit will /not/ be set and | |
1843 | * recovery_cp must be MaxSector. | |
1844 | */ | |
33e53f06 | 1845 | d = 0; |
dafb20fa | 1846 | rdev_for_each(r, mddev) { |
33e53f06 HM |
1847 | if (test_bit(FirstUse, &r->flags)) |
1848 | new_devs++; | |
1849 | ||
b12d437b | 1850 | if (!test_bit(In_sync, &r->flags)) { |
33e53f06 HM |
1851 | DMINFO("Device %d specified for rebuild; clearing superblock", |
1852 | r->raid_disk); | |
b12d437b | 1853 | rebuilds++; |
33e53f06 HM |
1854 | |
1855 | if (test_bit(FirstUse, &r->flags)) | |
1856 | rebuild_and_new++; | |
1857 | } | |
1858 | ||
1859 | d++; | |
b12d437b JB |
1860 | } |
1861 | ||
33e53f06 HM |
1862 | if (new_devs == rs->raid_disks || !rebuilds) { |
1863 | /* Replace a broken device */ | |
1864 | if (new_devs == 1 && !rs->delta_disks) | |
1865 | ; | |
1866 | if (new_devs == rs->raid_disks) { | |
1867 | DMINFO("Superblocks created for new raid set"); | |
b12d437b | 1868 | set_bit(MD_ARRAY_FIRST_USE, &mddev->flags); |
4286325b | 1869 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); |
33e53f06 HM |
1870 | mddev->recovery_cp = 0; |
1871 | } else if (new_devs && new_devs != rs->raid_disks && !rebuilds) { | |
1872 | DMERR("New device injected into existing raid set without " | |
1873 | "'delta_disks' or 'rebuild' parameter specified"); | |
b12d437b JB |
1874 | return -EINVAL; |
1875 | } | |
33e53f06 HM |
1876 | } else if (new_devs && new_devs != rebuilds) { |
1877 | DMERR("%u 'rebuild' devices cannot be injected into" | |
1878 | " a raid set with %u other first-time devices", | |
1879 | rebuilds, new_devs); | |
b12d437b | 1880 | return -EINVAL; |
33e53f06 HM |
1881 | } else if (rebuilds) { |
1882 | if (rebuild_and_new && rebuilds != rebuild_and_new) { | |
1883 | DMERR("new device%s provided without 'rebuild'", | |
1884 | new_devs > 1 ? "s" : ""); | |
1885 | return -EINVAL; | |
1886 | } else if (mddev->recovery_cp != MaxSector) { | |
1887 | DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)", | |
1888 | (unsigned long long) mddev->recovery_cp); | |
1889 | return -EINVAL; | |
1890 | } else if (mddev->reshape_position != MaxSector) { | |
1891 | DMERR("'rebuild' specified while raid set is being reshaped"); | |
1892 | return -EINVAL; | |
1893 | } | |
b12d437b JB |
1894 | } |
1895 | ||
1896 | /* | |
1897 | * Now we set the Faulty bit for those devices that are | |
1898 | * recorded in the superblock as failed. | |
1899 | */ | |
33e53f06 | 1900 | sb_retrieve_failed_devices(sb, failed_devices); |
dafb20fa | 1901 | rdev_for_each(r, mddev) { |
b12d437b JB |
1902 | if (!r->sb_page) |
1903 | continue; | |
1904 | sb2 = page_address(r->sb_page); | |
1905 | sb2->failed_devices = 0; | |
33e53f06 | 1906 | memset(sb2->extended_failed_devices, 0, sizeof(sb2->extended_failed_devices)); |
b12d437b JB |
1907 | |
1908 | /* | |
1909 | * Check for any device re-ordering. | |
1910 | */ | |
1911 | if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) { | |
1912 | role = le32_to_cpu(sb2->array_position); | |
33e53f06 HM |
1913 | if (role < 0) |
1914 | continue; | |
1915 | ||
b12d437b | 1916 | if (role != r->raid_disk) { |
33e53f06 HM |
1917 | if (_is_raid10_near(mddev->layout)) { |
1918 | if (mddev->raid_disks % _raid10_near_copies(mddev->layout) || | |
bd83a4c4 MS |
1919 | rs->raid_disks % rs->raid10_copies) { |
1920 | rs->ti->error = | |
1921 | "Cannot change raid10 near set to odd # of devices!"; | |
1922 | return -EINVAL; | |
1923 | } | |
33e53f06 HM |
1924 | |
1925 | sb2->array_position = cpu_to_le32(r->raid_disk); | |
1926 | ||
1927 | } else if (!(rs_is_raid10(rs) && rt_is_raid0(rs->raid_type)) && | |
bd83a4c4 MS |
1928 | !(rs_is_raid0(rs) && rt_is_raid10(rs->raid_type)) && |
1929 | !rt_is_raid1(rs->raid_type)) { | |
1930 | rs->ti->error = "Cannot change device positions in raid set"; | |
1931 | return -EINVAL; | |
1932 | } | |
33e53f06 | 1933 | |
bd83a4c4 | 1934 | DMINFO("raid device #%d now at position #%d", role, r->raid_disk); |
b12d437b JB |
1935 | } |
1936 | ||
1937 | /* | |
1938 | * Partial recovery is performed on | |
1939 | * returning failed devices. | |
1940 | */ | |
33e53f06 | 1941 | if (test_bit(role, (void *) failed_devices)) |
b12d437b JB |
1942 | set_bit(Faulty, &r->flags); |
1943 | } | |
1944 | } | |
1945 | ||
1946 | return 0; | |
1947 | } | |
1948 | ||
0cf45031 | 1949 | static int super_validate(struct raid_set *rs, struct md_rdev *rdev) |
b12d437b | 1950 | { |
0cf45031 | 1951 | struct mddev *mddev = &rs->md; |
33e53f06 HM |
1952 | struct dm_raid_superblock *sb; |
1953 | ||
3a1c1ef2 | 1954 | if (rs_is_raid0(rs) || !rdev->sb_page) |
33e53f06 HM |
1955 | return 0; |
1956 | ||
1957 | sb = page_address(rdev->sb_page); | |
b12d437b JB |
1958 | |
1959 | /* | |
1960 | * If mddev->events is not set, we know we have not yet initialized | |
1961 | * the array. | |
1962 | */ | |
33e53f06 | 1963 | if (!mddev->events && super_init_validation(rs, rdev)) |
b12d437b JB |
1964 | return -EINVAL; |
1965 | ||
9b6e5423 MS |
1966 | if (le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) { |
1967 | rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags"; | |
1968 | return -EINVAL; | |
1969 | } | |
1970 | ||
1971 | if (sb->incompat_features) { | |
ecbfb9f1 | 1972 | rs->ti->error = "Unable to assemble array: No incompatible feature flags supported yet"; |
4c9971ca HM |
1973 | return -EINVAL; |
1974 | } | |
1975 | ||
0cf45031 | 1976 | /* Enable bitmap creation for RAID levels != 0 */ |
676fa5ad | 1977 | mddev->bitmap_info.offset = rt_is_raid0(rs->raid_type) ? 0 : to_sector(4096); |
0cf45031 HM |
1978 | rdev->mddev->bitmap_info.default_offset = mddev->bitmap_info.offset; |
1979 | ||
33e53f06 HM |
1980 | if (!test_and_clear_bit(FirstUse, &rdev->flags)) { |
1981 | /* Retrieve device size stored in superblock to be prepared for shrink */ | |
1982 | rdev->sectors = le64_to_cpu(sb->sectors); | |
b12d437b | 1983 | rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset); |
33e53f06 HM |
1984 | if (rdev->recovery_offset == MaxSector) |
1985 | set_bit(In_sync, &rdev->flags); | |
1986 | /* | |
1987 | * If no reshape in progress -> we're recovering single | |
1988 | * disk(s) and have to set the device(s) to out-of-sync | |
1989 | */ | |
1990 | else if (rs->md.reshape_position == MaxSector) | |
1991 | clear_bit(In_sync, &rdev->flags); /* Mandatory for recovery */ | |
b12d437b JB |
1992 | } |
1993 | ||
1994 | /* | |
1995 | * If a device comes back, set it as not In_sync and no longer faulty. | |
1996 | */ | |
33e53f06 HM |
1997 | if (test_and_clear_bit(Faulty, &rdev->flags)) { |
1998 | rdev->recovery_offset = 0; | |
b12d437b JB |
1999 | clear_bit(In_sync, &rdev->flags); |
2000 | rdev->saved_raid_disk = rdev->raid_disk; | |
b12d437b JB |
2001 | } |
2002 | ||
33e53f06 HM |
2003 | /* Reshape support -> restore respective data offsets */ | |
2004 | rdev->data_offset = le64_to_cpu(sb->data_offset); | |
2005 | rdev->new_data_offset = le64_to_cpu(sb->new_data_offset); | |
b12d437b JB |
2006 | |
2007 | return 0; | |
2008 | } | |
2009 | ||
2010 | /* | |
2011 | * Analyse superblocks and select the freshest. | |
2012 | */ | |
2013 | static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) | |
2014 | { | |
73c6f239 | 2015 | int r; |
0447568f | 2016 | struct raid_dev *dev; |
a9ad8526 | 2017 | struct md_rdev *rdev, *tmp, *freshest; |
fd01b88c | 2018 | struct mddev *mddev = &rs->md; |
b12d437b JB |
2019 | |
2020 | freshest = NULL; | |
a9ad8526 | 2021 | rdev_for_each_safe(rdev, tmp, mddev) { |
761becff | 2022 | /* |
c76d53f4 | 2023 | * Skipping super_load due to CTR_FLAG_SYNC will cause |
761becff | 2024 | * the array to undergo initialization again as |
43157840 | 2025 | * though it were new. This is the intended effect |
761becff JB |
2026 | * of the "sync" directive. |
2027 | * | |
2028 | * When reshaping capability is added, we must ensure | |
2029 | * that the "sync" directive is disallowed during the | |
2030 | * reshape. | |
2031 | */ | |
4286325b | 2032 | if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) |
761becff JB |
2033 | continue; |
2034 | ||
b12d437b JB |
2035 | if (!rdev->meta_bdev) |
2036 | continue; | |
2037 | ||
73c6f239 | 2038 | r = super_load(rdev, freshest); |
b12d437b | 2039 | |
73c6f239 | 2040 | switch (r) { |
b12d437b JB |
2041 | case 1: |
2042 | freshest = rdev; | |
2043 | break; | |
2044 | case 0: | |
2045 | break; | |
2046 | default: | |
0447568f | 2047 | dev = container_of(rdev, struct raid_dev, rdev); |
55ebbb59 JB |
2048 | if (dev->meta_dev) |
2049 | dm_put_device(ti, dev->meta_dev); | |
0447568f | 2050 | |
55ebbb59 JB |
2051 | dev->meta_dev = NULL; |
2052 | rdev->meta_bdev = NULL; | |
0447568f | 2053 | |
55ebbb59 JB |
2054 | if (rdev->sb_page) |
2055 | put_page(rdev->sb_page); | |
0447568f | 2056 | |
55ebbb59 | 2057 | rdev->sb_page = NULL; |
0447568f | 2058 | |
55ebbb59 | 2059 | rdev->sb_loaded = 0; |
0447568f | 2060 | |
55ebbb59 JB |
2061 | /* |
2062 | * We might be able to salvage the data device | |
2063 | * even though the meta device has failed. For | |
2064 | * now, we behave as though '- -' had been | |
2065 | * set for this device in the table. | |
2066 | */ | |
2067 | if (dev->data_dev) | |
2068 | dm_put_device(ti, dev->data_dev); | |
0447568f | 2069 | |
55ebbb59 JB |
2070 | dev->data_dev = NULL; |
2071 | rdev->bdev = NULL; | |
0447568f | 2072 | |
55ebbb59 | 2073 | list_del(&rdev->same_set); |
b12d437b JB |
2074 | } |
2075 | } | |
2076 | ||
2077 | if (!freshest) | |
2078 | return 0; | |
2079 | ||
bd83a4c4 MS |
2080 | if (validate_raid_redundancy(rs)) { |
2081 | rs->ti->error = "Insufficient redundancy to activate array"; | |
2082 | return -EINVAL; | |
2083 | } | |
55ebbb59 | 2084 | |
b12d437b JB |
2085 | /* |
2086 | * Validation of the freshest device provides the source of | |
2087 | * validation for the remaining devices. | |
2088 | */ | |
bd83a4c4 MS |
2089 | if (super_validate(rs, freshest)) { |
2090 | rs->ti->error = "Unable to assemble array: Invalid superblocks"; | |
2091 | return -EINVAL; | |
2092 | } | |
b12d437b | 2093 | |
dafb20fa | 2094 | rdev_for_each(rdev, mddev) |
0cf45031 | 2095 | if ((rdev != freshest) && super_validate(rs, rdev)) |
b12d437b JB |
2096 | return -EINVAL; |
2097 | ||
2098 | return 0; | |
2099 | } | |
2100 | ||
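The "freshest" device chosen above is simply the one whose superblock carries the highest events counter, compared pairwise by super_load(). A trivial standalone sketch of that selection over example counters:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t events[] = { 12, 15, 15, 9 };	/* example per-device sb->events values */
	unsigned int i, freshest = 0;

	/* strict '>' keeps the earlier candidate on a tie, like super_load() */
	for (i = 1; i < sizeof(events) / sizeof(events[0]); i++)
		if (events[i] > events[freshest])
			freshest = i;

	printf("freshest superblock: device %u (events %llu)\n",
	       freshest, (unsigned long long)events[freshest]);
	return 0;
}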
ecbfb9f1 HM |
2101 | /* Userpace reordered disks -> adjust raid_disk indexes in @rs */ |
2102 | static void _reorder_raid_disk_indexes(struct raid_set *rs) | |
2103 | { | |
2104 | int i = 0; | |
2105 | struct md_rdev *rdev; | |
2106 | ||
2107 | rdev_for_each(rdev, &rs->md) { | |
2108 | rdev->raid_disk = i++; | |
2109 | rdev->saved_raid_disk = rdev->new_raid_disk = -1; | |
2110 | } | |
2111 | } | |
2112 | ||
2113 | /* | |
2114 | * Setup @rs for takeover by a different raid level | |
2115 | */ | |
2116 | static int rs_setup_takeover(struct raid_set *rs) | |
2117 | { | |
2118 | struct mddev *mddev = &rs->md; | |
2119 | struct md_rdev *rdev; | |
2120 | unsigned int d = mddev->raid_disks = rs->raid_disks; | |
2121 | sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset; | |
2122 | ||
2123 | if (rt_is_raid10(rs->raid_type)) { | |
2124 | if (mddev->level == 0) { | |
2125 | /* Userspace reordered disks -> adjust raid_disk indexes */ | |
2126 | _reorder_raid_disk_indexes(rs); | |
2127 | ||
2128 | /* raid0 -> raid10_far layout */ | |
2129 | mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR, | |
2130 | rs->raid10_copies); | |
2131 | } else if (mddev->level == 1) | |
2132 | /* raid1 -> raid10_near layout */ | |
2133 | mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR, | |
2134 | rs->raid_disks); | |
2135 | else | |
2136 | return -EINVAL; | |
2137 | ||
2138 | } | |
2139 | ||
2140 | clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags); | |
2141 | mddev->recovery_cp = MaxSector; | |
2142 | ||
2143 | while (d--) { | |
2144 | rdev = &rs->dev[d].rdev; | |
2145 | ||
2146 | if (test_bit(d, (void *) rs->rebuild_disks)) { | |
2147 | clear_bit(In_sync, &rdev->flags); | |
2148 | clear_bit(Faulty, &rdev->flags); | |
2149 | mddev->recovery_cp = rdev->recovery_offset = 0; | |
2150 | /* Bitmap has to be created when we do an "up" takeover */ | |
2151 | set_bit(MD_ARRAY_FIRST_USE, &mddev->flags); | |
2152 | } | |
2153 | ||
2154 | rdev->new_data_offset = new_data_offset; | |
2155 | } | |
2156 | ||
ecbfb9f1 HM |
2157 | return 0; |
2158 | } | |
2159 | ||
75b8e04b | 2160 | /* |
48cf06bc HM |
2161 | * Enable/disable discard support on RAID set depending on |
2162 | * RAID level and discard properties of underlying RAID members. | |
75b8e04b | 2163 | */ |
ecbfb9f1 | 2164 | static void configure_discard_support(struct raid_set *rs) |
75b8e04b | 2165 | { |
48cf06bc HM |
2166 | int i; |
2167 | bool raid456; | |
ecbfb9f1 | 2168 | struct dm_target *ti = rs->ti; |
48cf06bc | 2169 | |
75b8e04b HM |
2170 | /* Assume discards not supported until after checks below. */ |
2171 | ti->discards_supported = false; | |
2172 | ||
2173 | /* RAID level 4,5,6 require discard_zeroes_data for data integrity! */ | |
48cf06bc | 2174 | raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6); |
75b8e04b | 2175 | |
48cf06bc | 2176 | for (i = 0; i < rs->md.raid_disks; i++) { |
d20c4b08 | 2177 | struct request_queue *q; |
48cf06bc | 2178 | |
d20c4b08 HM |
2179 | if (!rs->dev[i].rdev.bdev) |
2180 | continue; | |
2181 | ||
2182 | q = bdev_get_queue(rs->dev[i].rdev.bdev); | |
48cf06bc HM |
2183 | if (!q || !blk_queue_discard(q)) |
2184 | return; | |
2185 | ||
2186 | if (raid456) { | |
2187 | if (!q->limits.discard_zeroes_data) | |
2188 | return; | |
2189 | if (!devices_handle_discard_safely) { | |
2190 | DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty."); | |
2191 | DMERR("Set dm-raid.devices_handle_discard_safely=Y to override."); | |
2192 | return; | |
2193 | } | |
2194 | } | |
2195 | } | |
2196 | ||
2197 | /* All RAID members properly support discards */ | |
75b8e04b HM |
2198 | ti->discards_supported = true; |
2199 | ||
2200 | /* | |
2201 | * RAID1 and RAID10 personalities require bio splitting, | |
48cf06bc | 2202 | * RAID0/4/5/6 don't and process large discard bios properly. |
75b8e04b | 2203 | */ |
48cf06bc | 2204 | ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10); |
75b8e04b HM |
2205 | ti->num_discard_bios = 1; |
2206 | } | |
2207 | ||
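A hedged restatement of the decision above as a standalone predicate (made-up parameter names; the real code walks each member's block queue): raid4/5/6 only get discard support when every member reports discard_zeroes_data and the devices_handle_discard_safely module override is set, while the other levels just need every member to support discards.

#include <stdbool.h>
#include <stdio.h>

static bool discards_ok(int level, bool all_members_discard,
			bool all_members_zero_discards, bool override)
{
	bool raid456 = (level == 4 || level == 5 || level == 6);

	if (!all_members_discard)
		return false;
	if (raid456 && (!all_members_zero_discards || !override))
		return false;
	return true;
}

int main(void)
{
	printf("raid5, members zero data, no override: %d\n",
	       discards_ok(5, true, true, false));	/* 0: override still required */
	printf("raid10, members support discard:       %d\n",
	       discards_ok(10, true, false, false));	/* 1 */
	return 0;
}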
9d09e663 | 2208 | /* |
73c6f239 | 2209 | * Construct a RAID0/1/10/4/5/6 mapping: |
9d09e663 | 2210 | * Args: |
43157840 MS |
2211 | * <raid_type> <#raid_params> <raid_params>{0,} \ |
2212 | * <#raid_devs> [<meta_dev1> <dev1>]{1,} | |
9d09e663 | 2213 | * |
43157840 | 2214 | * <raid_params> varies by <raid_type>. See 'parse_raid_params' for |
9d09e663 | 2215 | * details on possible <raid_params>. |
73c6f239 HM |
2216 | * |
2217 | * Userspace is free to initialize the metadata devices (and hence the superblocks) | |
2218 | * to enforce recreation based on the passed-in table parameters. | |
2219 | * | |
9d09e663 N |
2220 | */ |
2221 | static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv) | |
2222 | { | |
73c6f239 | 2223 | int r; |
9d09e663 | 2224 | struct raid_type *rt; |
92c83d79 | 2225 | unsigned num_raid_params, num_raid_devs; |
9d09e663 | 2226 | struct raid_set *rs = NULL; |
92c83d79 HM |
2227 | const char *arg; |
2228 | struct dm_arg_set as = { argc, argv }, as_nrd; | |
2229 | struct dm_arg _args[] = { | |
2230 | { 0, as.argc, "Cannot understand number of raid parameters" }, | |
2231 | { 1, 254, "Cannot understand number of raid devices parameters" } | |
2232 | }; | |
2233 | ||
2234 | /* Must have <raid_type> */ | |
2235 | arg = dm_shift_arg(&as); | |
bd83a4c4 MS |
2236 | if (!arg) { |
2237 | ti->error = "No arguments"; | |
2238 | return -EINVAL; | |
2239 | } | |
9d09e663 | 2240 | |
92c83d79 | 2241 | rt = get_raid_type(arg); |
bd83a4c4 MS |
2242 | if (!rt) { |
2243 | ti->error = "Unrecognised raid_type"; | |
2244 | return -EINVAL; | |
2245 | } | |
9d09e663 | 2246 | |
92c83d79 HM |
2247 | /* Must have <#raid_params> */ |
2248 | if (dm_read_arg_group(_args, &as, &num_raid_params, &ti->error)) | |
43157840 | 2249 | return -EINVAL; |
9d09e663 | 2250 | |
92c83d79 HM |
2251 | /* number of raid device tupples <meta_dev data_dev> */ |
2252 | as_nrd = as; | |
2253 | dm_consume_args(&as_nrd, num_raid_params); | |
2254 | _args[1].max = (as_nrd.argc - 1) / 2; | |
2255 | if (dm_read_arg(_args + 1, &as_nrd, &num_raid_devs, &ti->error)) | |
43157840 | 2256 | return -EINVAL; |
9d09e663 | 2257 | |
bb91a63f | 2258 | if (!__within_range(num_raid_devs, 1, MAX_RAID_DEVICES)) { |
bd83a4c4 MS |
2259 | ti->error = "Invalid number of supplied raid devices"; |
2260 | return -EINVAL; | |
2261 | } | |
3ca5a21a | 2262 | |
92c83d79 | 2263 | rs = context_alloc(ti, rt, num_raid_devs); |
9d09e663 N |
2264 | if (IS_ERR(rs)) |
2265 | return PTR_ERR(rs); | |
2266 | ||
92c83d79 | 2267 | r = parse_raid_params(rs, &as, num_raid_params); |
73c6f239 | 2268 | if (r) |
9d09e663 N |
2269 | goto bad; |
2270 | ||
702108d1 | 2271 | r = parse_dev_params(rs, &as); |
73c6f239 | 2272 | if (r) |
9d09e663 N |
2273 | goto bad; |
2274 | ||
b12d437b | 2275 | rs->md.sync_super = super_sync; |
ecbfb9f1 HM |
2276 | |
2277 | /* | |
2278 | * Backup any new raid set level, layout, ... | |
2279 | * requested to be able to compare to superblock | |
2280 | * members for conversion decisions. | |
2281 | */ | |
2282 | rs_config_backup(rs); | |
2283 | ||
73c6f239 HM |
2284 | r = analyse_superblocks(ti, rs); |
2285 | if (r) | |
b12d437b JB |
2286 | goto bad; |
2287 | ||
9d09e663 | 2288 | INIT_WORK(&rs->md.event_work, do_table_event); |
9d09e663 | 2289 | ti->private = rs; |
55a62eef | 2290 | ti->num_flush_bios = 1; |
9d09e663 | 2291 | |
ecbfb9f1 HM |
2292 | /* Restore any requested new layout for conversion decision */ |
2293 | rs_config_restore(rs); | |
2294 | ||
75b8e04b | 2295 | /* |
ecbfb9f1 HM |
2296 | * If a takeover is needed, just set the level to |
2297 | * the new requested one and allow the raid set to run. | |
75b8e04b | 2298 | */ |
ecbfb9f1 HM |
2299 | if (rs_takeover_requested(rs)) { |
2300 | r = rs_check_takeover(rs); | |
2301 | if (r) | |
2302 | return r; | |
2303 | ||
2304 | r = rs_setup_takeover(rs); | |
2305 | if (r) | |
2306 | return r; | |
2307 | ||
3a1c1ef2 | 2308 | /* Tell preresume to update superblocks with new layout */ |
4286325b | 2309 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); |
3a1c1ef2 HM |
2310 | rs_set_new(rs); |
2311 | } else | |
2312 | rs_set_cur(rs); | |
ecbfb9f1 HM |
2313 | |
2314 | /* Start raid set read-only and assumed clean to change in raid_resume() */ | |
2315 | rs->md.ro = 1; | |
2316 | rs->md.in_sync = 1; | |
2317 | set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); | |
75b8e04b | 2318 | |
0cf45031 HM |
2319 | /* Has to be held on running the array */ |
2320 | mddev_lock_nointr(&rs->md); | |
73c6f239 | 2321 | r = md_run(&rs->md); |
9d09e663 | 2322 | rs->md.in_sync = 0; /* Assume already marked dirty */ |
0cf45031 | 2323 | mddev_unlock(&rs->md); |
9d09e663 | 2324 | |
73c6f239 | 2325 | if (r) { |
9d09e663 N |
2326 | ti->error = "Fail to run raid array"; |
2327 | goto bad; | |
2328 | } | |
2329 | ||
63f33b8d | 2330 | if (ti->len != rs->md.array_sectors) { |
bd83a4c4 MS |
2331 | ti->error = "Array size does not match requested target length"; |
2332 | r = -EINVAL; | |
63f33b8d JB |
2333 | goto size_mismatch; |
2334 | } | |
9d09e663 | 2335 | rs->callbacks.congested_fn = raid_is_congested; |
9d09e663 N |
2336 | dm_table_add_target_callbacks(ti->table, &rs->callbacks); |
2337 | ||
32737279 | 2338 | mddev_suspend(&rs->md); |
9d09e663 N |
2339 | return 0; |
2340 | ||
63f33b8d JB |
2341 | size_mismatch: |
2342 | md_stop(&rs->md); | |
9d09e663 N |
2343 | bad: |
2344 | context_free(rs); | |
2345 | ||
73c6f239 | 2346 | return r; |
9d09e663 N |
2347 | } |
2348 | ||
2349 | static void raid_dtr(struct dm_target *ti) | |
2350 | { | |
2351 | struct raid_set *rs = ti->private; | |
2352 | ||
2353 | list_del_init(&rs->callbacks.list); | |
2354 | md_stop(&rs->md); | |
2355 | context_free(rs); | |
2356 | } | |
2357 | ||
7de3ee57 | 2358 | static int raid_map(struct dm_target *ti, struct bio *bio) |
9d09e663 N |
2359 | { |
2360 | struct raid_set *rs = ti->private; | |
fd01b88c | 2361 | struct mddev *mddev = &rs->md; |
9d09e663 N |
2362 | |
2363 | mddev->pers->make_request(mddev, bio); | |
2364 | ||
2365 | return DM_MAPIO_SUBMITTED; | |
2366 | } | |
2367 | ||
3a1c1ef2 | 2368 | /* Return string describing the current sync action of @mddev */ |
be83651f JB |
2369 | static const char *decipher_sync_action(struct mddev *mddev) |
2370 | { | |
2371 | if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) | |
2372 | return "frozen"; | |
2373 | ||
2374 | if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || | |
2375 | (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) { | |
2376 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) | |
2377 | return "reshape"; | |
2378 | ||
2379 | if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { | |
2380 | if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) | |
2381 | return "resync"; | |
2382 | else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) | |
2383 | return "check"; | |
2384 | return "repair"; | |
2385 | } | |
2386 | ||
2387 | if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) | |
2388 | return "recover"; | |
2389 | } | |
2390 | ||
2391 | return "idle"; | |
2392 | } | |
2393 | ||
3a1c1ef2 HM |
2394 | /* |
2395 | * Return status string @rdev | |
2396 | * | |
2397 | * Status characters: | |
2398 | * | |
2399 | * 'D' = Dead/Failed device | |
2400 | * 'a' = Alive but not in-sync | |
2401 | * 'A' = Alive and in-sync | |
2402 | */ | |
2403 | static const char *_raid_dev_status(struct md_rdev *rdev, bool array_in_sync) | |
9d09e663 | 2404 | { |
3a1c1ef2 HM |
2405 | if (test_bit(Faulty, &rdev->flags)) |
2406 | return "D"; | |
2407 | else if (!array_in_sync || !test_bit(In_sync, &rdev->flags)) | |
2408 | return "a"; | |
2409 | else | |
2410 | return "A"; | |
2411 | } | |
9d09e663 | 2412 | |
3a1c1ef2 HM |
2413 | /* Helper to return resync/reshape progress for @rs and @array_in_sync */ |
2414 | static sector_t rs_get_progress(struct raid_set *rs, | |
2415 | sector_t resync_max_sectors, bool *array_in_sync) | |
2416 | { | |
2417 | sector_t r, recovery_cp, curr_resync_completed; | |
2418 | struct mddev *mddev = &rs->md; | |
9d09e663 | 2419 | |
3a1c1ef2 HM |
2420 | curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp; |
2421 | recovery_cp = mddev->recovery_cp; | |
2422 | *array_in_sync = false; | |
2423 | ||
2424 | if (rs_is_raid0(rs)) { | |
2425 | r = resync_max_sectors; | |
2426 | *array_in_sync = true; | |
2427 | ||
2428 | } else { | |
2429 | r = mddev->reshape_position; | |
2430 | ||
2431 | /* Reshape is relative to the array size */ | |
2432 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) || | |
2433 | r != MaxSector) { | |
2434 | if (r == MaxSector) { | |
2435 | *array_in_sync = true; | |
2436 | r = resync_max_sectors; | |
0cf45031 | 2437 | } else { |
3a1c1ef2 HM |
2438 | /* Got to reverse on backward reshape */ |
2439 | if (mddev->reshape_backwards) | |
2440 | r = mddev->array_sectors - r; | |
2441 | ||
2442 | /* Divide by # of data stripes */ | |
2443 | sector_div(r, mddev_data_stripes(rs)); | |
0cf45031 | 2444 | } |
3a1c1ef2 HM |
2445 | |
2446 | /* Sync is relative to the component device size */ | |
2447 | } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) | |
2448 | r = curr_resync_completed; | |
2449 | else | |
2450 | r = recovery_cp; | |
2451 | ||
2452 | if (r == MaxSector) { | |
2453 | /* | |
2454 | * Sync complete. | |
2455 | */ | |
2456 | *array_in_sync = true; | |
2457 | r = resync_max_sectors; | |
2458 | } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { | |
2459 | /* | |
2460 | * If "check" or "repair" is occurring, the raid set has | |
2461 | * undergone an initial sync and the health characters | |
2462 | * should not be 'a' anymore. | |
2463 | */ | |
2464 | *array_in_sync = true; | |
0cf45031 | 2465 | } else { |
3a1c1ef2 | 2466 | struct md_rdev *rdev; |
be83651f | 2467 | |
3a1c1ef2 HM |
2468 | /* |
2469 | * The raid set may be doing an initial sync, or it may | |
43157840 | 2470 | * be rebuilding individual components. If all the |
3a1c1ef2 HM |
2471 | * devices are In_sync, then it is the raid set that is |
2472 | * being initialized. | |
2473 | */ | |
2474 | rdev_for_each(rdev, mddev) | |
2475 | if (!test_bit(In_sync, &rdev->flags)) | |
2476 | *array_in_sync = true; | |
2477 | #if 0 | |
2478 | r = 0; /* HM FIXME: TESTME: https://bugzilla.redhat.com/show_bug.cgi?id=1210637 ? */ | |
2479 | #endif | |
2e727c3c | 2480 | } |
3a1c1ef2 HM |
2481 | } |
2482 | ||
2483 | return r; | |
2484 | } | |
2485 | ||
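The sector count returned by rs_get_progress() is what raid_status() later prints as the in-sync/reshape ratio "<progress>/<resync_max_sectors>". A sketch of how a monitoring script might turn that pair into a percentage (consumes the status output, not driver code):

#include <stdio.h>

int main(void)
{
	/* e.g. the status fragment "... 524288/1048576 ..." */
	unsigned long long progress = 524288, total = 1048576;

	printf("%.2f%% in sync\n", 100.0 * (double)progress / (double)total);	/* 50.00% */
	return 0;
}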
2486 | /* Helper to return @dev name or "-" if !@dev */ | |
2487 | static const char *_get_dev_name(struct dm_dev *dev) | |
2488 | { | |
2489 | return dev ? dev->name : "-"; | |
2490 | } | |
2491 | ||
2492 | static void raid_status(struct dm_target *ti, status_type_t type, | |
2493 | unsigned int status_flags, char *result, unsigned int maxlen) | |
2494 | { | |
2495 | struct raid_set *rs = ti->private; | |
2496 | struct mddev *mddev = &rs->md; | |
2497 | struct r5conf *conf = mddev->private; | |
2498 | int max_nr_stripes = conf ? conf->max_nr_stripes : 0; | |
2499 | bool array_in_sync; | |
2500 | unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */ | |
2501 | unsigned int sz = 0; | |
2502 | unsigned int write_mostly_params = 0; | |
2503 | sector_t progress, resync_max_sectors, resync_mismatches; | |
2504 | const char *sync_action; | |
2505 | struct raid_type *rt; | |
2506 | struct md_rdev *rdev; | |
2507 | ||
2508 | switch (type) { | |
2509 | case STATUSTYPE_INFO: | |
2510 | /* *Should* always succeed */ | |
2511 | rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout); | |
2512 | if (!rt) | |
2513 | return; | |
2514 | ||
2515 | DMEMIT("%s %d ", rt ? rt->name : "unknown", mddev->raid_disks); | |
2516 | ||
2517 | /* Access most recent mddev properties for status output */ | |
2518 | smp_rmb(); | |
2519 | /* Get sensible max sectors even if raid set not yet started */ | |
4286325b | 2520 | resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ? |
3a1c1ef2 HM |
2521 | mddev->resync_max_sectors : mddev->dev_sectors; |
2522 | progress = rs_get_progress(rs, resync_max_sectors, &array_in_sync); | |
2523 | resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ? | |
2524 | (unsigned int) atomic64_read(&mddev->resync_mismatches) : 0; | |
2525 | sync_action = decipher_sync_action(&rs->md); | |
2526 | ||
2527 | /* HM FIXME: do we want another state char for raid0? It shows 'D' or 'A' now */ | |
2528 | rdev_for_each(rdev, mddev) | |
2529 | DMEMIT(_raid_dev_status(rdev, array_in_sync)); | |
9d09e663 | 2530 | |
2e727c3c | 2531 | /* |
3a1c1ef2 | 2532 | * In-sync/Reshape ratio: |
2e727c3c | 2533 | * The in-sync ratio shows the progress of: |
3a1c1ef2 HM |
2534 | * - Initializing the raid set |
2535 | * - Rebuilding a subset of devices of the raid set | |
2e727c3c JB |
2536 | * The user can distinguish between the two by referring |
2537 | * to the status characters. | |
3a1c1ef2 HM |
2538 | * |
2539 | * The reshape ratio shows the progress of | |
2540 | * changing the raid layout or the number of | |
2541 | * disks of a raid set | |
2e727c3c | 2542 | */ |
3a1c1ef2 HM |
2543 | DMEMIT(" %llu/%llu", (unsigned long long) progress, |
2544 | (unsigned long long) resync_max_sectors); | |
9d09e663 | 2545 | |
be83651f | 2546 | /* |
3a1c1ef2 HM |
2547 | * v1.5.0+: |
2548 | * | |
be83651f | 2549 | * Sync action: |
3a1c1ef2 | 2550 | * See Documentation/device-mapper/dm-raid.txt for |
be83651f JB |
2551 | * information on each of these states. |
2552 | */ | |
3a1c1ef2 | 2553 | DMEMIT(" %s", sync_action); |
be83651f JB |
2554 | |
2555 | /* | |
3a1c1ef2 HM |
2556 | * v1.5.0+: |
2557 | * | |
be83651f JB |
2558 | * resync_mismatches/mismatch_cnt |
2559 | * This field shows the number of discrepancies found when | |
3a1c1ef2 | 2560 | * performing a "check" of the raid set. |
be83651f | 2561 | */ |
3a1c1ef2 | 2562 | DMEMIT(" %llu", (unsigned long long) resync_mismatches); |
9d09e663 | 2563 | |
3a1c1ef2 | 2564 | /* |
9b6e5423 | 2565 | * v1.9.0+: |
3a1c1ef2 HM |
2566 | * |
2567 | * data_offset (needed for out of space reshaping) | |
2568 | * This field shows the data offset into the data | |
2569 | * image LV where the first stripe's data starts. | |
2570 | * | |
2571 | * We keep data_offset equal on all raid disks of the set, | |
2572 | * so retrieving it from the first raid disk is sufficient. | |
2573 | */ | |
2574 | DMEMIT(" %llu", (unsigned long long) rs->dev[0].rdev.data_offset); | |
2575 | break; | |
9d09e663 | 2576 | |
3a1c1ef2 HM |
2577 | case STATUSTYPE_TABLE: |
2578 | /* Report the table line string you would use to construct this raid set */ | |
2579 | ||
2580 | /* Calculate raid parameter count */ | |
2581 | rdev_for_each(rdev, mddev) | |
2582 | if (test_bit(WriteMostly, &rdev->flags)) | |
2583 | write_mostly_params += 2; | |
2584 | raid_param_cnt += memweight(rs->rebuild_disks, | |
2585 | DISKS_ARRAY_ELEMS * sizeof(*rs->rebuild_disks)) * 2 + | |
2586 | write_mostly_params + | |
2587 | hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) + | |
2588 | hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2; | |
2589 | /* Emit table line */ | |
2590 | DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors); | |
4286325b | 2591 | if (test_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) |
3fa6cf38 | 2592 | DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT), |
3a1c1ef2 | 2593 | raid10_md_layout_to_format(mddev->layout)); |
4286325b | 2594 | if (test_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) |
3fa6cf38 | 2595 | DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES), |
3a1c1ef2 | 2596 | raid10_md_layout_to_copies(mddev->layout)); |
4286325b | 2597 | if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) |
3fa6cf38 | 2598 | DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC)); |
4286325b | 2599 | if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) |
3fa6cf38 | 2600 | DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_SYNC)); |
4286325b | 2601 | if (test_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) |
3fa6cf38 | 2602 | DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE), |
3a1c1ef2 | 2603 | (unsigned long long) to_sector(mddev->bitmap_info.chunksize)); |
4286325b | 2604 | if (test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) |
3fa6cf38 | 2605 | DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET), |
3a1c1ef2 | 2606 | (unsigned long long) rs->data_offset); |
4286325b | 2607 | if (test_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) |
3fa6cf38 | 2608 | DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP), |
3a1c1ef2 | 2609 | mddev->bitmap_info.daemon_sleep); |
4286325b | 2610 | if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) |
3fa6cf38 | 2611 | DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS), |
3a1c1ef2 | 2612 | mddev->delta_disks); |
4286325b | 2613 | if (test_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) |
3fa6cf38 | 2614 | DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE), |
3a1c1ef2 HM |
2615 | max_nr_stripes); |
2616 | rdev_for_each(rdev, mddev) | |
2617 | if (test_bit(rdev->raid_disk, (void *) rs->rebuild_disks)) | |
3fa6cf38 | 2618 | DMEMIT(" %s %u", dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD), |
3a1c1ef2 HM |
2619 | rdev->raid_disk); |
2620 | rdev_for_each(rdev, mddev) | |
2621 | if (test_bit(WriteMostly, &rdev->flags)) | |
3fa6cf38 | 2622 | DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY), |
3a1c1ef2 | 2623 | rdev->raid_disk); |
4286325b | 2624 | if (test_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) |
3fa6cf38 | 2625 | DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND), |
3a1c1ef2 | 2626 | mddev->bitmap_info.max_write_behind); |
4286325b | 2627 | if (test_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) |
3fa6cf38 | 2628 | DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE), |
3a1c1ef2 | 2629 | mddev->sync_speed_max); |
4286325b | 2630 | if (test_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) |
3fa6cf38 | 2631 | DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE), |
3a1c1ef2 HM |
2632 | mddev->sync_speed_min); |
2633 | DMEMIT(" %d", rs->raid_disks); | |
2634 | rdev_for_each(rdev, mddev) { | |
2635 | struct raid_dev *rd = container_of(rdev, struct raid_dev, rdev); | |
2636 | ||
2637 | DMEMIT(" %s %s", _get_dev_name(rd->meta_dev), | |
2638 | _get_dev_name(rd->data_dev)); | |
9d09e663 N |
2639 | } |
2640 | } | |
9d09e663 N |
2641 | } |
2642 | ||
be83651f JB |
2643 | static int raid_message(struct dm_target *ti, unsigned argc, char **argv) |
2644 | { | |
2645 | struct raid_set *rs = ti->private; | |
2646 | struct mddev *mddev = &rs->md; | |
2647 | ||
2648 | if (!strcasecmp(argv[0], "reshape")) { | |
2649 | DMERR("Reshape not supported."); | |
2650 | return -EINVAL; | |
2651 | } | |
2652 | ||
2653 | if (!mddev->pers || !mddev->pers->sync_request) | |
2654 | return -EINVAL; | |
2655 | ||
2656 | if (!strcasecmp(argv[0], "frozen")) | |
2657 | set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | |
2658 | else | |
2659 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | |
2660 | ||
2661 | if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) { | |
2662 | if (mddev->sync_thread) { | |
2663 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); | |
2664 | md_reap_sync_thread(mddev); | |
2665 | } | |
2666 | } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || | |
2667 | test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) | |
2668 | return -EBUSY; | |
2669 | else if (!strcasecmp(argv[0], "resync")) | |
3a1c1ef2 HM |
2670 | ; /* MD_RECOVERY_NEEDED set below */ |
2671 | else if (!strcasecmp(argv[0], "recover")) | |
be83651f | 2672 | set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); |
3a1c1ef2 | 2673 | else { |
be83651f JB |
2674 | if (!strcasecmp(argv[0], "check")) |
2675 | set_bit(MD_RECOVERY_CHECK, &mddev->recovery); | |
2676 | else if (!!strcasecmp(argv[0], "repair")) | |
2677 | return -EINVAL; | |
2678 | set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); | |
2679 | set_bit(MD_RECOVERY_SYNC, &mddev->recovery); | |
2680 | } | |
2681 | if (mddev->ro == 2) { | |
2682 | /* A write to sync_action is enough to justify | |
2683 | * canceling read-auto mode | |
2684 | */ | |
2685 | mddev->ro = 0; | |
3a1c1ef2 | 2686 | if (!mddev->suspended && mddev->sync_thread) |
be83651f JB |
2687 | md_wakeup_thread(mddev->sync_thread); |
2688 | } | |
2689 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | |
3a1c1ef2 | 2690 | if (!mddev->suspended && mddev->thread) |
be83651f JB |
2691 | md_wakeup_thread(mddev->thread); |
2692 | ||
2693 | return 0; | |
2694 | } | |
2695 | ||
2696 | static int raid_iterate_devices(struct dm_target *ti, | |
2697 | iterate_devices_callout_fn fn, void *data) | |
9d09e663 N |
2698 | { |
2699 | struct raid_set *rs = ti->private; | |
2700 | unsigned i; | |
73c6f239 | 2701 | int r = 0; |
9d09e663 | 2702 | |
73c6f239 | 2703 | for (i = 0; !r && i < rs->md.raid_disks; i++) |
9d09e663 | 2704 | if (rs->dev[i].data_dev) |
73c6f239 | 2705 | r = fn(ti, |
9d09e663 N |
2706 | rs->dev[i].data_dev, |
2707 | 0, /* No offset on data devs */ | |
2708 | rs->md.dev_sectors, | |
2709 | data); | |
2710 | ||
73c6f239 | 2711 | return r; |
9d09e663 N |
2712 | } |
2713 | ||
2714 | static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits) | |
2715 | { | |
2716 | struct raid_set *rs = ti->private; | |
2717 | unsigned chunk_size = rs->md.chunk_sectors << 9; | |
d1688a6d | 2718 | struct r5conf *conf = rs->md.private; |
9d09e663 N |
2719 | |
2720 | blk_limits_io_min(limits, chunk_size); | |
2721 | blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded)); | |
2722 | } | |
2723 | ||
2724 | static void raid_presuspend(struct dm_target *ti) | |
2725 | { | |
2726 | struct raid_set *rs = ti->private; | |
2727 | ||
2728 | md_stop_writes(&rs->md); | |
2729 | } | |
2730 | ||
2731 | static void raid_postsuspend(struct dm_target *ti) | |
2732 | { | |
2733 | struct raid_set *rs = ti->private; | |
2734 | ||
2735 | mddev_suspend(&rs->md); | |
2736 | } | |
2737 | ||
f381e71b | 2738 | static void attempt_restore_of_faulty_devices(struct raid_set *rs) |
9d09e663 | 2739 | { |
9092c02d JB |
2740 | int i; |
2741 | uint64_t failed_devices, cleared_failed_devices = 0; | |
2742 | unsigned long flags; | |
2743 | struct dm_raid_superblock *sb; | |
9092c02d | 2744 | struct md_rdev *r; |
9d09e663 | 2745 | |
f381e71b JB |
2746 | for (i = 0; i < rs->md.raid_disks; i++) { |
2747 | r = &rs->dev[i].rdev; | |
2748 | if (test_bit(Faulty, &r->flags) && r->sb_page && | |
796a5cf0 MC |
2749 | sync_page_io(r, 0, r->sb_size, r->sb_page, REQ_OP_READ, 0, |
2750 | 1)) { | |
f381e71b JB |
2751 | DMINFO("Faulty %s device #%d has readable super block." |
2752 | " Attempting to revive it.", | |
2753 | rs->raid_type->name, i); | |
a4dc163a JB |
2754 | |
2755 | /* | |
2756 | * Faulty bit may be set, but sometimes the array can | |
2757 | * be suspended before the personalities can respond | |
2758 | * by removing the device from the array (i.e. calling | |
43157840 | 2759 | * 'hot_remove_disk'). If they haven't yet removed |
a4dc163a JB |
2760 | * the failed device, its 'raid_disk' number will be |
2761 | * '>= 0' - meaning we must call this function | |
2762 | * ourselves. | |
2763 | */ | |
2764 | if ((r->raid_disk >= 0) && | |
2765 | (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0)) | |
2766 | /* Failed to revive this device, try next */ | |
2767 | continue; | |
2768 | ||
f381e71b JB |
2769 | r->raid_disk = i; |
2770 | r->saved_raid_disk = i; | |
2771 | flags = r->flags; | |
2772 | clear_bit(Faulty, &r->flags); | |
2773 | clear_bit(WriteErrorSeen, &r->flags); | |
2774 | clear_bit(In_sync, &r->flags); | |
2775 | if (r->mddev->pers->hot_add_disk(r->mddev, r)) { | |
2776 | r->raid_disk = -1; | |
2777 | r->saved_raid_disk = -1; | |
2778 | r->flags = flags; | |
2779 | } else { | |
2780 | r->recovery_offset = 0; | |
2781 | cleared_failed_devices |= 1ULL << i; | |
2782 | } | |
2783 | } | |
2784 | } | |
2785 | if (cleared_failed_devices) { | |
2786 | rdev_for_each(r, &rs->md) { | |
2787 | sb = page_address(r->sb_page); | |
2788 | failed_devices = le64_to_cpu(sb->failed_devices); | |
2789 | failed_devices &= ~cleared_failed_devices; | |
2790 | sb->failed_devices = cpu_to_le64(failed_devices); | |
2791 | } | |
2792 | } | |
2793 | } | |
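
attempt_restore_of_faulty_devices() records each slot it manages to revive in the cleared_failed_devices mask and then clears exactly those bits out of every superblock's failed_devices field. Because the mask is a uint64_t, the per-slot bit must be formed with a 64-bit shift (1ULL << i above); a plain int shift would be undefined for slots 32 and up. A standalone sketch of the bookkeeping (userspace C, slot numbers made up):

    /* Illustrative only: clearing revived slots out of a failed-device mask. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t failed_devices = (1ULL << 1) | (1ULL << 3) | (1ULL << 40);
            uint64_t cleared_failed_devices = 0;

            /* Suppose slots 3 and 40 came back; slot 1 is still faulty. */
            cleared_failed_devices |= 1ULL << 3;
            cleared_failed_devices |= 1ULL << 40;   /* needs the 64-bit shift */

            failed_devices &= ~cleared_failed_devices;
            printf("failed_devices = 0x%llx\n", (unsigned long long)failed_devices);
            /* Prints 0x2: only slot 1 remains marked failed. */
            return 0;
    }
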
2794 | ||
ecbfb9f1 HM |
2795 | /* Load the dirty region bitmap */ |
2796 | static int _bitmap_load(struct raid_set *rs) | |
2797 | { | |
2798 | int r = 0; | |
2799 | ||
2800 | /* Try loading the bitmap unless "raid0", which does not have one */ | |
2801 | if (!rs_is_raid0(rs) && | |
4286325b | 2802 | !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) { |
ecbfb9f1 HM |
2803 | r = bitmap_load(&rs->md); |
2804 | if (r) | |
2805 | DMERR("Failed to load bitmap"); | |
2806 | } | |
2807 | ||
2808 | return r; | |
2809 | } | |
2810 | ||
2811 | static int raid_preresume(struct dm_target *ti) | |
2812 | { | |
2813 | struct raid_set *rs = ti->private; | |
2814 | struct mddev *mddev = &rs->md; | |
2815 | ||
2816 | /* This is a resume after a suspend of the set -> it's already started */ | |
4286325b | 2817 | if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags)) |
ecbfb9f1 HM |
2818 | return 0; |
2819 | ||
2820 | /* | |
2821 | * The superblocks need to be updated on disk if the | |
2822 | * array is new, or else _bitmap_load will overwrite them | |
2823 | * in core with old data. | |
2824 | * | |
2825 | * In case the array got modified (takeover/reshape/resize) | |
2826 | * or the data offsets on the component devices changed, they | |
2827 | * have to be updated as well. | |
2828 | * | |
2829 | * Have to switch to readwrite and back in order to | |
2830 | * allow for the superblock updates. | |
2831 | */ | |
4286325b | 2832 | if (test_and_clear_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags)) { |
ecbfb9f1 HM |
2833 | set_bit(MD_CHANGE_DEVS, &mddev->flags); |
2834 | mddev->ro = 0; | |
2835 | md_update_sb(mddev, 1); | |
2836 | mddev->ro = 1; | |
2837 | } | |
2838 | ||
2839 | /* | |
2840 | * Disable/enable discard support on raid set after any | |
2841 | * conversion, because devices may have been added | |
2842 | */ | |
2843 | configure_discard_support(rs); | |
2844 | ||
2845 | /* Load the bitmap from disk unless raid0 */ | |
2846 | return _bitmap_load(rs); | |
2847 | } | |
2848 | ||
f381e71b JB |
2849 | static void raid_resume(struct dm_target *ti) |
2850 | { | |
2851 | struct raid_set *rs = ti->private; | |
ecbfb9f1 | 2852 | struct mddev *mddev = &rs->md; |
f381e71b | 2853 | |
4286325b | 2854 | if (test_and_set_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) { |
ecbfb9f1 HM |
2855 | /* |
2856 | * A secondary resume while the device is active. | |
2857 | * Take this opportunity to check whether any failed | |
2858 | * devices are reachable again. | |
2859 | */ | |
2860 | attempt_restore_of_faulty_devices(rs); | |
47525e59 | 2861 | } |
34f8ac6d | 2862 | |
ecbfb9f1 | 2863 | mddev->ro = 0; |
3a1c1ef2 HM |
2864 | mddev->in_sync = 0; |
2865 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | |
2866 | ||
ecbfb9f1 HM |
2867 | if (mddev->suspended) |
2868 | mddev_resume(mddev); | |
9d09e663 N |
2869 | } |
2870 | ||
2871 | static struct target_type raid_target = { | |
2872 | .name = "raid", | |
9b6e5423 | 2873 | .version = {1, 9, 0}, |
9d09e663 N |
2874 | .module = THIS_MODULE, |
2875 | .ctr = raid_ctr, | |
2876 | .dtr = raid_dtr, | |
2877 | .map = raid_map, | |
2878 | .status = raid_status, | |
be83651f | 2879 | .message = raid_message, |
9d09e663 N |
2880 | .iterate_devices = raid_iterate_devices, |
2881 | .io_hints = raid_io_hints, | |
2882 | .presuspend = raid_presuspend, | |
2883 | .postsuspend = raid_postsuspend, | |
ecbfb9f1 | 2884 | .preresume = raid_preresume, |
9d09e663 N |
2885 | .resume = raid_resume, |
2886 | }; | |
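
The hooks above are what the device-mapper core invokes for a target named "raid"; userspace drives them with an ordinary dm table line of the form <start> <len> raid <raid_type> <#raid_params> <raid_params> <#raid_devs> <metadata_dev> <data_dev> ... (see Documentation/device-mapper/dm-raid.txt). A hedged example — length and major:minor numbers are invented, and "-" means "no metadata device":

    # Illustrative table only: 3-device raid5 (left symmetric), 128-sector
    # (64 KiB) chunks, no metadata devices, sizes and device numbers made up.
    0 41943040 raid raid5_ls 1 128 3 - 8:16 - 8:32 - 8:48

Such a line is typically loaded with "dmsetup create <name>", which in turn calls the ctr, preresume and resume hooks registered here.
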
2887 | ||
2888 | static int __init dm_raid_init(void) | |
2889 | { | |
fe5d2f4a JB |
2890 | DMINFO("Loading target version %u.%u.%u", |
2891 | raid_target.version[0], | |
2892 | raid_target.version[1], | |
2893 | raid_target.version[2]); | |
9d09e663 N |
2894 | return dm_register_target(&raid_target); |
2895 | } | |
2896 | ||
2897 | static void __exit dm_raid_exit(void) | |
2898 | { | |
2899 | dm_unregister_target(&raid_target); | |
2900 | } | |
2901 | ||
2902 | module_init(dm_raid_init); | |
2903 | module_exit(dm_raid_exit); | |
2904 | ||
48cf06bc HM |
2905 | module_param(devices_handle_discard_safely, bool, 0644); |
2906 | MODULE_PARM_DESC(devices_handle_discard_safely, | |
2907 | "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions"); | |
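
Because the parameter is declared with mode 0644, it can be given at module load time or toggled later through sysfs; either way it only influences discard support the next time a raid set is constructed or resumed, since configure_discard_support() is called from raid_preresume() above. A hedged example, assuming the standard /sys/module/<module>/parameters layout:

    # At load time:
    modprobe dm-raid devices_handle_discard_safely=Y
    # Or at runtime:
    echo Y > /sys/module/dm_raid/parameters/devices_handle_discard_safely
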
2908 | ||
ef9b85a6 MS |
2909 | MODULE_DESCRIPTION(DM_NAME " raid0/1/10/4/5/6 target"); |
2910 | MODULE_ALIAS("dm-raid0"); | |
63f33b8d JB |
2911 | MODULE_ALIAS("dm-raid1"); |
2912 | MODULE_ALIAS("dm-raid10"); | |
9d09e663 N |
2913 | MODULE_ALIAS("dm-raid4"); |
2914 | MODULE_ALIAS("dm-raid5"); | |
2915 | MODULE_ALIAS("dm-raid6"); | |
2916 | MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>"); | |
3a1c1ef2 | 2917 | MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>"); |
9d09e663 | 2918 | MODULE_LICENSE("GPL"); |