Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright (C) 2003 Sistina Software Limited. | |
3 | * | |
4 | * This file is released under the GPL. | |
5 | */ | |
6 | ||
7 | #include "dm.h" | |
8 | #include "dm-bio-list.h" | |
06386bbf | 9 | #include "dm-bio-record.h" |
1da177e4 LT |
10 | #include "dm-io.h" |
11 | #include "dm-log.h" | |
12 | #include "kcopyd.h" | |
13 | ||
14 | #include <linux/ctype.h> | |
15 | #include <linux/init.h> | |
16 | #include <linux/mempool.h> | |
17 | #include <linux/module.h> | |
18 | #include <linux/pagemap.h> | |
19 | #include <linux/slab.h> | |
20 | #include <linux/time.h> | |
21 | #include <linux/vmalloc.h> | |
22 | #include <linux/workqueue.h> | |
6f3c3f0a | 23 | #include <linux/log2.h> |
72f4b314 | 24 | #include <linux/hardirq.h> |
1da177e4 | 25 | |
72d94861 | 26 | #define DM_MSG_PREFIX "raid1" |
88be163a | 27 | #define DM_IO_PAGES 64 |
72d94861 | 28 | |
a8e6afa2 | 29 | #define DM_RAID1_HANDLE_ERRORS 0x01 |
f44db678 | 30 | #define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS) |
a8e6afa2 | 31 | |
33184048 | 32 | static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped); |
1da177e4 | 33 | |
1da177e4 LT |
34 | /*----------------------------------------------------------------- |
35 | * Region hash | |
36 | * | |
37 | * The mirror splits itself up into discrete regions. Each | |
38 | * region can be in one of three states: clean, dirty, | |
39 | * nosync. There is no need to put clean regions in the hash. | |
40 | * | |
41 | * In addition to being present in the hash table a region _may_ | |
42 | * be present on one of three lists. | |
43 | * | |
44 | * clean_regions: Regions on this list have no io pending to | |
45 | * them, they are in sync, we are no longer interested in them, | |
46 | * they are dull. rh_update_states() will remove them from the | |
47 | * hash table. | |
48 | * | |
49 | * quiesced_regions: These regions have been spun down, ready | |
50 | * for recovery. rh_recovery_start() will remove regions from | |
51 | * this list and hand them to kmirrord, which will schedule the | |
52 | * recovery io with kcopyd. | |
53 | * | |
54 | * recovered_regions: Regions that kcopyd has successfully | |
55 | * recovered. rh_update_states() will now schedule any delayed | |
56 | * io, up the recovery_count, and remove the region from the | |
57 | * hash. | |
58 | * | |
59 | * There are 2 locks: | |
60 | * A rw spin lock 'hash_lock' protects just the hash table, | |
61 | * this is never held in write mode from interrupt context, | |
62 | * which I believe means that we only have to disable irqs when | |
63 | * doing a write lock. | |
64 | * | |
65 | * An ordinary spin lock 'region_lock' that protects the three | |
66 | * lists in the region_hash, with the 'state', 'list' and | |
67 | * 'bhs_delayed' fields of the regions. This is used from irq | |
68 | * context, so all other uses will have to suspend local irqs. | |
69 | *---------------------------------------------------------------*/ | |
70 | struct mirror_set; | |
71 | struct region_hash { | |
72 | struct mirror_set *ms; | |
73 | uint32_t region_size; | |
74 | unsigned region_shift; | |
75 | ||
76 | /* holds persistent region state */ | |
77 | struct dirty_log *log; | |
78 | ||
79 | /* hash table */ | |
80 | rwlock_t hash_lock; | |
81 | mempool_t *region_pool; | |
82 | unsigned int mask; | |
83 | unsigned int nr_buckets; | |
84 | struct list_head *buckets; | |
85 | ||
86 | spinlock_t region_lock; | |
33184048 | 87 | atomic_t recovery_in_flight; |
1da177e4 LT |
88 | struct semaphore recovery_count; |
89 | struct list_head clean_regions; | |
90 | struct list_head quiesced_regions; | |
91 | struct list_head recovered_regions; | |
f44db678 | 92 | struct list_head failed_recovered_regions; |
1da177e4 LT |
93 | }; |
94 | ||
95 | enum { | |
96 | RH_CLEAN, | |
97 | RH_DIRTY, | |
98 | RH_NOSYNC, | |
99 | RH_RECOVERING | |
100 | }; | |
101 | ||
102 | struct region { | |
103 | struct region_hash *rh; /* FIXME: can we get rid of this ? */ | |
104 | region_t key; | |
105 | int state; | |
106 | ||
107 | struct list_head hash_list; | |
108 | struct list_head list; | |
109 | ||
110 | atomic_t pending; | |
111 | struct bio_list delayed_bios; | |
112 | }; | |
113 | ||
e4c8b3ba NB |
114 | |
115 | /*----------------------------------------------------------------- | |
116 | * Mirror set structures. | |
117 | *---------------------------------------------------------------*/ | |
72f4b314 JB |
118 | enum dm_raid1_error { |
119 | DM_RAID1_WRITE_ERROR, | |
120 | DM_RAID1_SYNC_ERROR, | |
121 | DM_RAID1_READ_ERROR | |
122 | }; | |
123 | ||
e4c8b3ba | 124 | struct mirror { |
aa5617c5 | 125 | struct mirror_set *ms; |
e4c8b3ba | 126 | atomic_t error_count; |
72f4b314 | 127 | uint32_t error_type; |
e4c8b3ba NB |
128 | struct dm_dev *dev; |
129 | sector_t offset; | |
130 | }; | |
131 | ||
132 | struct mirror_set { | |
133 | struct dm_target *ti; | |
134 | struct list_head list; | |
135 | struct region_hash rh; | |
136 | struct kcopyd_client *kcopyd_client; | |
a8e6afa2 | 137 | uint64_t features; |
e4c8b3ba | 138 | |
72f4b314 | 139 | spinlock_t lock; /* protects the lists */ |
e4c8b3ba NB |
140 | struct bio_list reads; |
141 | struct bio_list writes; | |
72f4b314 | 142 | struct bio_list failures; |
e4c8b3ba | 143 | |
88be163a | 144 | struct dm_io_client *io_client; |
06386bbf | 145 | mempool_t *read_record_pool; |
88be163a | 146 | |
e4c8b3ba NB |
147 | /* recovery */ |
148 | region_t nr_regions; | |
149 | int in_sync; | |
fc1ff958 | 150 | int log_failure; |
b80aa7a0 | 151 | atomic_t suspend; |
e4c8b3ba | 152 | |
72f4b314 | 153 | atomic_t default_mirror; /* Default mirror */ |
e4c8b3ba | 154 | |
6ad36fe2 HS |
155 | struct workqueue_struct *kmirrord_wq; |
156 | struct work_struct kmirrord_work; | |
72f4b314 | 157 | struct work_struct trigger_event; |
6ad36fe2 | 158 | |
e4c8b3ba NB |
159 | unsigned int nr_mirrors; |
160 | struct mirror mirror[0]; | |
161 | }; | |
162 | ||
1da177e4 LT |
163 | /* |
164 | * Conversion fns | |
165 | */ | |
166 | static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio) | |
167 | { | |
e4c8b3ba | 168 | return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift; |
1da177e4 LT |
169 | } |
170 | ||
171 | static inline sector_t region_to_sector(struct region_hash *rh, region_t region) | |
172 | { | |
173 | return region << rh->region_shift; | |
174 | } | |
175 | ||
6ad36fe2 HS |
176 | static void wake(struct mirror_set *ms) |
177 | { | |
178 | queue_work(ms->kmirrord_wq, &ms->kmirrord_work); | |
179 | } | |
180 | ||
1da177e4 LT |
181 | /* FIXME move this */ |
182 | static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw); | |
183 | ||
1da177e4 LT |
184 | #define MIN_REGIONS 64 |
185 | #define MAX_RECOVERY 1 | |
186 | static int rh_init(struct region_hash *rh, struct mirror_set *ms, | |
187 | struct dirty_log *log, uint32_t region_size, | |
188 | region_t nr_regions) | |
189 | { | |
190 | unsigned int nr_buckets, max_buckets; | |
191 | size_t i; | |
192 | ||
193 | /* | |
194 | * Calculate a suitable number of buckets for our hash | |
195 | * table. | |
196 | */ | |
197 | max_buckets = nr_regions >> 6; | |
198 | for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1) | |
199 | ; | |
200 | nr_buckets >>= 1; | |
201 | ||
202 | rh->ms = ms; | |
203 | rh->log = log; | |
204 | rh->region_size = region_size; | |
205 | rh->region_shift = ffs(region_size) - 1; | |
206 | rwlock_init(&rh->hash_lock); | |
207 | rh->mask = nr_buckets - 1; | |
208 | rh->nr_buckets = nr_buckets; | |
209 | ||
210 | rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets)); | |
211 | if (!rh->buckets) { | |
212 | DMERR("unable to allocate region hash memory"); | |
213 | return -ENOMEM; | |
214 | } | |
215 | ||
216 | for (i = 0; i < nr_buckets; i++) | |
217 | INIT_LIST_HEAD(rh->buckets + i); | |
218 | ||
219 | spin_lock_init(&rh->region_lock); | |
220 | sema_init(&rh->recovery_count, 0); | |
33184048 | 221 | atomic_set(&rh->recovery_in_flight, 0); |
1da177e4 LT |
222 | INIT_LIST_HEAD(&rh->clean_regions); |
223 | INIT_LIST_HEAD(&rh->quiesced_regions); | |
224 | INIT_LIST_HEAD(&rh->recovered_regions); | |
f44db678 | 225 | INIT_LIST_HEAD(&rh->failed_recovered_regions); |
1da177e4 | 226 | |
0eaae62a MD |
227 | rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS, |
228 | sizeof(struct region)); | |
1da177e4 LT |
229 | if (!rh->region_pool) { |
230 | vfree(rh->buckets); | |
231 | rh->buckets = NULL; | |
232 | return -ENOMEM; | |
233 | } | |
234 | ||
235 | return 0; | |
236 | } | |
237 | ||
238 | static void rh_exit(struct region_hash *rh) | |
239 | { | |
240 | unsigned int h; | |
241 | struct region *reg, *nreg; | |
242 | ||
243 | BUG_ON(!list_empty(&rh->quiesced_regions)); | |
244 | for (h = 0; h < rh->nr_buckets; h++) { | |
245 | list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) { | |
246 | BUG_ON(atomic_read(&reg->pending)); | |
247 | mempool_free(reg, rh->region_pool); | |
248 | } | |
249 | } | |
250 | ||
251 | if (rh->log) | |
252 | dm_destroy_dirty_log(rh->log); | |
253 | if (rh->region_pool) | |
254 | mempool_destroy(rh->region_pool); | |
255 | vfree(rh->buckets); | |
256 | } | |
257 | ||
258 | #define RH_HASH_MULT 2654435387U | |
259 | ||
260 | static inline unsigned int rh_hash(struct region_hash *rh, region_t region) | |
261 | { | |
262 | return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask; | |
263 | } | |
264 | ||
265 | static struct region *__rh_lookup(struct region_hash *rh, region_t region) | |
266 | { | |
267 | struct region *reg; | |
268 | ||
269 | list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list) | |
270 | if (reg->key == region) | |
271 | return reg; | |
272 | ||
273 | return NULL; | |
274 | } | |
275 | ||
276 | static void __rh_insert(struct region_hash *rh, struct region *reg) | |
277 | { | |
278 | unsigned int h = rh_hash(rh, reg->key); | |
279 | list_add(&reg->hash_list, rh->buckets + h); | |
280 | } | |
281 | ||
282 | static struct region *__rh_alloc(struct region_hash *rh, region_t region) | |
283 | { | |
284 | struct region *reg, *nreg; | |
285 | ||
286 | read_unlock(&rh->hash_lock); | |
c06aad85 DK |
287 | nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC); |
288 | if (unlikely(!nreg)) | |
289 | nreg = kmalloc(sizeof(struct region), GFP_NOIO); | |
1da177e4 LT |
290 | nreg->state = rh->log->type->in_sync(rh->log, region, 1) ? |
291 | RH_CLEAN : RH_NOSYNC; | |
292 | nreg->rh = rh; | |
293 | nreg->key = region; | |
294 | ||
295 | INIT_LIST_HEAD(&nreg->list); | |
296 | ||
297 | atomic_set(&nreg->pending, 0); | |
298 | bio_list_init(&nreg->delayed_bios); | |
299 | write_lock_irq(&rh->hash_lock); | |
300 | ||
301 | reg = __rh_lookup(rh, region); | |
302 | if (reg) | |
303 | /* we lost the race */ | |
304 | mempool_free(nreg, rh->region_pool); | |
305 | ||
306 | else { | |
307 | __rh_insert(rh, nreg); | |
308 | if (nreg->state == RH_CLEAN) { | |
309 | spin_lock(&rh->region_lock); | |
310 | list_add(&nreg->list, &rh->clean_regions); | |
311 | spin_unlock(&rh->region_lock); | |
312 | } | |
313 | reg = nreg; | |
314 | } | |
315 | write_unlock_irq(&rh->hash_lock); | |
316 | read_lock(&rh->hash_lock); | |
317 | ||
318 | return reg; | |
319 | } | |
320 | ||
321 | static inline struct region *__rh_find(struct region_hash *rh, region_t region) | |
322 | { | |
323 | struct region *reg; | |
324 | ||
325 | reg = __rh_lookup(rh, region); | |
326 | if (!reg) | |
327 | reg = __rh_alloc(rh, region); | |
328 | ||
329 | return reg; | |
330 | } | |
331 | ||
332 | static int rh_state(struct region_hash *rh, region_t region, int may_block) | |
333 | { | |
334 | int r; | |
335 | struct region *reg; | |
336 | ||
337 | read_lock(&rh->hash_lock); | |
338 | reg = __rh_lookup(rh, region); | |
339 | read_unlock(&rh->hash_lock); | |
340 | ||
341 | if (reg) | |
342 | return reg->state; | |
343 | ||
344 | /* | |
345 | * The region wasn't in the hash, so we fall back to the | |
346 | * dirty log. | |
347 | */ | |
348 | r = rh->log->type->in_sync(rh->log, region, may_block); | |
349 | ||
350 | /* | |
351 | * Any error from the dirty log (eg. -EWOULDBLOCK) gets | |
352 | * taken as a RH_NOSYNC | |
353 | */ | |
354 | return r == 1 ? RH_CLEAN : RH_NOSYNC; | |
355 | } | |
356 | ||
357 | static inline int rh_in_sync(struct region_hash *rh, | |
358 | region_t region, int may_block) | |
359 | { | |
360 | int state = rh_state(rh, region, may_block); | |
361 | return state == RH_CLEAN || state == RH_DIRTY; | |
362 | } | |
363 | ||
364 | static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list) | |
365 | { | |
366 | struct bio *bio; | |
367 | ||
368 | while ((bio = bio_list_pop(bio_list))) { | |
369 | queue_bio(ms, bio, WRITE); | |
370 | } | |
371 | } | |
372 | ||
f3ee6b2f JB |
373 | static void complete_resync_work(struct region *reg, int success) |
374 | { | |
375 | struct region_hash *rh = reg->rh; | |
376 | ||
377 | rh->log->type->set_region_sync(rh->log, reg->key, success); | |
b80aa7a0 JB |
378 | |
379 | /* | |
380 | * Dispatch the bios before we call 'wake_up_all'. | |
381 | * This is important because if we are suspending, | |
382 | * we want to know that recovery is complete and | |
383 | * the work queue is flushed. If we wake_up_all | |
384 | * before we dispatch_bios (queue bios and call wake()), | |
385 | * then we risk suspending before the work queue | |
386 | * has been properly flushed. | |
387 | */ | |
f3ee6b2f JB |
388 | dispatch_bios(rh->ms, &reg->delayed_bios);
389 | if (atomic_dec_and_test(&rh->recovery_in_flight)) | |
390 | wake_up_all(&_kmirrord_recovery_stopped); | |
391 | up(&rh->recovery_count); | |
392 | } | |
393 | ||
1da177e4 LT |
394 | static void rh_update_states(struct region_hash *rh) |
395 | { | |
396 | struct region *reg, *next; | |
397 | ||
398 | LIST_HEAD(clean); | |
399 | LIST_HEAD(recovered); | |
f44db678 | 400 | LIST_HEAD(failed_recovered); |
1da177e4 LT |
401 | |
402 | /* | |
403 | * Quickly grab the lists. | |
404 | */ | |
405 | write_lock_irq(&rh->hash_lock); | |
406 | spin_lock(&rh->region_lock); | |
407 | if (!list_empty(&rh->clean_regions)) { | |
408 | list_splice(&rh->clean_regions, &clean); | |
409 | INIT_LIST_HEAD(&rh->clean_regions); | |
410 | ||
943317ef | 411 | list_for_each_entry(reg, &clean, list) |
1da177e4 | 412 | list_del(&reg->hash_list);
1da177e4 LT |
413 | } |
414 | ||
415 | if (!list_empty(&rh->recovered_regions)) { | |
416 | list_splice(&rh->recovered_regions, &recovered); | |
417 | INIT_LIST_HEAD(&rh->recovered_regions); | |
418 | ||
419 | list_for_each_entry (reg, &recovered, list) | |
420 | list_del(&reg->hash_list); | |
421 | } | |
f44db678 JB |
422 | |
423 | if (!list_empty(&rh->failed_recovered_regions)) { | |
424 | list_splice(&rh->failed_recovered_regions, &failed_recovered); | |
425 | INIT_LIST_HEAD(&rh->failed_recovered_regions); | |
426 | ||
427 | list_for_each_entry(reg, &failed_recovered, list) | |
428 | list_del(&reg->hash_list); | |
429 | } | |
430 | ||
1da177e4 LT |
431 | spin_unlock(&rh->region_lock); |
432 | write_unlock_irq(&rh->hash_lock); | |
433 | ||
434 | /* | |
435 | * All the regions on the recovered and clean lists have | |
436 | * now been pulled out of the system, so no need to do | |
437 | * any more locking. | |
438 | */ | |
439 | list_for_each_entry_safe (reg, next, &recovered, list) { | |
440 | rh->log->type->clear_region(rh->log, reg->key); | |
f3ee6b2f | 441 | complete_resync_work(reg, 1); |
1da177e4 LT |
442 | mempool_free(reg, rh->region_pool); |
443 | } | |
444 | ||
f44db678 JB |
445 | list_for_each_entry_safe(reg, next, &failed_recovered, list) { |
446 | complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1); | |
447 | mempool_free(reg, rh->region_pool); | |
448 | } | |
449 | ||
943317ef JB |
450 | list_for_each_entry_safe(reg, next, &clean, list) { |
451 | rh->log->type->clear_region(rh->log, reg->key); | |
1da177e4 | 452 | mempool_free(reg, rh->region_pool); |
943317ef JB |
453 | } |
454 | ||
455 | rh->log->type->flush(rh->log); | |
1da177e4 LT |
456 | } |
457 | ||
458 | static void rh_inc(struct region_hash *rh, region_t region) | |
459 | { | |
460 | struct region *reg; | |
461 | ||
462 | read_lock(&rh->hash_lock); | |
463 | reg = __rh_find(rh, region); | |
844e8d90 | 464 | |
7692c5dd | 465 | spin_lock_irq(&rh->region_lock); |
844e8d90 JN |
466 | atomic_inc(&reg->pending);
467 | ||
1da177e4 | 468 | if (reg->state == RH_CLEAN) { |
1da177e4 LT |
469 | reg->state = RH_DIRTY; |
470 | list_del_init(&reg->list); /* take off the clean list */ | |
7692c5dd JB |
471 | spin_unlock_irq(&rh->region_lock); |
472 | ||
473 | rh->log->type->mark_region(rh->log, reg->key); | |
474 | } else | |
475 | spin_unlock_irq(&rh->region_lock); | |
476 | ||
1da177e4 | 477 | |
1da177e4 LT |
478 | read_unlock(&rh->hash_lock); |
479 | } | |
480 | ||
481 | static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios) | |
482 | { | |
483 | struct bio *bio; | |
484 | ||
485 | for (bio = bios->head; bio; bio = bio->bi_next) | |
486 | rh_inc(rh, bio_to_region(rh, bio)); | |
487 | } | |
488 | ||
489 | static void rh_dec(struct region_hash *rh, region_t region) | |
490 | { | |
491 | unsigned long flags; | |
492 | struct region *reg; | |
493 | int should_wake = 0; | |
494 | ||
495 | read_lock(&rh->hash_lock); | |
496 | reg = __rh_lookup(rh, region); | |
497 | read_unlock(&rh->hash_lock); | |
498 | ||
7692c5dd | 499 | spin_lock_irqsave(&rh->region_lock, flags); |
1da177e4 | 500 | if (atomic_dec_and_test(&reg->pending)) {
930d332a JN |
501 | /* |
502 | * There is no pending I/O for this region. | |
503 | * We can move the region to the corresponding list for the next action. | |
504 | * At this point, the region is not yet connected to any list. | |
505 | * | |
506 | * If the state is RH_NOSYNC, the region should be kept off | |
507 | * the clean list. | |
508 | * The hash entry for RH_NOSYNC will remain in memory | |
509 | * until the region is recovered or the map is reloaded. | |
510 | */ | |
511 | ||
512 | /* do nothing for RH_NOSYNC */ | |
1da177e4 LT |
513 | if (reg->state == RH_RECOVERING) { |
514 | list_add_tail(&reg->list, &rh->quiesced_regions); | |
930d332a | 515 | } else if (reg->state == RH_DIRTY) { |
1da177e4 LT |
516 | reg->state = RH_CLEAN; |
517 | list_add(&reg->list, &rh->clean_regions); | |
518 | } | |
1da177e4 LT |
519 | should_wake = 1; |
520 | } | |
7692c5dd | 521 | spin_unlock_irqrestore(&rh->region_lock, flags); |
1da177e4 LT |
522 | |
523 | if (should_wake) | |
6ad36fe2 | 524 | wake(rh->ms); |
1da177e4 LT |
525 | } |
526 | ||
527 | /* | |
528 | * Starts quiescing a region in preparation for recovery. | |
529 | */ | |
530 | static int __rh_recovery_prepare(struct region_hash *rh) | |
531 | { | |
532 | int r; | |
533 | struct region *reg; | |
534 | region_t region; | |
535 | ||
536 | /* | |
537 | * Ask the dirty log what's next. | |
538 | */ | |
539 | r = rh->log->type->get_resync_work(rh->log, &region); | |
540 | if (r <= 0) | |
541 | return r; | |
542 | ||
543 | /* | |
544 | * Get this region, and start it quiescing by setting the | |
545 | * recovering flag. | |
546 | */ | |
547 | read_lock(&rh->hash_lock); | |
548 | reg = __rh_find(rh, region); | |
549 | read_unlock(&rh->hash_lock); | |
550 | ||
551 | spin_lock_irq(&rh->region_lock); | |
552 | reg->state = RH_RECOVERING; | |
553 | ||
554 | /* Already quiesced ? */ | |
555 | if (atomic_read(&reg->pending)) | |
556 | list_del_init(&reg->list); | |
179e0917 AM |
557 | else |
558 | list_move(&reg->list, &rh->quiesced_regions); | |
1da177e4 | 559 | |
1da177e4 LT |
560 | spin_unlock_irq(&rh->region_lock); |
561 | ||
562 | return 1; | |
563 | } | |
564 | ||
565 | static void rh_recovery_prepare(struct region_hash *rh) | |
566 | { | |
33184048 JB |
567 | /* Extra reference to avoid race with rh_stop_recovery */ |
568 | atomic_inc(&rh->recovery_in_flight); | |
569 | ||
570 | while (!down_trylock(&rh->recovery_count)) { | |
571 | atomic_inc(&rh->recovery_in_flight); | |
1da177e4 | 572 | if (__rh_recovery_prepare(rh) <= 0) { |
33184048 | 573 | atomic_dec(&rh->recovery_in_flight); |
1da177e4 LT |
574 | up(&rh->recovery_count); |
575 | break; | |
576 | } | |
33184048 JB |
577 | } |
578 | ||
579 | /* Drop the extra reference */ | |
580 | if (atomic_dec_and_test(&rh->recovery_in_flight)) | |
581 | wake_up_all(&_kmirrord_recovery_stopped); | |
1da177e4 LT |
582 | } |
583 | ||
584 | /* | |
585 | * Returns any quiesced regions. | |
586 | */ | |
587 | static struct region *rh_recovery_start(struct region_hash *rh) | |
588 | { | |
589 | struct region *reg = NULL; | |
590 | ||
591 | spin_lock_irq(&rh->region_lock); | |
592 | if (!list_empty(&rh->quiesced_regions)) { | |
593 | reg = list_entry(rh->quiesced_regions.next, | |
594 | struct region, list); | |
595 | list_del_init(&reg->list); /* remove from the quiesced list */ | |
596 | } | |
597 | spin_unlock_irq(&rh->region_lock); | |
598 | ||
599 | return reg; | |
600 | } | |
601 | ||
1da177e4 LT |
602 | static void rh_recovery_end(struct region *reg, int success) |
603 | { | |
604 | struct region_hash *rh = reg->rh; | |
605 | ||
606 | spin_lock_irq(&rh->region_lock); | |
f44db678 JB |
607 | if (success) |
608 | list_add(&reg->list, &reg->rh->recovered_regions); | |
609 | else { | |
610 | reg->state = RH_NOSYNC; | |
611 | list_add(&reg->list, &reg->rh->failed_recovered_regions); | |
612 | } | |
1da177e4 LT |
613 | spin_unlock_irq(&rh->region_lock); |
614 | ||
6ad36fe2 | 615 | wake(rh->ms); |
1da177e4 LT |
616 | } |
617 | ||
fc1ff958 | 618 | static int rh_flush(struct region_hash *rh) |
1da177e4 | 619 | { |
fc1ff958 | 620 | return rh->log->type->flush(rh->log); |
1da177e4 LT |
621 | } |
622 | ||
623 | static void rh_delay(struct region_hash *rh, struct bio *bio) | |
624 | { | |
625 | struct region *reg; | |
626 | ||
627 | read_lock(&rh->hash_lock); | |
628 | reg = __rh_find(rh, bio_to_region(rh, bio)); | |
629 | bio_list_add(&reg->delayed_bios, bio); | |
630 | read_unlock(&rh->hash_lock); | |
631 | } | |
632 | ||
633 | static void rh_stop_recovery(struct region_hash *rh) | |
634 | { | |
635 | int i; | |
636 | ||
637 | /* wait for any recovering regions */ | |
638 | for (i = 0; i < MAX_RECOVERY; i++) | |
639 | down(&rh->recovery_count); | |
640 | } | |
641 | ||
642 | static void rh_start_recovery(struct region_hash *rh) | |
643 | { | |
644 | int i; | |
645 | ||
646 | for (i = 0; i < MAX_RECOVERY; i++) | |
647 | up(&rh->recovery_count); | |
648 | ||
6ad36fe2 | 649 | wake(rh->ms); |
1da177e4 LT |
650 | } |
651 | ||
06386bbf JB |
652 | #define MIN_READ_RECORDS 20 |
653 | struct dm_raid1_read_record { | |
654 | struct mirror *m; | |
655 | struct dm_bio_details details; | |
656 | }; | |
657 | ||
1da177e4 LT |
658 | /* |
659 | * Every mirror should look like this one. | |
660 | */ | |
661 | #define DEFAULT_MIRROR 0 | |
662 | ||
663 | /* | |
06386bbf JB |
664 | * This is yucky. We squirrel the mirror struct away inside |
665 | * bi_next for read/write buffers. This is safe since the bh | |
1da177e4 LT |
666 | * doesn't get submitted to the lower levels of block layer. |
667 | */ | |
06386bbf | 668 | static struct mirror *bio_get_m(struct bio *bio) |
1da177e4 | 669 | { |
06386bbf | 670 | return (struct mirror *) bio->bi_next; |
1da177e4 LT |
671 | } |
672 | ||
06386bbf | 673 | static void bio_set_m(struct bio *bio, struct mirror *m) |
1da177e4 | 674 | { |
06386bbf | 675 | bio->bi_next = (struct bio *) m; |
1da177e4 LT |
676 | } |
677 | ||
72f4b314 JB |
678 | static struct mirror *get_default_mirror(struct mirror_set *ms) |
679 | { | |
680 | return &ms->mirror[atomic_read(&ms->default_mirror)]; | |
681 | } | |
682 | ||
683 | static void set_default_mirror(struct mirror *m) | |
684 | { | |
685 | struct mirror_set *ms = m->ms; | |
686 | struct mirror *m0 = &(ms->mirror[0]); | |
687 | ||
688 | atomic_set(&ms->default_mirror, m - m0); | |
689 | } | |
690 | ||
691 | /* fail_mirror | |
692 | * @m: mirror device to fail | |
693 | * @error_type: one of the enum values, DM_RAID1_*_ERROR | |
694 | * | |
695 | * If errors are being handled, record the type of | |
696 | * error encountered for this device. If this type | |
697 | * of error has already been recorded, we can return; | |
698 | * otherwise, we must signal userspace by triggering | |
699 | * an event. Additionally, if the device is the | |
700 | * primary device, we must choose a new primary, but | |
701 | * only if the mirror is in-sync. | |
702 | * | |
703 | * This function must not block. | |
704 | */ | |
705 | static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type) | |
706 | { | |
707 | struct mirror_set *ms = m->ms; | |
708 | struct mirror *new; | |
709 | ||
710 | if (!errors_handled(ms)) | |
711 | return; | |
712 | ||
713 | /* | |
714 | * error_count is used for nothing more than a | |
715 | * simple way to tell if a device has encountered | |
716 | * errors. | |
717 | */ | |
718 | atomic_inc(&m->error_count); | |
719 | ||
720 | if (test_and_set_bit(error_type, &m->error_type)) | |
721 | return; | |
722 | ||
723 | if (m != get_default_mirror(ms)) | |
724 | goto out; | |
725 | ||
726 | if (!ms->in_sync) { | |
727 | /* | |
728 | * Better to issue requests to same failing device | |
729 | * than to risk returning corrupt data. | |
730 | */ | |
731 | DMERR("Primary mirror (%s) failed while out-of-sync: " | |
732 | "Reads may fail.", m->dev->name); | |
733 | goto out; | |
734 | } | |
735 | ||
736 | for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++) | |
737 | if (!atomic_read(&new->error_count)) { | |
738 | set_default_mirror(new); | |
739 | break; | |
740 | } | |
741 | ||
742 | if (unlikely(new == ms->mirror + ms->nr_mirrors)) | |
743 | DMWARN("All sides of mirror have failed."); | |
744 | ||
745 | out: | |
746 | schedule_work(&ms->trigger_event); | |
747 | } | |
748 | ||
1da177e4 LT |
749 | /*----------------------------------------------------------------- |
750 | * Recovery. | |
751 | * | |
752 | * When a mirror is first activated we may find that some regions | |
753 | * are in the no-sync state. We have to recover these by | |
754 | * recopying from the default mirror to all the others. | |
755 | *---------------------------------------------------------------*/ | |
756 | static void recovery_complete(int read_err, unsigned int write_err, | |
757 | void *context) | |
758 | { | |
8f0205b7 JB |
759 | struct region *reg = (struct region *)context; |
760 | struct mirror_set *ms = reg->rh->ms; | |
761 | int m, bit = 0; | |
1da177e4 | 762 | |
8f0205b7 | 763 | if (read_err) { |
f44db678 JB |
764 | /* Read error means the failure of default mirror. */ |
765 | DMERR_LIMIT("Unable to read primary mirror during recovery"); | |
8f0205b7 JB |
766 | fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR); |
767 | } | |
f44db678 | 768 | |
8f0205b7 | 769 | if (write_err) { |
f44db678 JB |
770 | DMERR_LIMIT("Write error during recovery (error = 0x%x)", |
771 | write_err); | |
8f0205b7 JB |
772 | /* |
773 | * Bits correspond to devices (excluding default mirror). | |
774 | * The default mirror cannot change during recovery. | |
775 | */ | |
776 | for (m = 0; m < ms->nr_mirrors; m++) { | |
777 | if (&ms->mirror[m] == get_default_mirror(ms)) | |
778 | continue; | |
779 | if (test_bit(bit, &write_err)) | |
780 | fail_mirror(ms->mirror + m, | |
781 | DM_RAID1_SYNC_ERROR); | |
782 | bit++; | |
783 | } | |
784 | } | |
f44db678 | 785 | |
ce503f59 | 786 | rh_recovery_end(reg, !(read_err || write_err)); |
1da177e4 LT |
787 | } |
788 | ||
789 | static int recover(struct mirror_set *ms, struct region *reg) | |
790 | { | |
791 | int r; | |
792 | unsigned int i; | |
793 | struct io_region from, to[KCOPYD_MAX_REGIONS], *dest; | |
794 | struct mirror *m; | |
795 | unsigned long flags = 0; | |
796 | ||
797 | /* fill in the source */ | |
72f4b314 | 798 | m = get_default_mirror(ms); |
1da177e4 LT |
799 | from.bdev = m->dev->bdev; |
800 | from.sector = m->offset + region_to_sector(reg->rh, reg->key); | |
801 | if (reg->key == (ms->nr_regions - 1)) { | |
802 | /* | |
803 | * The final region may be smaller than | |
804 | * region_size. | |
805 | */ | |
806 | from.count = ms->ti->len & (reg->rh->region_size - 1); | |
807 | if (!from.count) | |
808 | from.count = reg->rh->region_size; | |
809 | } else | |
810 | from.count = reg->rh->region_size; | |
811 | ||
812 | /* fill in the destinations */ | |
813 | for (i = 0, dest = to; i < ms->nr_mirrors; i++) { | |
72f4b314 | 814 | if (&ms->mirror[i] == get_default_mirror(ms)) |
1da177e4 LT |
815 | continue; |
816 | ||
817 | m = ms->mirror + i; | |
818 | dest->bdev = m->dev->bdev; | |
819 | dest->sector = m->offset + region_to_sector(reg->rh, reg->key); | |
820 | dest->count = from.count; | |
821 | dest++; | |
822 | } | |
823 | ||
824 | /* hand to kcopyd */ | |
825 | set_bit(KCOPYD_IGNORE_ERROR, &flags); | |
826 | r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags, | |
827 | recovery_complete, reg); | |
828 | ||
829 | return r; | |
830 | } | |
831 | ||
832 | static void do_recovery(struct mirror_set *ms) | |
833 | { | |
834 | int r; | |
835 | struct region *reg; | |
836 | struct dirty_log *log = ms->rh.log; | |
837 | ||
838 | /* | |
839 | * Start quiescing some regions. | |
840 | */ | |
841 | rh_recovery_prepare(&ms->rh); | |
842 | ||
843 | /* | |
844 | * Copy any already quiesced regions. | |
845 | */ | |
846 | while ((reg = rh_recovery_start(&ms->rh))) { | |
847 | r = recover(ms, reg); | |
848 | if (r) | |
849 | rh_recovery_end(reg, 0); | |
850 | } | |
851 | ||
852 | /* | |
853 | * Update the in sync flag. | |
854 | */ | |
855 | if (!ms->in_sync && | |
856 | (log->type->get_sync_count(log) == ms->nr_regions)) { | |
857 | /* the sync is complete */ | |
858 | dm_table_event(ms->ti->table); | |
859 | ms->in_sync = 1; | |
860 | } | |
861 | } | |
862 | ||
863 | /*----------------------------------------------------------------- | |
864 | * Reads | |
865 | *---------------------------------------------------------------*/ | |
866 | static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector) | |
867 | { | |
06386bbf JB |
868 | struct mirror *m = get_default_mirror(ms); |
869 | ||
870 | do { | |
871 | if (likely(!atomic_read(&m->error_count))) | |
872 | return m; | |
873 | ||
874 | if (m-- == ms->mirror) | |
875 | m += ms->nr_mirrors; | |
876 | } while (m != get_default_mirror(ms)); | |
877 | ||
878 | return NULL; | |
879 | } | |
880 | ||
881 | static int default_ok(struct mirror *m) | |
882 | { | |
883 | struct mirror *default_mirror = get_default_mirror(m->ms); | |
884 | ||
885 | return !atomic_read(&default_mirror->error_count); | |
886 | } | |
887 | ||
888 | static int mirror_available(struct mirror_set *ms, struct bio *bio) | |
889 | { | |
890 | region_t region = bio_to_region(&ms->rh, bio); | |
891 | ||
892 | if (ms->rh.log->type->in_sync(ms->rh.log, region, 0)) | |
893 | return choose_mirror(ms, bio->bi_sector) ? 1 : 0; | |
894 | ||
895 | return 0; | |
1da177e4 LT |
896 | } |
897 | ||
898 | /* | |
899 | * remap a buffer to a particular mirror. | |
900 | */ | |
06386bbf JB |
901 | static sector_t map_sector(struct mirror *m, struct bio *bio) |
902 | { | |
903 | return m->offset + (bio->bi_sector - m->ms->ti->begin); | |
904 | } | |
905 | ||
906 | static void map_bio(struct mirror *m, struct bio *bio) | |
1da177e4 LT |
907 | { |
908 | bio->bi_bdev = m->dev->bdev; | |
06386bbf JB |
909 | bio->bi_sector = map_sector(m, bio); |
910 | } | |
911 | ||
912 | static void map_region(struct io_region *io, struct mirror *m, | |
913 | struct bio *bio) | |
914 | { | |
915 | io->bdev = m->dev->bdev; | |
916 | io->sector = map_sector(m, bio); | |
917 | io->count = bio->bi_size >> 9; | |
918 | } | |
919 | ||
920 | /*----------------------------------------------------------------- | |
921 | * Reads | |
922 | *---------------------------------------------------------------*/ | |
923 | static void read_callback(unsigned long error, void *context) | |
924 | { | |
925 | struct bio *bio = context; | |
926 | struct mirror *m; | |
927 | ||
928 | m = bio_get_m(bio); | |
929 | bio_set_m(bio, NULL); | |
930 | ||
931 | if (likely(!error)) { | |
932 | bio_endio(bio, 0); | |
933 | return; | |
934 | } | |
935 | ||
936 | fail_mirror(m, DM_RAID1_READ_ERROR); | |
937 | ||
938 | if (likely(default_ok(m)) || mirror_available(m->ms, bio)) { | |
939 | DMWARN_LIMIT("Read failure on mirror device %s. " | |
940 | "Trying alternative device.", | |
941 | m->dev->name); | |
942 | queue_bio(m->ms, bio, bio_rw(bio)); | |
943 | return; | |
944 | } | |
945 | ||
946 | DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.", | |
947 | m->dev->name); | |
948 | bio_endio(bio, -EIO); | |
949 | } | |
950 | ||
951 | /* Asynchronous read. */ | |
952 | static void read_async_bio(struct mirror *m, struct bio *bio) | |
953 | { | |
954 | struct io_region io; | |
955 | struct dm_io_request io_req = { | |
956 | .bi_rw = READ, | |
957 | .mem.type = DM_IO_BVEC, | |
958 | .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, | |
959 | .notify.fn = read_callback, | |
960 | .notify.context = bio, | |
961 | .client = m->ms->io_client, | |
962 | }; | |
963 | ||
964 | map_region(&io, m, bio); | |
965 | bio_set_m(bio, m); | |
966 | (void) dm_io(&io_req, 1, &io, NULL); | |
1da177e4 LT |
967 | } |
968 | ||
969 | static void do_reads(struct mirror_set *ms, struct bio_list *reads) | |
970 | { | |
971 | region_t region; | |
972 | struct bio *bio; | |
973 | struct mirror *m; | |
974 | ||
975 | while ((bio = bio_list_pop(reads))) { | |
976 | region = bio_to_region(&ms->rh, bio); | |
06386bbf | 977 | m = get_default_mirror(ms); |
1da177e4 LT |
978 | |
979 | /* | |
980 | * We can only read balance if the region is in sync. | |
981 | */ | |
06386bbf | 982 | if (likely(rh_in_sync(&ms->rh, region, 1))) |
1da177e4 | 983 | m = choose_mirror(ms, bio->bi_sector); |
06386bbf JB |
984 | else if (m && atomic_read(&m->error_count)) |
985 | m = NULL; | |
1da177e4 | 986 | |
06386bbf JB |
987 | if (likely(m)) |
988 | read_async_bio(m, bio); | |
989 | else | |
990 | bio_endio(bio, -EIO); | |
1da177e4 LT |
991 | } |
992 | } | |
993 | ||
994 | /*----------------------------------------------------------------- | |
995 | * Writes. | |
996 | * | |
997 | * We do different things with the write io depending on the | |
998 | * state of the region that it's in: | |
999 | * | |
1000 | * SYNC: increment pending, use kcopyd to write to *all* mirrors | |
1001 | * RECOVERING: delay the io until recovery completes | |
1002 | * NOSYNC: increment pending, just write to the default mirror | |
1003 | *---------------------------------------------------------------*/ | |
72f4b314 JB |
1004 | |
1005 | /* __bio_mark_nosync | |
1006 | * @ms | |
1007 | * @bio | |
1008 | * @done | |
1009 | * @error | |
1010 | * | |
1011 | * The bio was written on some mirror(s) but failed on other mirror(s). | |
1012 | * We can successfully endio the bio but should avoid the region being | |
1013 | * marked clean by setting the state RH_NOSYNC. | |
1014 | * | |
1015 | * This function is _not_ safe in interrupt context! | |
1016 | */ | |
1017 | static void __bio_mark_nosync(struct mirror_set *ms, | |
1018 | struct bio *bio, unsigned done, int error) | |
1019 | { | |
1020 | unsigned long flags; | |
1021 | struct region_hash *rh = &ms->rh; | |
1022 | struct dirty_log *log = ms->rh.log; | |
1023 | struct region *reg; | |
1024 | region_t region = bio_to_region(rh, bio); | |
1025 | int recovering = 0; | |
1026 | ||
1027 | /* We must inform the log that the sync count has changed. */ | |
1028 | log->type->set_region_sync(log, region, 0); | |
1029 | ms->in_sync = 0; | |
1030 | ||
1031 | read_lock(&rh->hash_lock); | |
1032 | reg = __rh_find(rh, region); | |
1033 | read_unlock(&rh->hash_lock); | |
1034 | ||
1035 | /* region hash entry should exist because write was in-flight */ | |
1036 | BUG_ON(!reg); | |
1037 | BUG_ON(!list_empty(&reg->list)); | |
1038 | ||
1039 | spin_lock_irqsave(&rh->region_lock, flags); | |
1040 | /* | |
1041 | * Possible cases: | |
1042 | * 1) RH_DIRTY | |
1043 | * 2) RH_NOSYNC: was dirty, other preceding writes failed | |
1044 | * 3) RH_RECOVERING: flushing pending writes | |
1045 | * In either case, the region should not have been connected to any list. | |
1046 | */ | |
1047 | recovering = (reg->state == RH_RECOVERING); | |
1048 | reg->state = RH_NOSYNC; | |
1049 | BUG_ON(!list_empty(&reg->list)); | |
1050 | spin_unlock_irqrestore(&rh->region_lock, flags); | |
1051 | ||
1052 | bio_endio(bio, error); | |
1053 | if (recovering) | |
1054 | complete_resync_work(reg, 0); | |
1055 | } | |
1056 | ||
1da177e4 LT |
1057 | static void write_callback(unsigned long error, void *context) |
1058 | { | |
72f4b314 | 1059 | unsigned i, ret = 0; |
1da177e4 LT |
1060 | struct bio *bio = (struct bio *) context; |
1061 | struct mirror_set *ms; | |
72f4b314 JB |
1062 | int uptodate = 0; |
1063 | int should_wake = 0; | |
1064 | unsigned long flags; | |
1da177e4 | 1065 | |
06386bbf JB |
1066 | ms = bio_get_m(bio)->ms; |
1067 | bio_set_m(bio, NULL); | |
1da177e4 LT |
1068 | |
1069 | /* | |
1070 | * NOTE: We don't decrement the pending count here, | |
1071 | * instead it is done by the targets endio function. | |
1072 | * This way we handle both writes to SYNC and NOSYNC | |
1073 | * regions with the same code. | |
1074 | */ | |
72f4b314 JB |
1075 | if (likely(!error)) |
1076 | goto out; | |
1da177e4 | 1077 | |
72f4b314 JB |
1078 | for (i = 0; i < ms->nr_mirrors; i++) |
1079 | if (test_bit(i, &error)) | |
1080 | fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR); | |
1081 | else | |
1082 | uptodate = 1; | |
1083 | ||
1084 | if (unlikely(!uptodate)) { | |
1085 | DMERR("All replicated volumes dead, failing I/O"); | |
1086 | /* None of the writes succeeded, fail the I/O. */ | |
1087 | ret = -EIO; | |
1088 | } else if (errors_handled(ms)) { | |
1da177e4 | 1089 | /* |
72f4b314 JB |
1090 | * Need to raise event. Since raising |
1091 | * events can block, we need to do it in | |
1092 | * the main thread. | |
1da177e4 | 1093 | */ |
72f4b314 JB |
1094 | spin_lock_irqsave(&ms->lock, flags); |
1095 | if (!ms->failures.head) | |
1096 | should_wake = 1; | |
1097 | bio_list_add(&ms->failures, bio); | |
1098 | spin_unlock_irqrestore(&ms->lock, flags); | |
1099 | if (should_wake) | |
1100 | wake(ms); | |
1101 | return; | |
1da177e4 | 1102 | } |
72f4b314 JB |
1103 | out: |
1104 | bio_endio(bio, ret); | |
1da177e4 LT |
1105 | } |
1106 | ||
1107 | static void do_write(struct mirror_set *ms, struct bio *bio) | |
1108 | { | |
1109 | unsigned int i; | |
06386bbf | 1110 | struct io_region io[ms->nr_mirrors], *dest = io; |
1da177e4 | 1111 | struct mirror *m; |
88be163a MB |
1112 | struct dm_io_request io_req = { |
1113 | .bi_rw = WRITE, | |
1114 | .mem.type = DM_IO_BVEC, | |
1115 | .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, | |
1116 | .notify.fn = write_callback, | |
1117 | .notify.context = bio, | |
1118 | .client = ms->io_client, | |
1119 | }; | |
1da177e4 | 1120 | |
06386bbf JB |
1121 | for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) |
1122 | map_region(dest++, m, bio); | |
1da177e4 | 1123 | |
06386bbf JB |
1124 | /* |
1125 | * Use default mirror because we only need it to retrieve the reference | |
1126 | * to the mirror set in write_callback(). | |
1127 | */ | |
1128 | bio_set_m(bio, get_default_mirror(ms)); | |
88be163a MB |
1129 | |
1130 | (void) dm_io(&io_req, ms->nr_mirrors, io, NULL); | |
1da177e4 LT |
1131 | } |
1132 | ||
1133 | static void do_writes(struct mirror_set *ms, struct bio_list *writes) | |
1134 | { | |
1135 | int state; | |
1136 | struct bio *bio; | |
1137 | struct bio_list sync, nosync, recover, *this_list = NULL; | |
1138 | ||
1139 | if (!writes->head) | |
1140 | return; | |
1141 | ||
1142 | /* | |
1143 | * Classify each write. | |
1144 | */ | |
1145 | bio_list_init(&sync); | |
1146 | bio_list_init(&nosync); | |
1147 | bio_list_init(&recover); | |
1148 | ||
1149 | while ((bio = bio_list_pop(writes))) { | |
1150 | state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1); | |
1151 | switch (state) { | |
1152 | case RH_CLEAN: | |
1153 | case RH_DIRTY: | |
1154 | this_list = &sync; | |
1155 | break; | |
1156 | ||
1157 | case RH_NOSYNC: | |
1158 | this_list = &nosync; | |
1159 | break; | |
1160 | ||
1161 | case RH_RECOVERING: | |
1162 | this_list = &recover; | |
1163 | break; | |
1164 | } | |
1165 | ||
1166 | bio_list_add(this_list, bio); | |
1167 | } | |
1168 | ||
1169 | /* | |
1170 | * Increment the pending counts for any regions that will | |
1171 | * be written to (writes to recover regions are going to | |
1172 | * be delayed). | |
1173 | */ | |
1174 | rh_inc_pending(&ms->rh, &sync); | |
1175 | rh_inc_pending(&ms->rh, &nosync); | |
fc1ff958 | 1176 | ms->log_failure = rh_flush(&ms->rh) ? 1 : 0; |
1da177e4 LT |
1177 | |
1178 | /* | |
1179 | * Dispatch io. | |
1180 | */ | |
b80aa7a0 JB |
1181 | if (unlikely(ms->log_failure)) { |
1182 | spin_lock_irq(&ms->lock); | |
1183 | bio_list_merge(&ms->failures, &sync); | |
1184 | spin_unlock_irq(&ms->lock); | |
1185 | } else | |
fc1ff958 | 1186 | while ((bio = bio_list_pop(&sync))) |
b80aa7a0 | 1187 | do_write(ms, bio); |
1da177e4 LT |
1188 | |
1189 | while ((bio = bio_list_pop(&recover))) | |
1190 | rh_delay(&ms->rh, bio); | |
1191 | ||
1192 | while ((bio = bio_list_pop(&nosync))) { | |
06386bbf | 1193 | map_bio(get_default_mirror(ms), bio); |
1da177e4 LT |
1194 | generic_make_request(bio); |
1195 | } | |
1196 | } | |
1197 | ||
72f4b314 JB |
1198 | static void do_failures(struct mirror_set *ms, struct bio_list *failures) |
1199 | { | |
1200 | struct bio *bio; | |
1201 | ||
1202 | if (!failures->head) | |
1203 | return; | |
1204 | ||
b80aa7a0 JB |
1205 | if (!ms->log_failure) { |
1206 | while ((bio = bio_list_pop(failures))) | |
1207 | __bio_mark_nosync(ms, bio, bio->bi_size, 0); | |
1208 | return; | |
1209 | } | |
1210 | ||
1211 | /* | |
1212 | * If the log has failed, unattempted writes are being | |
1213 | * put on the failures list. We can't issue those writes | |
1214 | * until a log has been marked, so we must store them. | |
1215 | * | |
1216 | * If a 'noflush' suspend is in progress, we can requeue | |
1217 | * the I/Os to the core. This gives userspace a chance | |
1218 | * to reconfigure the mirror, at which point the core | |
1219 | * will reissue the writes. If the 'noflush' flag is | |
1220 | * not set, we have no choice but to return errors. | |
1221 | * | |
1222 | * Some writes on the failures list may have been | |
1223 | * submitted before the log failure and represent a | |
1224 | * failure to write to one of the devices. It is ok | |
1225 | * for us to treat them the same and requeue them | |
1226 | * as well. | |
1227 | */ | |
1228 | if (dm_noflush_suspending(ms->ti)) { | |
1229 | while ((bio = bio_list_pop(failures))) | |
1230 | bio_endio(bio, DM_ENDIO_REQUEUE); | |
1231 | return; | |
1232 | } | |
1233 | ||
1234 | if (atomic_read(&ms->suspend)) { | |
1235 | while ((bio = bio_list_pop(failures))) | |
1236 | bio_endio(bio, -EIO); | |
1237 | return; | |
1238 | } | |
1239 | ||
1240 | spin_lock_irq(&ms->lock); | |
1241 | bio_list_merge(&ms->failures, failures); | |
1242 | spin_unlock_irq(&ms->lock); | |
1243 | ||
1244 | wake(ms); | |
72f4b314 JB |
1245 | } |
1246 | ||
1247 | static void trigger_event(struct work_struct *work) | |
1248 | { | |
1249 | struct mirror_set *ms = | |
1250 | container_of(work, struct mirror_set, trigger_event); | |
1251 | ||
1252 | dm_table_event(ms->ti->table); | |
1253 | } | |
1254 | ||
1da177e4 LT |
1255 | /*----------------------------------------------------------------- |
1256 | * kmirrord | |
1257 | *---------------------------------------------------------------*/ | |
72f4b314 | 1258 | static int _do_mirror(struct work_struct *work) |
1da177e4 | 1259 | { |
6ad36fe2 HS |
1260 | struct mirror_set *ms = container_of(work, struct mirror_set,
1261 | kmirrord_work); | |
72f4b314 JB |
1262 | struct bio_list reads, writes, failures; |
1263 | unsigned long flags; | |
1da177e4 | 1264 | |
72f4b314 | 1265 | spin_lock_irqsave(&ms->lock, flags); |
1da177e4 LT |
1266 | reads = ms->reads; |
1267 | writes = ms->writes; | |
72f4b314 | 1268 | failures = ms->failures; |
1da177e4 LT |
1269 | bio_list_init(&ms->reads); |
1270 | bio_list_init(&ms->writes); | |
72f4b314 JB |
1271 | bio_list_init(&ms->failures); |
1272 | spin_unlock_irqrestore(&ms->lock, flags); | |
1da177e4 LT |
1273 | |
1274 | rh_update_states(&ms->rh); | |
1275 | do_recovery(ms); | |
1276 | do_reads(ms, &reads); | |
1277 | do_writes(ms, &writes); | |
72f4b314 JB |
1278 | do_failures(ms, &failures); |
1279 | ||
1280 | return (ms->failures.head) ? 1 : 0; | |
1281 | } | |
1282 | ||
1283 | static void do_mirror(struct work_struct *work) | |
1284 | { | |
1285 | /* | |
1286 | * If _do_mirror returns 1, we give it | |
1287 | * another shot. This helps for cases like | |
1288 | * 'suspend' where we call flush_workqueue | |
1289 | * and expect all work to be finished. If | |
1290 | * a failure happens during a suspend, we | |
1291 | * couldn't issue a 'wake' because it would | |
1292 | * not be honored. Therefore, we return '1' | |
1293 | * from _do_mirror, and retry here. | |
1294 | */ | |
1295 | while (_do_mirror(work)) | |
1296 | schedule(); | |
1da177e4 LT |
1297 | } |
1298 | ||
72f4b314 | 1299 | |
1da177e4 LT |
1300 | /*----------------------------------------------------------------- |
1301 | * Target functions | |
1302 | *---------------------------------------------------------------*/ | |
1303 | static struct mirror_set *alloc_context(unsigned int nr_mirrors, | |
1304 | uint32_t region_size, | |
1305 | struct dm_target *ti, | |
1306 | struct dirty_log *dl) | |
1307 | { | |
1308 | size_t len; | |
1309 | struct mirror_set *ms = NULL; | |
1310 | ||
1311 | if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors)) | |
1312 | return NULL; | |
1313 | ||
1314 | len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors); | |
1315 | ||
dd00cc48 | 1316 | ms = kzalloc(len, GFP_KERNEL); |
1da177e4 | 1317 | if (!ms) { |
72d94861 | 1318 | ti->error = "Cannot allocate mirror context"; |
1da177e4 LT |
1319 | return NULL; |
1320 | } | |
1321 | ||
1da177e4 LT |
1322 | spin_lock_init(&ms->lock); |
1323 | ||
1324 | ms->ti = ti; | |
1325 | ms->nr_mirrors = nr_mirrors; | |
1326 | ms->nr_regions = dm_sector_div_up(ti->len, region_size); | |
1327 | ms->in_sync = 0; | |
b80aa7a0 JB |
1328 | ms->log_failure = 0; |
1329 | atomic_set(&ms->suspend, 0); | |
72f4b314 | 1330 | atomic_set(&ms->default_mirror, DEFAULT_MIRROR); |
1da177e4 | 1331 | |
06386bbf JB |
1332 | len = sizeof(struct dm_raid1_read_record); |
1333 | ms->read_record_pool = mempool_create_kmalloc_pool(MIN_READ_RECORDS, | |
1334 | len); | |
1335 | if (!ms->read_record_pool) { | |
1336 | ti->error = "Error creating mirror read_record_pool"; | |
1337 | kfree(ms); | |
1338 | return NULL; | |
1339 | } | |
1340 | ||
88be163a MB |
1341 | ms->io_client = dm_io_client_create(DM_IO_PAGES); |
1342 | if (IS_ERR(ms->io_client)) { | |
1343 | ti->error = "Error creating dm_io client"; | |
06386bbf | 1344 | mempool_destroy(ms->read_record_pool); |
88be163a MB |
1345 | kfree(ms); |
1346 | return NULL; | |
1347 | } | |
1348 | ||
1da177e4 | 1349 | if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) { |
72d94861 | 1350 | ti->error = "Error creating dirty region hash"; |
a72cf737 | 1351 | dm_io_client_destroy(ms->io_client); |
06386bbf | 1352 | mempool_destroy(ms->read_record_pool); |
1da177e4 LT |
1353 | kfree(ms); |
1354 | return NULL; | |
1355 | } | |
1356 | ||
1357 | return ms; | |
1358 | } | |
1359 | ||
1360 | static void free_context(struct mirror_set *ms, struct dm_target *ti, | |
1361 | unsigned int m) | |
1362 | { | |
1363 | while (m--) | |
1364 | dm_put_device(ti, ms->mirror[m].dev); | |
1365 | ||
88be163a | 1366 | dm_io_client_destroy(ms->io_client); |
1da177e4 | 1367 | rh_exit(&ms->rh); |
06386bbf | 1368 | mempool_destroy(ms->read_record_pool); |
1da177e4 LT |
1369 | kfree(ms); |
1370 | } | |
1371 | ||
1372 | static inline int _check_region_size(struct dm_target *ti, uint32_t size) | |
1373 | { | |
6f3c3f0a | 1374 | return !(size % (PAGE_SIZE >> 9) || !is_power_of_2(size) || |
1da177e4 LT |
1375 | size > ti->len); |
1376 | } | |
1377 | ||
1378 | static int get_mirror(struct mirror_set *ms, struct dm_target *ti, | |
1379 | unsigned int mirror, char **argv) | |
1380 | { | |
4ee218cd | 1381 | unsigned long long offset; |
1da177e4 | 1382 | |
4ee218cd | 1383 | if (sscanf(argv[1], "%llu", &offset) != 1) { |
72d94861 | 1384 | ti->error = "Invalid offset"; |
1da177e4 LT |
1385 | return -EINVAL; |
1386 | } | |
1387 | ||
1388 | if (dm_get_device(ti, argv[0], offset, ti->len, | |
1389 | dm_table_get_mode(ti->table), | |
1390 | &ms->mirror[mirror].dev)) { | |
72d94861 | 1391 | ti->error = "Device lookup failure"; |
1da177e4 LT |
1392 | return -ENXIO; |
1393 | } | |
1394 | ||
aa5617c5 | 1395 | ms->mirror[mirror].ms = ms; |
72f4b314 JB |
1396 | atomic_set(&(ms->mirror[mirror].error_count), 0); |
1397 | ms->mirror[mirror].error_type = 0; | |
1da177e4 LT |
1398 | ms->mirror[mirror].offset = offset; |
1399 | ||
1400 | return 0; | |
1401 | } | |
1402 | ||
1da177e4 LT |
1403 | /* |
1404 | * Create dirty log: log_type #log_params <log_params> | |
1405 | */ | |
1406 | static struct dirty_log *create_dirty_log(struct dm_target *ti, | |
1407 | unsigned int argc, char **argv, | |
1408 | unsigned int *args_used) | |
1409 | { | |
1410 | unsigned int param_count; | |
1411 | struct dirty_log *dl; | |
1412 | ||
1413 | if (argc < 2) { | |
72d94861 | 1414 | ti->error = "Insufficient mirror log arguments"; |
1da177e4 LT |
1415 | return NULL; |
1416 | } | |
1417 | ||
1418 | if (sscanf(argv[1], "%u", &param_count) != 1) { | |
72d94861 | 1419 | ti->error = "Invalid mirror log argument count"; |
1da177e4 LT |
1420 | return NULL; |
1421 | } | |
1422 | ||
1423 | *args_used = 2 + param_count; | |
1424 | ||
1425 | if (argc < *args_used) { | |
72d94861 | 1426 | ti->error = "Insufficient mirror log arguments"; |
1da177e4 LT |
1427 | return NULL; |
1428 | } | |
1429 | ||
1430 | dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2); | |
1431 | if (!dl) { | |
72d94861 | 1432 | ti->error = "Error creating mirror dirty log"; |
1da177e4 LT |
1433 | return NULL; |
1434 | } | |
1435 | ||
1436 | if (!_check_region_size(ti, dl->type->get_region_size(dl))) { | |
72d94861 | 1437 | ti->error = "Invalid region size"; |
1da177e4 LT |
1438 | dm_destroy_dirty_log(dl); |
1439 | return NULL; | |
1440 | } | |
1441 | ||
1442 | return dl; | |
1443 | } | |
1444 | ||
a8e6afa2 JB |
1445 | static int parse_features(struct mirror_set *ms, unsigned argc, char **argv, |
1446 | unsigned *args_used) | |
1447 | { | |
1448 | unsigned num_features; | |
1449 | struct dm_target *ti = ms->ti; | |
1450 | ||
1451 | *args_used = 0; | |
1452 | ||
1453 | if (!argc) | |
1454 | return 0; | |
1455 | ||
1456 | if (sscanf(argv[0], "%u", &num_features) != 1) { | |
1457 | ti->error = "Invalid number of features"; | |
1458 | return -EINVAL; | |
1459 | } | |
1460 | ||
1461 | argc--; | |
1462 | argv++; | |
1463 | (*args_used)++; | |
1464 | ||
1465 | if (num_features > argc) { | |
1466 | ti->error = "Not enough arguments to support feature count"; | |
1467 | return -EINVAL; | |
1468 | } | |
1469 | ||
1470 | if (!strcmp("handle_errors", argv[0])) | |
1471 | ms->features |= DM_RAID1_HANDLE_ERRORS; | |
1472 | else { | |
1473 | ti->error = "Unrecognised feature requested"; | |
1474 | return -EINVAL; | |
1475 | } | |
1476 | ||
1477 | (*args_used)++; | |
1478 | ||
1479 | return 0; | |
1480 | } | |
1481 | ||
1da177e4 LT |
1482 | /* |
1483 | * Construct a mirror mapping: | |
1484 | * | |
1485 | * log_type #log_params <log_params> | |
1486 | * #mirrors [mirror_path offset]{2,} | |
a8e6afa2 | 1487 | * [#features <features>] |
1da177e4 LT |
1488 | * |
1489 | * log_type is "core" or "disk" | |
1490 | * #log_params is between 1 and 3 | |
a8e6afa2 JB |
1491 | * |
1492 | * If present, features must be "handle_errors". | |
1da177e4 | 1493 | */ |
1da177e4 LT |
1494 | static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) |
1495 | { | |
1496 | int r; | |
1497 | unsigned int nr_mirrors, m, args_used; | |
1498 | struct mirror_set *ms; | |
1499 | struct dirty_log *dl; | |
1500 | ||
1501 | dl = create_dirty_log(ti, argc, argv, &args_used); | |
1502 | if (!dl) | |
1503 | return -EINVAL; | |
1504 | ||
1505 | argv += args_used; | |
1506 | argc -= args_used; | |
1507 | ||
1508 | if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 || | |
1509 | nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) { | |
72d94861 | 1510 | ti->error = "Invalid number of mirrors"; |
1da177e4 LT |
1511 | dm_destroy_dirty_log(dl); |
1512 | return -EINVAL; | |
1513 | } | |
1514 | ||
1515 | argv++, argc--; | |
1516 | ||
a8e6afa2 JB |
1517 | if (argc < nr_mirrors * 2) { |
1518 | ti->error = "Too few mirror arguments"; | |
1da177e4 LT |
1519 | dm_destroy_dirty_log(dl); |
1520 | return -EINVAL; | |
1521 | } | |
1522 | ||
1523 | ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl); | |
1524 | if (!ms) { | |
1525 | dm_destroy_dirty_log(dl); | |
1526 | return -ENOMEM; | |
1527 | } | |
1528 | ||
1529 | /* Get the mirror parameter sets */ | |
1530 | for (m = 0; m < nr_mirrors; m++) { | |
1531 | r = get_mirror(ms, ti, m, argv); | |
1532 | if (r) { | |
1533 | free_context(ms, ti, m); | |
1534 | return r; | |
1535 | } | |
1536 | argv += 2; | |
1537 | argc -= 2; | |
1538 | } | |
1539 | ||
1540 | ti->private = ms; | |
d88854f0 | 1541 | ti->split_io = ms->rh.region_size; |
1da177e4 | 1542 | |
6ad36fe2 HS |
1543 | ms->kmirrord_wq = create_singlethread_workqueue("kmirrord"); |
1544 | if (!ms->kmirrord_wq) { | |
1545 | DMERR("couldn't start kmirrord"); | |
a72cf737 DM |
1546 | r = -ENOMEM; |
1547 | goto err_free_context; | |
6ad36fe2 HS |
1548 | } |
1549 | INIT_WORK(&ms->kmirrord_work, do_mirror); | |
72f4b314 | 1550 | INIT_WORK(&ms->trigger_event, trigger_event); |
6ad36fe2 | 1551 | |
a8e6afa2 | 1552 | r = parse_features(ms, argc, argv, &args_used); |
a72cf737 DM |
1553 | if (r) |
1554 | goto err_destroy_wq; | |
a8e6afa2 JB |
1555 | |
1556 | argv += args_used; | |
1557 | argc -= args_used; | |
1558 | ||
f44db678 JB |
1559 | /* |
1560 | * Any read-balancing addition depends on the | |
1561 | * DM_RAID1_HANDLE_ERRORS flag being present. | |
1562 | * This is because the decision to balance depends | |
1563 | * on the sync state of a region. If the above | |
1564 | * flag is not present, we ignore errors; and | |
1565 | * the sync state may be inaccurate. | |
1566 | */ | |
1567 | ||
a8e6afa2 JB |
1568 | if (argc) { |
1569 | ti->error = "Too many mirror arguments"; | |
a72cf737 DM |
1570 | r = -EINVAL; |
1571 | goto err_destroy_wq; | |
a8e6afa2 JB |
1572 | } |
1573 | ||
1da177e4 | 1574 | r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client); |
a72cf737 DM |
1575 | if (r) |
1576 | goto err_destroy_wq; | |
1da177e4 | 1577 | |
6ad36fe2 | 1578 | wake(ms); |
1da177e4 | 1579 | return 0; |
a72cf737 DM |
1580 | |
1581 | err_destroy_wq: | |
1582 | destroy_workqueue(ms->kmirrord_wq); | |
1583 | err_free_context: | |
1584 | free_context(ms, ti, ms->nr_mirrors); | |
1585 | return r; | |
1da177e4 LT |
1586 | } |
1587 | ||
1588 | static void mirror_dtr(struct dm_target *ti) | |
1589 | { | |
1590 | struct mirror_set *ms = (struct mirror_set *) ti->private; | |
1591 | ||
6ad36fe2 | 1592 | flush_workqueue(ms->kmirrord_wq); |
1da177e4 | 1593 | kcopyd_client_destroy(ms->kcopyd_client); |
6ad36fe2 | 1594 | destroy_workqueue(ms->kmirrord_wq); |
1da177e4 LT |
1595 | free_context(ms, ti, ms->nr_mirrors); |
1596 | } | |
1597 | ||
1598 | static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw) | |
1599 | { | |
72f4b314 | 1600 | unsigned long flags; |
1da177e4 LT |
1601 | int should_wake = 0; |
1602 | struct bio_list *bl; | |
1603 | ||
1604 | bl = (rw == WRITE) ? &ms->writes : &ms->reads; | |
72f4b314 | 1605 | spin_lock_irqsave(&ms->lock, flags); |
1da177e4 LT |
1606 | should_wake = !(bl->head); |
1607 | bio_list_add(bl, bio); | |
72f4b314 | 1608 | spin_unlock_irqrestore(&ms->lock, flags); |
1da177e4 LT |
1609 | |
1610 | if (should_wake) | |
6ad36fe2 | 1611 | wake(ms); |
1da177e4 LT |
1612 | } |
1613 | ||
1614 | /* | |
1615 | * Mirror mapping function | |
1616 | */ | |
1617 | static int mirror_map(struct dm_target *ti, struct bio *bio, | |
1618 | union map_info *map_context) | |
1619 | { | |
1620 | int r, rw = bio_rw(bio); | |
1621 | struct mirror *m; | |
1622 | struct mirror_set *ms = ti->private; | |
06386bbf | 1623 | struct dm_raid1_read_record *read_record = NULL; |
1da177e4 LT |
1624 | |
1625 | if (rw == WRITE) { | |
06386bbf JB |
1626 | /* Save region for mirror_end_io() handler */ |
1627 | map_context->ll = bio_to_region(&ms->rh, bio); | |
1da177e4 | 1628 | queue_bio(ms, bio, rw); |
d2a7ad29 | 1629 | return DM_MAPIO_SUBMITTED; |
1da177e4 LT |
1630 | } |
1631 | ||
1632 | r = ms->rh.log->type->in_sync(ms->rh.log, | |
1633 | bio_to_region(&ms->rh, bio), 0); | |
1634 | if (r < 0 && r != -EWOULDBLOCK) | |
1635 | return r; | |
1636 | ||
1da177e4 | 1637 | /* |
06386bbf | 1638 | * If the region is not in-sync, queue the bio. |
1da177e4 | 1639 | */ |
06386bbf JB |
1640 | if (!r || (r == -EWOULDBLOCK)) { |
1641 | if (rw == READA) | |
1642 | return -EWOULDBLOCK; | |
1da177e4 | 1643 | |
1da177e4 | 1644 | queue_bio(ms, bio, rw); |
d2a7ad29 | 1645 | return DM_MAPIO_SUBMITTED; |
1da177e4 LT |
1646 | } |
1647 | ||
06386bbf JB |
1648 | /* |
1649 | * The region is in-sync and we can perform reads directly. | |
1650 | * Store enough information so we can retry if it fails. | |
1651 | */ | |
1da177e4 | 1652 | m = choose_mirror(ms, bio->bi_sector); |
06386bbf | 1653 | if (unlikely(!m)) |
1da177e4 LT |
1654 | return -EIO; |
1655 | ||
06386bbf JB |
1656 | read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO); |
1657 | if (likely(read_record)) { | |
1658 | dm_bio_record(&read_record->details, bio); | |
1659 | map_context->ptr = read_record; | |
1660 | read_record->m = m; | |
1661 | } | |
1662 | ||
1663 | map_bio(m, bio); | |
1664 | ||
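/*
 * Annotation (not in the original source): DM_MAPIO_REMAPPED below tells
 * the dm core to submit the remapped bio to the mirror picked by
 * map_bio(), whereas the DM_MAPIO_SUBMITTED returns earlier in this
 * function mean the bio has been queued for the kmirrord worker and the
 * core must not submit it itself.
 */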
d2a7ad29 | 1665 | return DM_MAPIO_REMAPPED; |
1da177e4 LT |
1666 | } |
1667 | ||
1668 | static int mirror_end_io(struct dm_target *ti, struct bio *bio, | |
1669 | int error, union map_info *map_context) | |
1670 | { | |
1671 | int rw = bio_rw(bio); | |
1672 | struct mirror_set *ms = (struct mirror_set *) ti->private; | |
06386bbf JB |
1673 | struct mirror *m = NULL; |
1674 | struct dm_bio_details *bd = NULL; | |
1675 | struct dm_raid1_read_record *read_record = map_context->ptr; | |
1da177e4 LT |
1676 | |
1677 | /* | |
1678 | * We need to dec pending if this was a write. | |
1679 | */ | |
06386bbf JB |
1680 | if (rw == WRITE) { |
1681 | rh_dec(&ms->rh, map_context->ll); | |
1682 | return error; | |
1683 | } | |
1da177e4 | 1684 | |
06386bbf JB |
1685 | if (error == -EOPNOTSUPP) |
1686 | goto out; | |
1687 | ||
1688 | if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio)) | |
1689 | goto out; | |
1690 | ||
1691 | if (unlikely(error)) { | |
1692 | if (!read_record) { | |
1693 | /* | |
1694 | * There wasn't enough memory to record necessary | |
1695 | * information for a retry or there was no other | |
1696 | * mirror in-sync. | |
1697 | */ | |
1698 | DMERR_LIMIT("Mirror read failed."); | |
1699 | return -EIO; | |
1700 | } | |
1701 | ||
1702 | m = read_record->m; | |
1703 | DMERR("Mirror read failed from %s. Trying alternative device.", | |
1704 | m->dev->name); | |
1705 | ||
1706 | fail_mirror(m, DM_RAID1_READ_ERROR); | |
1707 | ||
1708 | /* | |
1709 | * A failed read is requeued for another attempt using an intact | |
1710 | * mirror. | |
1711 | */ | |
1712 | if (default_ok(m) || mirror_available(ms, bio)) { | |
1713 | bd = &read_record->details; | |
1714 | ||
1715 | dm_bio_restore(bd, bio); | |
1716 | mempool_free(read_record, ms->read_record_pool); | |
1717 | map_context->ptr = NULL; | |
1718 | queue_bio(ms, bio, rw); | |
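/*
 * Annotation (not in the original source): returning 1 here
 * (DM_ENDIO_INCOMPLETE) tells the dm core that the target still owns
 * the bio; it was requeued above and will be completed on a later
 * attempt, so the core must not end it now.
 */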
1719 | return 1; | |
1720 | } | |
1721 | DMERR("All replicated volumes dead, failing I/O"); | |
1722 | } | |
1723 | ||
1724 | out: | |
1725 | if (read_record) { | |
1726 | mempool_free(read_record, ms->read_record_pool); | |
1727 | map_context->ptr = NULL; | |
1728 | } | |
1729 | ||
1730 | return error; | |
1da177e4 LT |
1731 | } |
1732 | ||
b80aa7a0 | 1733 | static void mirror_presuspend(struct dm_target *ti) |
1da177e4 LT |
1734 | { |
1735 | struct mirror_set *ms = (struct mirror_set *) ti->private; | |
1736 | struct dirty_log *log = ms->rh.log; | |
1737 | ||
b80aa7a0 JB |
1738 | atomic_set(&ms->suspend, 1); |
1739 | ||
1740 | /* | |
1741 | * We must finish up all the work that we've | |
1742 | * generated (i.e. recovery work). | |
1743 | */ | |
1da177e4 | 1744 | rh_stop_recovery(&ms->rh); |
33184048 | 1745 | |
33184048 JB |
1746 | wait_event(_kmirrord_recovery_stopped, |
1747 | !atomic_read(&ms->rh.recovery_in_flight)); | |
1748 | ||
b80aa7a0 JB |
1749 | if (log->type->presuspend && log->type->presuspend(log)) |
1750 | /* FIXME: need better error handling */ | |
1751 | DMWARN("log presuspend failed"); | |
1752 | ||
1753 | /* | |
1754 | * Now that recovery is complete/stopped and the | |
1755 | * delayed bios are queued, we need to wait for | |
1756 | * the worker thread to complete. This way, | |
1757 | * we know that all of our I/O has been pushed. | |
1758 | */ | |
1759 | flush_workqueue(ms->kmirrord_wq); | |
1760 | } | |
1761 | ||
1762 | static void mirror_postsuspend(struct dm_target *ti) | |
1763 | { | |
1764 | struct mirror_set *ms = ti->private; | |
1765 | struct dirty_log *log = ms->rh.log; | |
1766 | ||
6b3df0d7 | 1767 | if (log->type->postsuspend && log->type->postsuspend(log)) |
1da177e4 | 1768 | /* FIXME: need better error handling */ |
b80aa7a0 | 1769 | DMWARN("log postsuspend failed"); |
1da177e4 LT |
1770 | } |
1771 | ||
1772 | static void mirror_resume(struct dm_target *ti) | |
1773 | { | |
b80aa7a0 | 1774 | struct mirror_set *ms = ti->private; |
1da177e4 | 1775 | struct dirty_log *log = ms->rh.log; |
b80aa7a0 JB |
1776 | |
1777 | atomic_set(&ms->suspend, 0); | |
1da177e4 LT |
1778 | if (log->type->resume && log->type->resume(log)) |
1779 | /* FIXME: need better error handling */ | |
1780 | DMWARN("log resume failed"); | |
1781 | rh_start_recovery(&ms->rh); | |
1782 | } | |
1783 | ||
af195ac8 JB |
1784 | /* |
1785 | * device_status_char | |
1786 | * @m: mirror device/leg we want the status of | |
1787 | * | |
1788 | * We return one character representing the most severe error | |
1789 | * we have encountered. | |
1790 | * A => Alive - No failures | |
1791 | * D => Dead - A write failure occurred leaving mirror out-of-sync | |
1792 | * S => Sync - A synchronization failure occurred, mirror out-of-sync | |
1793 | * R => Read - A read failure occurred, mirror data unaffected | |
1794 | * | |
1795 | * Returns: <char> | |
1796 | */ | |
1797 | static char device_status_char(struct mirror *m) | |
1798 | { | |
1799 | if (!atomic_read(&(m->error_count))) | |
1800 | return 'A'; | |
1801 | ||
1802 | return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' : | |
1803 | (test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' : | |
1804 | (test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U'; | |
1805 | } | |
1806 | ||
1807 | ||
1da177e4 LT |
1808 | static int mirror_status(struct dm_target *ti, status_type_t type, |
1809 | char *result, unsigned int maxlen) | |
1810 | { | |
315dcc22 | 1811 | unsigned int m, sz = 0; |
1da177e4 | 1812 | struct mirror_set *ms = (struct mirror_set *) ti->private; |
af195ac8 JB |
1813 | struct dirty_log *log = ms->rh.log; |
1814 | char buffer[ms->nr_mirrors + 1]; | |
1da177e4 | 1815 | |
1da177e4 LT |
1816 | switch (type) { |
1817 | case STATUSTYPE_INFO: | |
1818 | DMEMIT("%d ", ms->nr_mirrors); | |
af195ac8 | 1819 | for (m = 0; m < ms->nr_mirrors; m++) { |
1da177e4 | 1820 | DMEMIT("%s ", ms->mirror[m].dev->name); |
af195ac8 JB |
1821 | buffer[m] = device_status_char(&(ms->mirror[m])); |
1822 | } | |
1823 | buffer[m] = '\0'; | |
1da177e4 | 1824 | |
af195ac8 JB |
1825 | DMEMIT("%llu/%llu 1 %s ", |
1826 | (unsigned long long)log->type->get_sync_count(ms->rh.log), | |
1827 | (unsigned long long)ms->nr_regions, buffer); | |
315dcc22 | 1828 | |
af195ac8 | 1829 | sz += log->type->status(ms->rh.log, type, result+sz, maxlen-sz); |
315dcc22 | 1830 | |
1da177e4 LT |
1831 | break; |
1832 | ||
1833 | case STATUSTYPE_TABLE: | |
af195ac8 | 1834 | sz = log->type->status(ms->rh.log, type, result, maxlen); |
315dcc22 | 1835 | |
e52b8f6d | 1836 | DMEMIT("%d", ms->nr_mirrors); |
1da177e4 | 1837 | for (m = 0; m < ms->nr_mirrors; m++) |
e52b8f6d | 1838 | DMEMIT(" %s %llu", ms->mirror[m].dev->name, |
b80aa7a0 | 1839 | (unsigned long long)ms->mirror[m].offset); |
a8e6afa2 JB |
1840 | |
1841 | if (ms->features & DM_RAID1_HANDLE_ERRORS) | |
1842 | DMEMIT(" 1 handle_errors"); | |
1da177e4 LT |
1843 | } |
1844 | ||
1845 | return 0; | |
1846 | } | |
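To make the DMEMIT format strings above concrete, the following is roughly what the two status types produce for a fully synced two-leg mirror with handle_errors enabled; the device names, counts and the log portions are illustrative placeholders, not output captured from this code.

/*
 * Illustrative example (assumed values):
 *
 *   STATUSTYPE_INFO:   2 253:4 253:5 1024/1024 1 AA <log status>
 *   STATUSTYPE_TABLE:  <log table args> 2 253:4 0 253:5 0 1 handle_errors
 *
 * 253:4/253:5 stand in for the dm_dev names, 1024/1024 is
 * sync_count/nr_regions, and "AA" is one device_status_char() per leg.
 */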
1847 | ||
1848 | static struct target_type mirror_target = { | |
1849 | .name = "mirror", | |
af195ac8 | 1850 | .version = {1, 0, 20}, |
1da177e4 LT |
1851 | .module = THIS_MODULE, |
1852 | .ctr = mirror_ctr, | |
1853 | .dtr = mirror_dtr, | |
1854 | .map = mirror_map, | |
1855 | .end_io = mirror_end_io, | |
b80aa7a0 | 1856 | .presuspend = mirror_presuspend, |
1da177e4 LT |
1857 | .postsuspend = mirror_postsuspend, |
1858 | .resume = mirror_resume, | |
1859 | .status = mirror_status, | |
1860 | }; | |
1861 | ||
1862 | static int __init dm_mirror_init(void) | |
1863 | { | |
1864 | int r; | |
1865 | ||
1866 | r = dm_dirty_log_init(); | |
1867 | if (r) | |
1868 | return r; | |
1869 | ||
1da177e4 LT |
1870 | r = dm_register_target(&mirror_target); |
1871 | if (r < 0) { | |
0cd33124 | 1872 | DMERR("Failed to register mirror target"); |
1da177e4 | 1873 | dm_dirty_log_exit(); |
1da177e4 LT |
1874 | } |
1875 | ||
1876 | return r; | |
1877 | } | |
1878 | ||
1879 | static void __exit dm_mirror_exit(void) | |
1880 | { | |
1881 | int r; | |
1882 | ||
1883 | r = dm_unregister_target(&mirror_target); | |
1884 | if (r < 0) | |
0cd33124 | 1885 | DMERR("unregister failed %d", r); |
1da177e4 | 1886 | |
1da177e4 LT |
1887 | dm_dirty_log_exit(); |
1888 | } | |
1889 | ||
1890 | /* Module hooks */ | |
1891 | module_init(dm_mirror_init); | |
1892 | module_exit(dm_mirror_exit); | |
1893 | ||
1894 | MODULE_DESCRIPTION(DM_NAME " mirror target"); | |
1895 | MODULE_AUTHOR("Joe Thornber"); | |
1896 | MODULE_LICENSE("GPL"); |