/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */
#define DM_IO_PAGES 64
#define DM_KCOPYD_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;
	mempool_t *read_record_pool;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
	struct mirror mirror[0];
};

static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake_fn(unsigned long data)
{
	struct mirror_set *ms = (struct mirror_set *) data;

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

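	/* Coalesce wakeups: a single timer fires 200ms (HZ / 5) from now. */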
	ms->timer.expires = jiffies + HZ / 5;
	ms->timer.data = (unsigned long) ms;
	ms->timer.function = delayed_wake_fn;
	add_timer(&ms->timer);
}

static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);
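
	/* Only a transition from empty to non-empty needs to wake the daemon. */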
	if (should_wake)
		wakeup_mirrord(ms);
}

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

#define MIN_READ_RECORDS 20
struct dm_raid1_read_record {
	struct mirror *m;
	struct dm_bio_details details;
};

static struct kmem_cache *_dm_raid1_read_record_cache;

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bio
 * doesn't get submitted to the lower levels of the block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum values, DM_RAID1_*_ERROR
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync) {
		/*
		 * Better to issue requests to the same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
		if (!atomic_read(&new->error_count)) {
			set_default_mirror(new);
			break;
		}

	if (unlikely(new == ms->mirror + ms->nr_mirrors))
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}

static int recover(struct mirror_set *ms, struct dm_region *reg)
{
	int r;
	unsigned i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
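	/*
	 * The region size is a power of two, so the mask below yields
	 * ti->len % region_size; e.g. with ti->len = 1000 sectors and
	 * region_size = 64, the final region covers 1000 & 63 = 40 sectors.
	 */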
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
			   flags, recovery_complete, reg);

	return r;
}

static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	int r;

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh))) {
		r = recover(ms, reg);
		if (r)
			dm_rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
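/*
 * Walk backwards (with wrap-around) from the default mirror looking
 * for a leg with no recorded errors; return NULL when every leg has
 * failed.  Note that the sector argument is unused here: this version
 * performs no read balancing.
 */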
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_sector) ? 1 : 0;

	return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	return m->offset + (bio->bi_sector - m->ms->ti->begin);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
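	/* bi_size is in bytes; dm-io expects a count of 512-byte sectors. */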
	io->count = bio->bi_size >> 9;
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio, 0);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s. "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_rw(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.",
		    m->dev->name);
	bio_endio(bio, -EIO);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_endio(bio, -EIO);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use dm-io to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/

static void write_callback(unsigned long error, void *context)
{
	unsigned i, ret = 0;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int uptodate = 0;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the target's end_io function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error))
		goto out;

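	/*
	 * dm-io reports failures as a bitset: bit i is set if the
	 * write to mirror i failed.
	 */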
	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
		else
			uptodate = 1;

	if (unlikely(!uptodate)) {
		DMERR("All replicated volumes dead, failing I/O");
		/* None of the writes succeeded, fail the I/O. */
		ret = -EIO;
	} else if (errors_handled(ms)) {
		/*
		 * Need to raise event.  Since raising
		 * events can block, we need to do it in
		 * the main thread.
		 */
		spin_lock_irqsave(&ms->lock, flags);
		if (!ms->failures.head)
			should_wake = 1;
		bio_list_add(&ms->failures, bio);
		spin_unlock_irqrestore(&ms->lock, flags);
		if (should_wake)
			wakeup_mirrord(ms);
		return;
	}
out:
	bio_endio(bio, ret);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back onto the write queue.
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to regions being recovered are
	 * going to be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : 0;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(get_default_mirror(ms), bio);
		generic_make_request(bio);
	}
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (!failures->head)
		return;

	if (!ms->log_failure) {
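		/*
		 * The log is intact: mark each failed write's region
		 * out-of-sync so it will be recopied from the default
		 * mirror by the recovery code.
		 */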
		while ((bio = bio_list_pop(failures))) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0);
		}
		return;
	}

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the failures list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/Os to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	if (dm_noflush_suspending(ms->ti)) {
		while ((bio = bio_list_pop(failures)))
			bio_endio(bio, DM_ENDIO_REQUEUE);
		return;
	}

	if (atomic_read(&ms->suspend)) {
		while ((bio = bio_list_pop(failures)))
			bio_endio(bio, -EIO);
		return;
	}

	spin_lock_irq(&ms->lock);
	bio_list_merge(&ms->failures, failures);
	spin_unlock_irq(&ms->lock);

	delayed_wake(ms);
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);

	dm_table_unplug_all(ms->ti->table);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

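	/* The mirror[] legs are allocated inline, after the set itself. */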
	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
						_dm_raid1_read_record_cache);

	if (!ms->read_record_pool) {
		ti->error = "Error creating mirror read_record_pool";
		kfree(ms);
		return NULL;
	}

	ms->io_client = dm_io_client_create(DM_IO_PAGES);
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	mempool_destroy(ms->read_record_pool);
	kfree(ms);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, param_count, argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
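/*
 * For example, a hypothetical two-leg mirror with a core log, a
 * 1024-sector region size and error handling enabled could be built
 * from a table line such as (device names are illustrative only):
 *
 *	0 409600 mirror core 1 1024 2 /dev/sda1 0 /dev/sdb1 0 1 handle_errors
 */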
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = dm_rh_get_region_size(ms->rh);

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	init_timer(&ms->timer);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors, and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
	if (r)
		goto err_destroy_wq;

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_scheduled_work();
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_raid1_read_record *read_record = NULL;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	/*
	 * If the region is not in-sync, queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (rw == READA)
			return -EWOULDBLOCK;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_sector);
	if (unlikely(!m))
		return -EIO;

	read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
	if (likely(read_record)) {
		dm_bio_record(&read_record->details, bio);
		map_context->ptr = read_record;
		read_record->m = m;
	}

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_read_record *read_record = map_context->ptr;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		dm_rh_dec(ms->rh, map_context->ll);
		return error;
	}

	if (error == -EOPNOTSUPP)
		goto out;

	if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
		goto out;

	if (unlikely(error)) {
		if (!read_record) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return -EIO;
		}

		m = read_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an intact
		 * mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &read_record->details;

			dm_bio_restore(bd, bio);
			mempool_free(read_record, ms->read_record_pool);
			map_context->ptr = NULL;
			queue_bio(ms, bio, rw);
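			/*
			 * Returning 1 tells the dm core not to complete
			 * this bio here: it has been requeued above and
			 * will be retried on another leg.
			 */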
			return 1;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	if (read_record) {
		mempool_free(read_record, ms->read_record_pool);
		map_context->ptr = NULL;
	}

	return error;
}

static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 1);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete.  This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}

/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 * A => Alive - No failures
 * D => Dead - A write failure occurred leaving mirror out-of-sync
 * S => Sync - A synchronization failure occurred, mirror out-of-sync
 * R => Read - A read failure occurred, mirror data unaffected
 *
 * Returns: <char>
 */
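/*
 * E.g. a hypothetical two-leg mirror whose second leg failed a write
 * would report "AD" in the buffer assembled by mirror_status() below.
 */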
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}


static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[ms->nr_mirrors + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}

static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, data);

	return ret;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 12, 0},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};

static int __init dm_mirror_init(void)
{
	int r;

	_dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
	if (!_dm_raid1_read_record_cache) {
		DMERR("Can't allocate dm_raid1_read_record cache");
		r = -ENOMEM;
		goto bad_cache;
	}

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	kmem_cache_destroy(_dm_raid1_read_record_cache);
bad_cache:
	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
	kmem_cache_destroy(_dm_raid1_read_record_cache);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");