/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */

#define DM_RAID1_HANDLE_ERRORS	0x01
#define DM_RAID1_KEEP_LOG	0x02
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)
#define keep_log(p)		((p)->features & DM_RAID1_KEEP_LOG)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_FLUSH_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;
	struct bio_list holds;	/* bios are waiting until suspend */

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	int leg_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
	struct mirror mirror[0];
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle,
		"A percentage of time allocated for raid resynchronization");

static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake_fn(struct timer_list *t)
{
	struct mirror_set *ms = from_timer(ms, t, timer);

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	add_timer(&ms->timer);
}

static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

struct dm_raid1_bio_record {
	struct mirror *m;
	/* if details->bi_disk == NULL, details were not saved */
	struct dm_bio_details details;
	region_t write_region;
};

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky. We squirrel the mirror struct away inside
 * bi_next for read/write buffers. This is safe since the bio
 * doesn't get submitted to the lower levels of the block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

static struct mirror *get_valid_mirror(struct mirror_set *ms)
{
	struct mirror *m;

	for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
		if (!atomic_read(&m->error_count))
			return m;

	return NULL;
}
194 | ||
195 | /* fail_mirror | |
196 | * @m: mirror device to fail | |
197 | * @error_type: one of the enum's, DM_RAID1_*_ERROR | |
198 | * | |
199 | * If errors are being handled, record the type of | |
200 | * error encountered for this device. If this type | |
201 | * of error has already been recorded, we can return; | |
202 | * otherwise, we must signal userspace by triggering | |
203 | * an event. Additionally, if the device is the | |
204 | * primary device, we must choose a new primary, but | |
205 | * only if the mirror is in-sync. | |
206 | * | |
207 | * This function must not block. | |
208 | */ | |
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	ms->leg_failure = 1;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync && !keep_log(ms)) {
		/*
		 * Better to issue requests to same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	new = get_valid_mirror(ms);
	if (new)
		set_default_mirror(new);
	else
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}

static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;

	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_op = REQ_OP_WRITE,
		.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	error_bits = -1;
	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_FLUSH_ERROR);
		return -EIO;
	}

	return 0;
}
286 | ||
287 | /*----------------------------------------------------------------- | |
288 | * Recovery. | |
289 | * | |
290 | * When a mirror is first activated we may find that some regions | |
291 | * are in the no-sync state. We have to recover these by | |
292 | * recopying from the default mirror to all the others. | |
293 | *---------------------------------------------------------------*/ | |
294 | static void recovery_complete(int read_err, unsigned long write_err, | |
295 | void *context) | |
296 | { | |
297 | struct dm_region *reg = context; | |
298 | struct mirror_set *ms = dm_rh_region_context(reg); | |
299 | int m, bit = 0; | |
300 | ||
301 | if (read_err) { | |
302 | /* Read error means the failure of default mirror. */ | |
303 | DMERR_LIMIT("Unable to read primary mirror during recovery"); | |
304 | fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR); | |
305 | } | |
306 | ||
307 | if (write_err) { | |
308 | DMERR_LIMIT("Write error during recovery (error = 0x%lx)", | |
309 | write_err); | |
310 | /* | |
311 | * Bits correspond to devices (excluding default mirror). | |
312 | * The default mirror cannot change during recovery. | |
313 | */ | |
314 | for (m = 0; m < ms->nr_mirrors; m++) { | |
315 | if (&ms->mirror[m] == get_default_mirror(ms)) | |
316 | continue; | |
317 | if (test_bit(bit, &write_err)) | |
318 | fail_mirror(ms->mirror + m, | |
319 | DM_RAID1_SYNC_ERROR); | |
320 | bit++; | |
321 | } | |
322 | } | |
323 | ||
324 | dm_rh_recovery_end(reg, !(read_err || write_err)); | |
325 | } | |
326 | ||
static int recover(struct mirror_set *ms, struct dm_region *reg)
{
	int r;
	unsigned i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
			   flags, recovery_complete, reg);

	return r;
}

static void reset_ms_flags(struct mirror_set *ms)
{
	unsigned int m;

	ms->leg_failure = 0;
	for (m = 0; m < ms->nr_mirrors; m++) {
		atomic_set(&(ms->mirror[m].error_count), 0);
		ms->mirror[m].error_type = 0;
	}
}

static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	int r;

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh))) {
		r = recover(ms, reg);
		if (r)
			dm_rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
		reset_ms_flags(ms);
	}
}
416 | ||
417 | /*----------------------------------------------------------------- | |
418 | * Reads | |
419 | *---------------------------------------------------------------*/ | |
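/*
 * Starting from the current default mirror, walk backwards (wrapping
 * around) until a leg with no recorded errors is found. Returns NULL
 * if every leg has failed.
 */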
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;

	return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	if (unlikely(!bio->bi_iter.bi_size))
		return 0;
	return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio_set_dev(bio, m->dev->bdev);
	bio->bi_iter.bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio_sectors(bio);
}

static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
	/*
	 * Lock is required to avoid race condition during suspend
	 * process.
	 */
	spin_lock_irq(&ms->lock);

	if (atomic_read(&ms->suspend)) {
		spin_unlock_irq(&ms->lock);

		/*
		 * If device is suspended, complete the bio.
		 */
		if (dm_noflush_suspending(ms->ti))
			bio->bi_status = BLK_STS_DM_REQUEUE;
		else
			bio->bi_status = BLK_STS_IOERR;

		bio_endio(bio);
		return;
	}

	/*
	 * Hold bio until the suspend is complete.
	 */
	bio_list_add(&ms->holds, bio);
	spin_unlock_irq(&ms->lock);
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s. "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_data_dir(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.",
		    m->dev->name);
	bio_io_error(bio);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_op = REQ_OP_READ,
		.bi_op_flags = 0,
		.mem.type = DM_IO_BIO,
		.mem.ptr.bio = bio,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_iter.bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_io_error(bio);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/

static void write_callback(unsigned long error, void *context)
{
	unsigned i;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the target's endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error)) {
		bio_endio(bio);
		return;
	}

	/*
	 * If the bio is a discard, return an error, but do not
	 * degrade the array.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD) {
		bio->bi_status = BLK_STS_NOTSUPP;
		bio_endio(bio);
		return;
	}

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);

	/*
	 * Need to raise event. Since raising
	 * events can block, we need to do it in
	 * the main thread.
	 */
	spin_lock_irqsave(&ms->lock, flags);
	if (!ms->failures.head)
		should_wake = 1;
	bio_list_add(&ms->failures, bio);
	spin_unlock_irqrestore(&ms->lock, flags);
	if (should_wake)
		wakeup_mirrord(ms);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_op = REQ_OP_WRITE,
		.bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH),
		.mem.type = DM_IO_BIO,
		.mem.ptr.bio = bio,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	if (bio_op(bio) == REQ_OP_DISCARD) {
		io_req.bi_op = REQ_OP_DISCARD;
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = NULL;
	}

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if ((bio->bi_opf & REQ_PREFLUSH) ||
		    (bio_op(bio) == REQ_OP_DISCARD)) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush fails on a previous call and succeeds here,
	 * we must not reset the log_failure variable. We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure) && errors_handled(ms)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		if (unlikely(ms->leg_failure) && errors_handled(ms) && !keep_log(ms)) {
			spin_lock_irq(&ms->lock);
			bio_list_add(&ms->failures, bio);
			spin_unlock_irq(&ms->lock);
			wakeup_mirrord(ms);
		} else {
			map_bio(get_default_mirror(ms), bio);
			generic_make_request(bio);
		}
	}
}
791 | ||
792 | static void do_failures(struct mirror_set *ms, struct bio_list *failures) | |
793 | { | |
794 | struct bio *bio; | |
795 | ||
796 | if (likely(!failures->head)) | |
797 | return; | |
798 | ||
799 | /* | |
800 | * If the log has failed, unattempted writes are being | |
801 | * put on the holds list. We can't issue those writes | |
802 | * until a log has been marked, so we must store them. | |
803 | * | |
804 | * If a 'noflush' suspend is in progress, we can requeue | |
805 | * the I/O's to the core. This give userspace a chance | |
806 | * to reconfigure the mirror, at which point the core | |
807 | * will reissue the writes. If the 'noflush' flag is | |
808 | * not set, we have no choice but to return errors. | |
809 | * | |
810 | * Some writes on the failures list may have been | |
811 | * submitted before the log failure and represent a | |
812 | * failure to write to one of the devices. It is ok | |
813 | * for us to treat them the same and requeue them | |
814 | * as well. | |
815 | */ | |
816 | while ((bio = bio_list_pop(failures))) { | |
817 | if (!ms->log_failure) { | |
818 | ms->in_sync = 0; | |
819 | dm_rh_mark_nosync(ms->rh, bio); | |
820 | } | |
821 | ||
822 | /* | |
823 | * If all the legs are dead, fail the I/O. | |
824 | * If the device has failed and keep_log is enabled, | |
825 | * fail the I/O. | |
826 | * | |
827 | * If we have been told to handle errors, and keep_log | |
828 | * isn't enabled, hold the bio and wait for userspace to | |
829 | * deal with the problem. | |
830 | * | |
831 | * Otherwise pretend that the I/O succeeded. (This would | |
832 | * be wrong if the failed leg returned after reboot and | |
833 | * got replicated back to the good legs.) | |
834 | */ | |
835 | if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure))) | |
836 | bio_io_error(bio); | |
837 | else if (errors_handled(ms) && !keep_log(ms)) | |
838 | hold_bio(ms, bio); | |
839 | else | |
840 | bio_endio(bio); | |
841 | } | |
842 | } | |
843 | ||
844 | static void trigger_event(struct work_struct *work) | |
845 | { | |
846 | struct mirror_set *ms = | |
847 | container_of(work, struct mirror_set, trigger_event); | |
848 | ||
849 | dm_table_event(ms->ti->table); | |
850 | } | |
851 | ||
852 | /*----------------------------------------------------------------- | |
853 | * kmirrord | |
854 | *---------------------------------------------------------------*/ | |
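/*
 * kmirrord work function: snapshot the queued bio lists under the lock,
 * update region states, then process recovery, reads, writes and
 * failures in that order.
 */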
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	bio_list_init(&ms->holds);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	ms->leg_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->io_client = dm_io_client_create();
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	kfree(ms);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;
	char dummy;
	int ret;

	if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			    &ms->mirror[mirror].dev);
	if (ret) {
		ti->error = "Device lookup failure";
		return ret;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;
	char dummy;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u%c", &param_count, &dummy) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;
	char dummy;
	int i;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u%c", &num_features, &dummy) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	for (i = 0; i < num_features; i++) {
		if (!strcmp("handle_errors", argv[0]))
			ms->features |= DM_RAID1_HANDLE_ERRORS;
		else if (!strcmp("keep_log", argv[0]))
			ms->features |= DM_RAID1_KEEP_LOG;
		else {
			ti->error = "Unrecognised feature requested";
			return -EINVAL;
		}

		argc--;
		argv++;
		(*args_used)++;
	}
	if (!errors_handled(ms) && keep_log(ms)) {
		ti->error = "keep_log feature requires the handle_errors feature";
		return -EINVAL;
	}

	return 0;
}
1057 | ||
1058 | /* | |
1059 | * Construct a mirror mapping: | |
1060 | * | |
1061 | * log_type #log_params <log_params> | |
1062 | * #mirrors [mirror_path offset]{2,} | |
1063 | * [#features <features>] | |
1064 | * | |
1065 | * log_type is "core" or "disk" | |
1066 | * #log_params is between 1 and 3 | |
1067 | * | |
1068 | * If present, supported features are "handle_errors" and "keep_log". | |
1069 | */ | |
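/*
 * A full dmsetup table line using this target might look like this
 * (illustrative device names and sizes only):
 *
 *   0 409600 mirror core 2 1024 nosync 2 /dev/sda 0 /dev/sdb 0 1 handle_errors
 *
 * i.e. a 409600-sector mirror using an in-core log with 1024-sector
 * regions and no initial resync, two legs starting at offset 0, and the
 * handle_errors feature enabled.
 */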
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;
	char dummy;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;

	r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh));
	if (r)
		goto err_free_context;

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->per_io_data_size = sizeof(struct dm_raid1_bio_record);

	ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	timer_setup(&ms->timer, delayed_wake_fn, 0);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region. If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(ms->kcopyd_client)) {
		r = PTR_ERR(ms->kcopyd_client);
		goto err_destroy_wq;
	}

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_work(&ms->trigger_event);
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio)
{
	int r, rw = bio_data_dir(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	struct dm_raid1_bio_record *bio_record =
		dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

	bio_record->details.bi_disk = NULL;

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return DM_MAPIO_KILL;

	/*
	 * If region is not in-sync queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (bio->bi_opf & REQ_RAHEAD)
			return DM_MAPIO_KILL;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_iter.bi_sector);
	if (unlikely(!m))
		return DM_MAPIO_KILL;

	dm_bio_record(&bio_record->details, bio);
	bio_record->m = m;

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
		blk_status_t *error)
{
	int rw = bio_data_dir(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_bio_record *bio_record =
		dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		if (!(bio->bi_opf & REQ_PREFLUSH) &&
		    bio_op(bio) != REQ_OP_DISCARD)
			dm_rh_dec(ms->rh, bio_record->write_region);
		return DM_ENDIO_DONE;
	}

	if (*error == BLK_STS_NOTSUPP)
		goto out;

	if (bio->bi_opf & REQ_RAHEAD)
		goto out;

	if (unlikely(*error)) {
		if (!bio_record->details.bi_disk) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return DM_ENDIO_DONE;
		}

		m = bio_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an intact
		 * mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &bio_record->details;

			dm_bio_restore(bd, bio);
			bio_record->details.bi_disk = NULL;
			bio->bi_status = 0;

			queue_bio(ms, bio, rw);
			return DM_ENDIO_INCOMPLETE;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	bio_record->details.bi_disk = NULL;

	return DM_ENDIO_DONE;
}
1306 | ||
1307 | static void mirror_presuspend(struct dm_target *ti) | |
1308 | { | |
1309 | struct mirror_set *ms = (struct mirror_set *) ti->private; | |
1310 | struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); | |
1311 | ||
1312 | struct bio_list holds; | |
1313 | struct bio *bio; | |
1314 | ||
1315 | atomic_set(&ms->suspend, 1); | |
1316 | ||
1317 | /* | |
1318 | * Process bios in the hold list to start recovery waiting | |
1319 | * for bios in the hold list. After the process, no bio has | |
1320 | * a chance to be added in the hold list because ms->suspend | |
1321 | * is set. | |
1322 | */ | |
1323 | spin_lock_irq(&ms->lock); | |
1324 | holds = ms->holds; | |
1325 | bio_list_init(&ms->holds); | |
1326 | spin_unlock_irq(&ms->lock); | |
1327 | ||
1328 | while ((bio = bio_list_pop(&holds))) | |
1329 | hold_bio(ms, bio); | |
1330 | ||
1331 | /* | |
1332 | * We must finish up all the work that we've | |
1333 | * generated (i.e. recovery work). | |
1334 | */ | |
1335 | dm_rh_stop_recovery(ms->rh); | |
1336 | ||
1337 | wait_event(_kmirrord_recovery_stopped, | |
1338 | !dm_rh_recovery_in_flight(ms->rh)); | |
1339 | ||
1340 | if (log->type->presuspend && log->type->presuspend(log)) | |
1341 | /* FIXME: need better error handling */ | |
1342 | DMWARN("log presuspend failed"); | |
1343 | ||
1344 | /* | |
1345 | * Now that recovery is complete/stopped and the | |
1346 | * delayed bios are queued, we need to wait for | |
1347 | * the worker thread to complete. This way, | |
1348 | * we know that all of our I/O has been pushed. | |
1349 | */ | |
1350 | flush_workqueue(ms->kmirrord_wq); | |
1351 | } | |
1352 | ||
1353 | static void mirror_postsuspend(struct dm_target *ti) | |
1354 | { | |
1355 | struct mirror_set *ms = ti->private; | |
1356 | struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); | |
1357 | ||
1358 | if (log->type->postsuspend && log->type->postsuspend(log)) | |
1359 | /* FIXME: need better error handling */ | |
1360 | DMWARN("log postsuspend failed"); | |
1361 | } | |
1362 | ||
1363 | static void mirror_resume(struct dm_target *ti) | |
1364 | { | |
1365 | struct mirror_set *ms = ti->private; | |
1366 | struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); | |
1367 | ||
1368 | atomic_set(&ms->suspend, 0); | |
1369 | if (log->type->resume && log->type->resume(log)) | |
1370 | /* FIXME: need better error handling */ | |
1371 | DMWARN("log resume failed"); | |
1372 | dm_rh_start_recovery(ms->rh); | |
1373 | } | |
1374 | ||
1375 | /* | |
1376 | * device_status_char | |
1377 | * @m: mirror device/leg we want the status of | |
1378 | * | |
1379 | * We return one character representing the most severe error | |
1380 | * we have encountered. | |
1381 | * A => Alive - No failures | |
1382 | * D => Dead - A write failure occurred leaving mirror out-of-sync | |
1383 | * S => Sync - A sychronization failure occurred, mirror out-of-sync | |
1384 | * R => Read - A read failure occurred, mirror data unaffected | |
1385 | * | |
1386 | * Returns: <char> | |
1387 | */ | |
1388 | static char device_status_char(struct mirror *m) | |
1389 | { | |
1390 | if (!atomic_read(&(m->error_count))) | |
1391 | return 'A'; | |
1392 | ||
1393 | return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' : | |
1394 | (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' : | |
1395 | (test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' : | |
1396 | (test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U'; | |
1397 | } | |
1398 | ||
1399 | ||
1400 | static void mirror_status(struct dm_target *ti, status_type_t type, | |
1401 | unsigned status_flags, char *result, unsigned maxlen) | |
1402 | { | |
1403 | unsigned int m, sz = 0; | |
1404 | int num_feature_args = 0; | |
1405 | struct mirror_set *ms = (struct mirror_set *) ti->private; | |
1406 | struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); | |
1407 | char buffer[ms->nr_mirrors + 1]; | |
1408 | ||
1409 | switch (type) { | |
1410 | case STATUSTYPE_INFO: | |
1411 | DMEMIT("%d ", ms->nr_mirrors); | |
1412 | for (m = 0; m < ms->nr_mirrors; m++) { | |
1413 | DMEMIT("%s ", ms->mirror[m].dev->name); | |
1414 | buffer[m] = device_status_char(&(ms->mirror[m])); | |
1415 | } | |
1416 | buffer[m] = '\0'; | |
1417 | ||
1418 | DMEMIT("%llu/%llu 1 %s ", | |
1419 | (unsigned long long)log->type->get_sync_count(log), | |
1420 | (unsigned long long)ms->nr_regions, buffer); | |
1421 | ||
1422 | sz += log->type->status(log, type, result+sz, maxlen-sz); | |
1423 | ||
1424 | break; | |
1425 | ||
1426 | case STATUSTYPE_TABLE: | |
1427 | sz = log->type->status(log, type, result, maxlen); | |
1428 | ||
1429 | DMEMIT("%d", ms->nr_mirrors); | |
1430 | for (m = 0; m < ms->nr_mirrors; m++) | |
1431 | DMEMIT(" %s %llu", ms->mirror[m].dev->name, | |
1432 | (unsigned long long)ms->mirror[m].offset); | |
1433 | ||
1434 | num_feature_args += !!errors_handled(ms); | |
1435 | num_feature_args += !!keep_log(ms); | |
1436 | if (num_feature_args) { | |
1437 | DMEMIT(" %d", num_feature_args); | |
1438 | if (errors_handled(ms)) | |
1439 | DMEMIT(" handle_errors"); | |
1440 | if (keep_log(ms)) | |
1441 | DMEMIT(" keep_log"); | |
1442 | } | |
1443 | ||
1444 | break; | |
1445 | } | |
1446 | } | |
1447 | ||
1448 | static int mirror_iterate_devices(struct dm_target *ti, | |
1449 | iterate_devices_callout_fn fn, void *data) | |
1450 | { | |
1451 | struct mirror_set *ms = ti->private; | |
1452 | int ret = 0; | |
1453 | unsigned i; | |
1454 | ||
1455 | for (i = 0; !ret && i < ms->nr_mirrors; i++) | |
1456 | ret = fn(ti, ms->mirror[i].dev, | |
1457 | ms->mirror[i].offset, ti->len, data); | |
1458 | ||
1459 | return ret; | |
1460 | } | |
1461 | ||
1462 | static struct target_type mirror_target = { | |
1463 | .name = "mirror", | |
1464 | .version = {1, 14, 0}, | |
1465 | .module = THIS_MODULE, | |
1466 | .ctr = mirror_ctr, | |
1467 | .dtr = mirror_dtr, | |
1468 | .map = mirror_map, | |
1469 | .end_io = mirror_end_io, | |
1470 | .presuspend = mirror_presuspend, | |
1471 | .postsuspend = mirror_postsuspend, | |
1472 | .resume = mirror_resume, | |
1473 | .status = mirror_status, | |
1474 | .iterate_devices = mirror_iterate_devices, | |
1475 | }; | |
1476 | ||
1477 | static int __init dm_mirror_init(void) | |
1478 | { | |
1479 | int r; | |
1480 | ||
1481 | r = dm_register_target(&mirror_target); | |
1482 | if (r < 0) { | |
1483 | DMERR("Failed to register mirror target"); | |
1484 | goto bad_target; | |
1485 | } | |
1486 | ||
1487 | return 0; | |
1488 | ||
1489 | bad_target: | |
1490 | return r; | |
1491 | } | |
1492 | ||
1493 | static void __exit dm_mirror_exit(void) | |
1494 | { | |
1495 | dm_unregister_target(&mirror_target); | |
1496 | } | |
1497 | ||
1498 | /* Module hooks */ | |
1499 | module_init(dm_mirror_init); | |
1500 | module_exit(dm_mirror_exit); | |
1501 | ||
1502 | MODULE_DESCRIPTION(DM_NAME " mirror target"); | |
1503 | MODULE_AUTHOR("Joe Thornber"); | |
1504 | MODULE_LICENSE("GPL"); |