/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */
#define DM_IO_PAGES 64
#define DM_KCOPYD_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_FLUSH_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;
	mempool_t *read_record_pool;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
	struct mirror mirror[0];
};

static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake_fn(unsigned long data)
{
	struct mirror_set *ms = (struct mirror_set *) data;

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	ms->timer.data = (unsigned long) ms;
	ms->timer.function = delayed_wake_fn;
	add_timer(&ms->timer);
}

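/*
 * Note: delayed_wake() coalesces wakeups.  The timer_pending bit guarantees
 * at most one timer is armed at a time, and the HZ / 5 expiry (roughly
 * 200ms) keeps bios that must be requeued repeatedly (e.g. while a region
 * is still being recovered elsewhere) from spinning the worker thread.
 */
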
static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

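/*
 * queue_bio() only wakes the worker when a list goes from empty to
 * non-empty; if bios were already queued, a wakeup is already pending,
 * so redundant queue_work() calls are avoided.
 */
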
#define MIN_READ_RECORDS 20
struct dm_raid1_read_record {
	struct mirror *m;
	struct dm_bio_details details;
};

static struct kmem_cache *_dm_raid1_read_record_cache;

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bh
 * doesn't get submitted to the lower levels of block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

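/*
 * Every bio_set_m() is paired with a bio_get_m()/bio_set_m(bio, NULL) in
 * the dm-io completion callbacks (read_callback/write_callback below),
 * before the bio is handed to bio_endio() or requeued.
 */
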
static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum's, DM_RAID1_*_ERROR
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync) {
		/*
		 * Better to issue requests to same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
		if (!atomic_read(&new->error_count)) {
			set_default_mirror(new);
			break;
		}

	if (unlikely(new == ms->mirror + ms->nr_mirrors))
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}

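/*
 * fail_mirror() may be called from the dm-io completion callbacks, which
 * can run in interrupt context; hence the "must not block" rule above and
 * the use of schedule_work() rather than raising the event directly.
 */
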
static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;

	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE_BARRIER,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.bvec = NULL,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	error_bits = -1;
	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_FLUSH_ERROR);
		return -EIO;
	}

	return 0;
}

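/*
 * The zero-length WRITE_BARRIER regions above make mirror_flush() a pure
 * flush: each leg is asked to commit its cached data without transferring
 * any.  With no notify.fn in the request, dm_io() runs synchronously and
 * error_bits carries one failure bit per leg on return.
 */
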
/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}

static int recover(struct mirror_set *ms, struct dm_region *reg)
{
	int r;
	unsigned i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
			   flags, recovery_complete, reg);

	return r;
}

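/*
 * The final-region arithmetic in recover() relies on region_size being a
 * power of two, so "ti->len & (region_size - 1)" is ti->len % region_size.
 * For example, with ti->len = 1000 sectors and region_size = 64, the last
 * region copies 1000 & 63 = 40 sectors.
 */
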
static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	int r;

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh))) {
		r = recover(ms, reg);
		if (r)
			dm_rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}

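/*
 * choose_mirror() walks backwards (with wrap-around) from the default
 * mirror until it finds a leg with a zero error_count.  The sector
 * argument is currently unused; no read balancing is done here.
 */
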
static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_sector) ? 1 : 0;

	return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	if (unlikely(!bio->bi_size))
		return 0;
	return m->offset + (bio->bi_sector - m->ms->ti->begin);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio->bi_size >> 9;
}

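/*
 * map_region() converts the bio's size from bytes to 512-byte sectors
 * (bi_size >> 9).  Zero-sized bios (empty barriers) are mapped to
 * sector 0 by map_sector() since they carry no data.
 */
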
/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio, 0);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s. "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_rw(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.",
		    m->dev->name);
	bio_endio(bio, -EIO);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

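/*
 * Because io_req.notify.fn is set, the dm_io() call in read_async_bio()
 * is asynchronous: it returns once the request is submitted and
 * read_callback() runs on completion.  The BUG_ON() only catches
 * submission errors.
 */
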
static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_endio(bio, -EIO);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use dm-io to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/

static void write_callback(unsigned long error, void *context)
{
	unsigned i;
	int ret = 0;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int uptodate = 0;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the targets endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error))
		goto out;

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
		else
			uptodate = 1;

	if (unlikely(!uptodate)) {
		DMERR("All replicated volumes dead, failing I/O");
		/* None of the writes succeeded, fail the I/O. */
		ret = -EIO;
	} else if (errors_handled(ms)) {
		/*
		 * Need to raise event.  Since raising
		 * events can block, we need to do it in
		 * the main thread.
		 */
		spin_lock_irqsave(&ms->lock, flags);
		if (!ms->failures.head)
			should_wake = 1;
		bio_list_add(&ms->failures, bio);
		spin_unlock_irqrestore(&ms->lock, flags);
		if (should_wake)
			wakeup_mirrord(ms);
		return;
	}
out:
	bio_endio(bio, ret);
}

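/*
 * The "error" argument handed to write_callback() by dm-io is a bitset:
 * bit i is set if the write to region i (here, mirror leg i) failed,
 * which is what the test_bit() loop above relies on.
 */
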
static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE | (bio->bi_rw & WRITE_BARRIER),
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if (unlikely(bio_empty_barrier(bio))) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush fails on a previous call and succeeds here,
	 * we must not reset the log_failure variable.  We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(get_default_mirror(ms), bio);
		generic_make_request(bio);
	}
}

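/*
 * Note the NOSYNC case above: a region already known to be out-of-sync
 * gets its writes mapped to the default mirror only, since the other
 * legs are stale there anyway and will be brought back in line by a
 * later recovery pass.
 */
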
static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (!failures->head)
		return;

	if (!ms->log_failure) {
		while ((bio = bio_list_pop(failures))) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0);
		}
		return;
	}

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the failures list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/Os to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	if (dm_noflush_suspending(ms->ti)) {
		while ((bio = bio_list_pop(failures)))
			bio_endio(bio, DM_ENDIO_REQUEUE);
		return;
	}

	if (atomic_read(&ms->suspend)) {
		while ((bio = bio_list_pop(failures)))
			bio_endio(bio, -EIO);
		return;
	}

	spin_lock_irq(&ms->lock);
	bio_list_merge(&ms->failures, failures);
	spin_unlock_irq(&ms->lock);

	delayed_wake(ms);
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);

	dm_table_unplug_all(ms->ti->table);
}

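/*
 * do_mirror() snapshots all three bio lists under ms->lock and
 * reinitialises them, so the actual I/O processing runs without the
 * spinlock held and new bios can be queued concurrently.
 */
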
/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
						_dm_raid1_read_record_cache);

	if (!ms->read_record_pool) {
		ti->error = "Error creating mirror read_record_pool";
		kfree(ms);
		return NULL;
	}

	ms->io_client = dm_io_client_create(DM_IO_PAGES);
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	return ms;
}

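/*
 * The allocation above sizes the trailing flexible array in one go:
 * len = sizeof(struct mirror_set) + nr_mirrors * sizeof(struct mirror),
 * so ms->mirror[0..nr_mirrors-1] live directly behind the struct.
 */
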
static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	mempool_destroy(ms->read_record_pool);
	kfree(ms);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
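/*
 * As an illustrative example (device names and sizes are hypothetical),
 * a table line for a two-leg mirror with an in-core log, a region size
 * of 1024 sectors and error handling enabled could look like:
 *
 *   0 2097152 mirror core 1 1024 2 /dev/sdb 0 /dev/sdc 0 1 handle_errors
 */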
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = dm_rh_get_region_size(ms->rh);
	ti->num_flush_requests = 1;

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	init_timer(&ms->timer);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
	if (r)
		goto err_destroy_wq;

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_scheduled_work();
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_raid1_read_record *read_record = NULL;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	/*
	 * If region is not in-sync queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (rw == READA)
			return -EWOULDBLOCK;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_sector);
	if (unlikely(!m))
		return -EIO;

	read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
	if (likely(read_record)) {
		dm_bio_record(&read_record->details, bio);
		map_context->ptr = read_record;
		read_record->m = m;
	}

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}

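/*
 * Return-value convention used above: DM_MAPIO_SUBMITTED tells dm core the
 * target has taken ownership of the bio (it was queued for the worker),
 * while DM_MAPIO_REMAPPED asks dm core to submit the now-remapped bio.
 */
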
static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_read_record *read_record = map_context->ptr;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		if (likely(!bio_empty_barrier(bio)))
			dm_rh_dec(ms->rh, map_context->ll);
		return error;
	}

	if (error == -EOPNOTSUPP)
		goto out;

	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
		goto out;

	if (unlikely(error)) {
		if (!read_record) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return -EIO;
		}

		m = read_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an intact
		 * mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &read_record->details;

			dm_bio_restore(bd, bio);
			mempool_free(read_record, ms->read_record_pool);
			map_context->ptr = NULL;
			queue_bio(ms, bio, rw);
			return 1;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	if (read_record) {
		mempool_free(read_record, ms->read_record_pool);
		map_context->ptr = NULL;
	}

	return error;
}

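/*
 * The bare "return 1" in the retry path above is DM_ENDIO_INCOMPLETE:
 * dm core must not complete the bio, because it has been restored and
 * requeued for another attempt on a different leg.
 */
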
static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 1);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete.  This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}

/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 *	A => Alive - No failures
 *	D => Dead - A write failure occurred leaving mirror out-of-sync
 *	S => Sync - A synchronization failure occurred, mirror out-of-sync
 *	F => Flush - A flush failure occurred, mirror out-of-sync
 *	R => Read - A read failure occurred, mirror data unaffected
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}

static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[ms->nr_mirrors + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}

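/*
 * Sketch of the STATUSTYPE_INFO output built above (values hypothetical;
 * the tail comes from the dirty log's own status method):
 *
 *   2 253:4 253:5 125/125 1 AA <log status...>
 */
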
static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 12, 0},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};

static int __init dm_mirror_init(void)
{
	int r;

	_dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
	if (!_dm_raid1_read_record_cache) {
		DMERR("Can't allocate dm_raid1_read_record cache");
		r = -ENOMEM;
		goto bad_cache;
	}

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	kmem_cache_destroy(_dm_raid1_read_record_cache);
bad_cache:
	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
	kmem_cache_destroy(_dm_raid1_read_record_cache);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");