/*
 * Copyright (C) 2003 Sistina Software Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-io.h"
#include "dm-log.h"
#include "kcopyd.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#define DM_MSG_PREFIX "raid1"
#define DM_IO_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of three states: clean, dirty,
 * nosync.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of three lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull.  rh_update_states() will remove them from the
 *   hash table.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery.  rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered.  rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash.
 *
 * There are 2 locks:
 *   A rw spin lock 'hash_lock' protects just the hash table,
 *   this is never held in write mode from interrupt context,
 *   which I believe means that we only have to disable irqs when
 *   doing a write lock.
 *
 *   An ordinary spin lock 'region_lock' that protects the three
 *   lists in the region_hash, along with the 'state', 'list' and
 *   'delayed_bios' fields of the regions.  This is used from irq
 *   context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
struct mirror_set;
struct region_hash {
	struct mirror_set *ms;
	uint32_t region_size;
	unsigned region_shift;

	/* holds persistent region state */
	struct dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	mempool_t *region_pool;
	unsigned int mask;
	unsigned int nr_buckets;
	struct list_head *buckets;

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct semaphore recovery_count;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
	struct list_head failed_recovered_regions;
};

enum {
	RH_CLEAN,
	RH_DIRTY,
	RH_NOSYNC,
	RH_RECOVERING
};

struct region {
	struct region_hash *rh;	/* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
struct mirror {
	atomic_t error_count;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;
	struct region_hash rh;
	struct kcopyd_client *kcopyd_client;
	uint64_t features;

	spinlock_t lock;	/* protects the next two lists */
	struct bio_list reads;
	struct bio_list writes;

	struct dm_io_client *io_client;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;

	struct mirror *default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;

	unsigned int nr_mirrors;
	struct mirror mirror[0];
};

/*
 * Conversion fns
 */
static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
{
	return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
}

static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}

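/*
 * All deferred bio processing and recovery runs from the kmirrord
 * workqueue; wake() just schedules its work item.
 */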
static void wake(struct mirror_set *ms)
{
	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);

#define MIN_REGIONS 64
#define MAX_RECOVERY 1
static int rh_init(struct region_hash *rh, struct mirror_set *ms,
		   struct dirty_log *log, uint32_t region_size,
		   region_t nr_regions)
{
	unsigned int nr_buckets, max_buckets;
	size_t i;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;

	rh->ms = ms;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = ffs(region_size) - 1;
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash memory");
		return -ENOMEM;
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);
	INIT_LIST_HEAD(&rh->failed_recovered_regions);

	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
						      sizeof(struct region));
	if (!rh->region_pool) {
		vfree(rh->buckets);
		rh->buckets = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void rh_exit(struct region_hash *rh)
{
	unsigned int h;
	struct region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, rh->region_pool);
		}
	}

	if (rh->log)
		dm_destroy_dirty_log(rh->log);
	if (rh->region_pool)
		mempool_destroy(rh->region_pool);
	vfree(rh->buckets);
}

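/*
 * Multiplicative hash: scramble the region number with a large
 * constant, then keep the higher-order bits (>> 12) masked down to
 * the bucket count.
 */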
#define RH_HASH_MULT 2654435387U

static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
{
	return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
}

static struct region *__rh_lookup(struct region_hash *rh, region_t region)
{
	struct region *reg;

	list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}

static void __rh_insert(struct region_hash *rh, struct region *reg)
{
	unsigned int h = rh_hash(rh, reg->key);
	list_add(&reg->hash_list, rh->buckets + h);
}

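/*
 * Called with hash_lock held for read.  Drops it to allocate a new
 * region, retakes it in write mode to insert, and re-checks for a
 * racing insertion before returning with the read lock held again.
 */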
static struct region *__rh_alloc(struct region_hash *rh, region_t region)
{
	struct region *reg, *nreg;

	read_unlock(&rh->hash_lock);
	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(struct region), GFP_NOIO);
	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		RH_CLEAN : RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;

	INIT_LIST_HEAD(&nreg->list);

	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);
	write_lock_irq(&rh->hash_lock);

	reg = __rh_lookup(rh, region);
	if (reg)
		/* we lost the race */
		mempool_free(nreg, rh->region_pool);

	else {
		__rh_insert(rh, nreg);
		if (nreg->state == RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}
		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);
	read_lock(&rh->hash_lock);

	return reg;
}

static inline struct region *__rh_find(struct region_hash *rh, region_t region)
{
	struct region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg)
		reg = __rh_alloc(rh, region);

	return reg;
}

static int rh_state(struct region_hash *rh, region_t region, int may_block)
{
	int r;
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
	 * taken as a RH_NOSYNC
	 */
	return r == 1 ? RH_CLEAN : RH_NOSYNC;
}

static inline int rh_in_sync(struct region_hash *rh,
			     region_t region, int may_block)
{
	int state = rh_state(rh, region, may_block);
	return state == RH_CLEAN || state == RH_DIRTY;
}

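/* Resubmit, via the daemon, writes that were delayed on a region. */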
static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list))) {
		queue_bio(ms, bio, WRITE);
	}
}

static void complete_resync_work(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);
	dispatch_bios(rh->ms, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
	up(&rh->recovery_count);
}

369 | static void rh_update_states(struct region_hash *rh) |
370 | { | |
371 | struct region *reg, *next; | |
372 | ||
373 | LIST_HEAD(clean); | |
374 | LIST_HEAD(recovered); | |
f44db678 | 375 | LIST_HEAD(failed_recovered); |
1da177e4 LT |
376 | |
377 | /* | |
378 | * Quickly grab the lists. | |
379 | */ | |
380 | write_lock_irq(&rh->hash_lock); | |
381 | spin_lock(&rh->region_lock); | |
382 | if (!list_empty(&rh->clean_regions)) { | |
383 | list_splice(&rh->clean_regions, &clean); | |
384 | INIT_LIST_HEAD(&rh->clean_regions); | |
385 | ||
943317ef | 386 | list_for_each_entry(reg, &clean, list) |
1da177e4 | 387 | list_del(®->hash_list); |
1da177e4 LT |
388 | } |
389 | ||
390 | if (!list_empty(&rh->recovered_regions)) { | |
391 | list_splice(&rh->recovered_regions, &recovered); | |
392 | INIT_LIST_HEAD(&rh->recovered_regions); | |
393 | ||
394 | list_for_each_entry (reg, &recovered, list) | |
395 | list_del(®->hash_list); | |
396 | } | |
f44db678 JB |
397 | |
398 | if (!list_empty(&rh->failed_recovered_regions)) { | |
399 | list_splice(&rh->failed_recovered_regions, &failed_recovered); | |
400 | INIT_LIST_HEAD(&rh->failed_recovered_regions); | |
401 | ||
402 | list_for_each_entry(reg, &failed_recovered, list) | |
403 | list_del(®->hash_list); | |
404 | } | |
405 | ||
1da177e4 LT |
406 | spin_unlock(&rh->region_lock); |
407 | write_unlock_irq(&rh->hash_lock); | |
408 | ||
409 | /* | |
410 | * All the regions on the recovered and clean lists have | |
411 | * now been pulled out of the system, so no need to do | |
412 | * any more locking. | |
413 | */ | |
414 | list_for_each_entry_safe (reg, next, &recovered, list) { | |
415 | rh->log->type->clear_region(rh->log, reg->key); | |
f3ee6b2f | 416 | complete_resync_work(reg, 1); |
1da177e4 LT |
417 | mempool_free(reg, rh->region_pool); |
418 | } | |
419 | ||
f44db678 JB |
420 | list_for_each_entry_safe(reg, next, &failed_recovered, list) { |
421 | complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1); | |
422 | mempool_free(reg, rh->region_pool); | |
423 | } | |
424 | ||
943317ef JB |
425 | list_for_each_entry_safe(reg, next, &clean, list) { |
426 | rh->log->type->clear_region(rh->log, reg->key); | |
1da177e4 | 427 | mempool_free(reg, rh->region_pool); |
943317ef JB |
428 | } |
429 | ||
430 | rh->log->type->flush(rh->log); | |
1da177e4 LT |
431 | } |
432 | ||
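/*
 * Take a pending-io reference on a region; on the clean -> dirty
 * transition the region is also marked in the dirty log.
 */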
static void rh_inc(struct region_hash *rh, region_t region)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == RH_CLEAN) {
		reg->state = RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}

static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next)
		rh_inc(rh, bio_to_region(rh, bio));
}

static void rh_dec(struct region_hash *rh, region_t region)
{
	unsigned long flags;
	struct region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to the corresponding list for
		 * the next action.
		 * At this point, the region is not yet connected to any list.
		 *
		 * If the state is RH_NOSYNC, the region should be kept off
		 * the clean list.
		 * The hash entry for RH_NOSYNC will remain in memory
		 * until the region is recovered or the map is reloaded.
		 */

		/* do nothing for RH_NOSYNC */
		if (reg->state == RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == RH_DIRTY) {
			reg->state = RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		wake(rh->ms);
}

/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct region_hash *rh)
{
	int r;
	struct region *reg;
	region_t region;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}

static void rh_recovery_prepare(struct region_hash *rh)
{
	/* Extra reference to avoid race with rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
}

/*
 * Returns a quiesced region ready for recovery, or NULL if none.
 */
static struct region *rh_recovery_start(struct region_hash *rh)
{
	struct region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct region, list);
		list_del_init(&reg->list);	/* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}

static void rh_recovery_end(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	if (success)
		list_add(&reg->list, &reg->rh->recovered_regions);
	else {
		reg->state = RH_NOSYNC;
		list_add(&reg->list, &reg->rh->failed_recovered_regions);
	}
	spin_unlock_irq(&rh->region_lock);

	wake(rh->ms);
}

static int rh_flush(struct region_hash *rh)
{
	return rh->log->type->flush(rh->log);
}

static void rh_delay(struct region_hash *rh, struct bio *bio)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}

static void rh_stop_recovery(struct region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < MAX_RECOVERY; i++)
		down(&rh->recovery_count);
}

static void rh_start_recovery(struct region_hash *rh)
{
	int i;

	for (i = 0; i < MAX_RECOVERY; i++)
		up(&rh->recovery_count);

	wake(rh->ms);
}

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror_set struct away inside
 * bi_next for write buffers.  This is safe since the bio
 * doesn't get submitted to the lower levels of the block layer.
 */
static struct mirror_set *bio_get_ms(struct bio *bio)
{
	return (struct mirror_set *) bio->bi_next;
}

static void bio_set_ms(struct bio *bio, struct mirror_set *ms)
{
	bio->bi_next = (struct bio *) ms;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned int write_err,
			      void *context)
{
	struct region *reg = (struct region *) context;

	if (read_err)
		/* A read error means the default mirror failed. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");

	if (write_err)
		DMERR_LIMIT("Write error during recovery (error = 0x%x)",
			    write_err);

	rh_recovery_end(reg, !(read_err || write_err));
}

static int recover(struct mirror_set *ms, struct region *reg)
{
	int r;
	unsigned int i;
	struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;

	/* fill in the source */
	m = ms->default_mirror;
	from.bdev = m->dev->bdev;
	from.sector = m->offset + region_to_sector(reg->rh, reg->key);
	if (reg->key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (reg->rh->region_size - 1);
		if (!from.count)
			from.count = reg->rh->region_size;
	} else
		from.count = reg->rh->region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == ms->default_mirror)
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	set_bit(KCOPYD_IGNORE_ERROR, &flags);
	r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
			recovery_complete, reg);

	return r;
}

static void do_recovery(struct mirror_set *ms)
{
	int r;
	struct region *reg;
	struct dirty_log *log = ms->rh.log;

	/*
	 * Start quiescing some regions.
	 */
	rh_recovery_prepare(&ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = rh_recovery_start(&ms->rh))) {
		r = recover(ms, reg);
		if (r)
			rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	/* FIXME: add read balancing */
	return ms->default_mirror;
}

/*
 * remap a buffer to a particular mirror.
 */
static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin);
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = bio_to_region(&ms->rh, bio);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (rh_in_sync(&ms->rh, region, 1))
			m = choose_mirror(ms, bio->bi_sector);
		else
			m = ms->default_mirror;

		map_bio(ms, m, bio);
		generic_make_request(bio);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
static void write_callback(unsigned long error, void *context)
{
	unsigned int i;
	int uptodate = 1;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;

	ms = bio_get_ms(bio);
	bio_set_ms(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the targets endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */

	if (error) {
		/*
		 * only error the io if all mirrors failed.
		 * FIXME: bogus
		 */
		uptodate = 0;
		for (i = 0; i < ms->nr_mirrors; i++)
			if (!test_bit(i, &error)) {
				uptodate = 1;
				break;
			}
	}
	bio_endio(bio, bio->bi_size, 0);
}

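/*
 * Fire the write at every mirror in a single asynchronous dm_io
 * request; write_callback runs once the per-device ios complete.
 */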
static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct io_region io[KCOPYD_MAX_REGIONS+1];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	for (i = 0; i < ms->nr_mirrors; i++) {
		m = ms->mirror + i;

		io[i].bdev = m->dev->bdev;
		io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin);
		io[i].count = bio->bi_size >> 9;
	}

	bio_set_ms(bio, ms);

	(void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);

	while ((bio = bio_list_pop(writes))) {
		state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
		switch (state) {
		case RH_CLEAN:
		case RH_DIRTY:
			this_list = &sync;
			break;

		case RH_NOSYNC:
			this_list = &nosync;
			break;

		case RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	rh_inc_pending(&ms->rh, &sync);
	rh_inc_pending(&ms->rh, &nosync);
	ms->log_failure = rh_flush(&ms->rh) ? 1 : 0;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure))
		while ((bio = bio_list_pop(&sync)))
			bio_endio(bio, bio->bi_size, -EIO);
	else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		rh_delay(&ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(ms, ms->default_mirror, bio);
		generic_make_request(bio);
	}
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
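/*
 * Workqueue handler: snapshot the queued reads and writes under the
 * lock, then update region states, kick off recovery and dispatch
 * the io.
 */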
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes;

	spin_lock(&ms->lock);
	reads = ms->reads;
	writes = ms->writes;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	spin_unlock(&ms->lock);

	rh_update_states(&ms->rh);
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
		return NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kmalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	memset(ms, 0, len);
	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];

	ms->io_client = dm_io_client_create(DM_IO_PAGES);
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		kfree(ms);
		return NULL;
	}

	if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	rh_exit(&ms->rh);
	kfree(ms);
}

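/*
 * A valid region size is a power of two, a multiple of the page size
 * expressed in sectors, and no bigger than the target itself.
 */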
static inline int _check_region_size(struct dm_target *ti, uint32_t size)
{
	return !(size % (PAGE_SIZE >> 9) || (size & (size - 1)) ||
		 size > ti->len);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dirty_log *create_dirty_log(struct dm_target *ti,
					  unsigned int argc, char **argv,
					  unsigned int *args_used)
{
	unsigned int param_count;
	struct dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
		ti->error = "Invalid region size";
		dm_destroy_dirty_log(dl);
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
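/*
 * For instance, a hypothetical two-way mirror with an in-core log and
 * a 1024-sector region size might be built from a table line such as:
 *
 *   0 409600 mirror core 1 1024 2 /dev/sda 0 /dev/sdb 0 1 handle_errors
 */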
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_destroy_dirty_log(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = ms->rh.region_size;

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		free_context(ms, ti, m);
		return -ENOMEM;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);

	r = parse_features(ms, argc, argv, &args_used);
	if (r) {
		destroy_workqueue(ms->kmirrord_wq);
		free_context(ms, ti, ms->nr_mirrors);
		return r;
	}

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		destroy_workqueue(ms->kmirrord_wq);
		free_context(ms, ti, ms->nr_mirrors);
		return -EINVAL;
	}

	r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
	if (r) {
		destroy_workqueue(ms->kmirrord_wq);
		free_context(ms, ti, ms->nr_mirrors);
		return r;
	}

	wake(ms);
	return 0;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	flush_workqueue(ms->kmirrord_wq);
	kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

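/*
 * Queue a bio for the daemon; only wake kmirrord on the empty ->
 * non-empty transition, since one pass of do_mirror drains the list.
 */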
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock(&ms->lock);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock(&ms->lock);

	if (should_wake)
		wake(ms);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;

	map_context->ll = bio_to_region(&ms->rh, bio);

	if (rw == WRITE) {
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = ms->rh.log->type->in_sync(ms->rh.log,
				      bio_to_region(&ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	if (r == -EWOULDBLOCK)	/* FIXME: ugly */
		r = DM_MAPIO_SUBMITTED;

	/*
	 * We don't want to fast track a recovery just for a read
	 * ahead.  So we just let it silently fail.
	 * FIXME: get rid of this.
	 */
	if (!r && rw == READA)
		return -EIO;

	if (!r) {
		/* Pass this io over to the daemon */
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	m = choose_mirror(ms, bio->bi_sector);
	if (!m)
		return -EIO;

	map_bio(ms, m, bio);
	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	region_t region = map_context->ll;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE)
		rh_dec(&ms->rh, region);

	return 0;
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	rh_stop_recovery(&ms->rh);

	/* Wait for all I/O we generated to complete */
	wait_event(_kmirrord_recovery_stopped,
		   !atomic_read(&ms->rh.recovery_in_flight));

	if (log->type->suspend && log->type->suspend(log))
		/* FIXME: need better error handling */
		DMWARN("log suspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	rh_start_recovery(&ms->rh);
}

static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT("%s ", ms->mirror[m].dev->name);

		DMEMIT("%llu/%llu 0 ",
		       (unsigned long long)ms->rh.log->type->
				get_sync_count(ms->rh.log),
		       (unsigned long long)ms->nr_regions);

		sz += ms->rh.log->type->status(ms->rh.log, type,
					       result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 0, 3},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
};

static int __init dm_mirror_init(void)
{
	int r;

	r = dm_dirty_log_init();
	if (r)
		return r;

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		dm_dirty_log_exit();
	}

	return r;
}

static void __exit dm_mirror_exit(void)
{
	int r;

	r = dm_unregister_target(&mirror_target);
	if (r < 0)
		DMERR("unregister failed %d", r);

	dm_dirty_log_exit();
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");