/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include "md.h"
#include "raid5.h"

/*
 * Metadata/data are stored on disk in 4k units (blocks) regardless of the
 * underlying hardware sector size. This only works with PAGE_SIZE == 4096.
 */
#define BLOCK_SECTORS (8)

/*
 * reclaim runs every 1/4 of the disk size or when 10G of space is
 * reclaimable, whichever is smaller. This prevents recovery from having to
 * scan a very long log.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)

struct r5l_log {
        struct md_rdev *rdev;

        u32 uuid_checksum;

        sector_t device_size;           /* log device size, rounded to
                                         * BLOCK_SECTORS */
        sector_t max_free_space;        /* reclaim runs if free space is at
                                         * this size */

        sector_t last_checkpoint;       /* log tail, where recovery scanning
                                         * starts */
        u64 last_cp_seq;                /* log tail sequence */

        sector_t log_start;             /* log head, where new data is
                                         * appended */
        u64 seq;                        /* log head sequence */

        sector_t next_checkpoint;
        u64 next_cp_seq;

        struct mutex io_mutex;
        struct r5l_io_unit *current_io; /* current io_unit accepting new data */

        spinlock_t io_list_lock;
        struct list_head running_ios;   /* io_units which are still running,
                                         * and have not yet been completely
                                         * written to the log */
        struct list_head io_end_ios;    /* io_units which have been completely
                                         * written to the log but not yet
                                         * written to the RAID */
        struct list_head flushing_ios;  /* io_units which are waiting for a
                                         * log cache flush */
        struct list_head finished_ios;  /* io_units which have settled down in
                                         * the log disk */
        struct bio flush_bio;

        struct kmem_cache *io_kc;

        struct md_thread *reclaim_thread;
        unsigned long reclaim_target;   /* amount of space that needs to be
                                         * reclaimed. If it's 0, reclaim the
                                         * space used by io_units which are in
                                         * IO_UNIT_STRIPE_END state (i.e.
                                         * reclaim doesn't wait for a specific
                                         * io_unit to switch to the
                                         * IO_UNIT_STRIPE_END state) */
        wait_queue_head_t iounit_wait;

        struct list_head no_space_stripes; /* pending stripes, log has no space */
        spinlock_t no_space_stripes_lock;

        bool need_cache_flush;
};

/*
 * An IO range starts from a meta data block and ends at the next meta data
 * block. The io_unit's meta data block tracks the data/parity that follows
 * it. io_units are written to the log disk with normal writes; as we always
 * flush the log disk first and only then start moving data to the raid
 * disks, there is no requirement to write io_units with FLUSH/FUA.
 */
struct r5l_io_unit {
        struct r5l_log *log;

        struct page *meta_page; /* store meta block */
        int meta_offset;        /* current offset in meta_page */

        struct bio_list bios;
        atomic_t pending_io;    /* pending bios not written to log yet */
        struct bio *current_bio;/* current_bio accepting new data */

        atomic_t pending_stripe;/* how many stripes not flushed to raid */
        u64 seq;                /* seq number of the metablock */
        sector_t log_start;     /* where the io_unit starts */
        sector_t log_end;       /* where the io_unit ends */
        struct list_head log_sibling; /* log->running_ios */
        struct list_head stripe_list; /* stripes added to the io_unit */

        int state;
};

/* r5l_io_unit state */
enum r5l_io_unit_state {
        IO_UNIT_RUNNING = 0,    /* accepting new IO */
        IO_UNIT_IO_START = 1,   /* io_unit bios have started writing to the
                                 * log; no new bios are accepted */
        IO_UNIT_IO_END = 2,     /* io_unit bios have finished writing to the
                                 * log */
        IO_UNIT_STRIPE_END = 3, /* stripe data has finished writing to the
                                 * raid */
};

static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
        start += inc;
        if (start >= log->device_size)
                start = start - log->device_size;
        return start;
}

static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
                                  sector_t end)
{
        if (end >= start)
                return end - start;
        else
                return end + log->device_size - start;
}
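
/*
 * Worked example for the ring helpers above (illustration only): with
 * device_size == 1000 sectors, r5l_ring_add(log, 996, 8) wraps to
 * 1004 - 1000 = 4, and r5l_ring_distance(log, 996, 4) gives
 * 4 + 1000 - 996 = 8, i.e. the distance measured forward around the ring.
 * Note r5l_ring_add() assumes inc never exceeds device_size, so a single
 * subtraction suffices.
 */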
142 | ||
143 | static bool r5l_has_free_space(struct r5l_log *log, sector_t size) | |
144 | { | |
145 | sector_t used_size; | |
146 | ||
147 | used_size = r5l_ring_distance(log, log->last_checkpoint, | |
148 | log->log_start); | |
149 | ||
150 | return log->device_size > used_size + size; | |
151 | } | |
152 | ||
153 | static struct r5l_io_unit *r5l_alloc_io_unit(struct r5l_log *log) | |
154 | { | |
155 | struct r5l_io_unit *io; | |
156 | /* We can't handle memory allocate failure so far */ | |
157 | gfp_t gfp = GFP_NOIO | __GFP_NOFAIL; | |
158 | ||
159 | io = kmem_cache_zalloc(log->io_kc, gfp); | |
160 | io->log = log; | |
161 | io->meta_page = alloc_page(gfp | __GFP_ZERO); | |
162 | ||
163 | bio_list_init(&io->bios); | |
164 | INIT_LIST_HEAD(&io->log_sibling); | |
165 | INIT_LIST_HEAD(&io->stripe_list); | |
166 | io->state = IO_UNIT_RUNNING; | |
f6bed0ef SL |
167 | return io; |
168 | } | |
169 | ||
170 | static void r5l_free_io_unit(struct r5l_log *log, struct r5l_io_unit *io) | |
171 | { | |
172 | __free_page(io->meta_page); | |
173 | kmem_cache_free(log->io_kc, io); | |
174 | } | |
175 | ||
176 | static void r5l_move_io_unit_list(struct list_head *from, struct list_head *to, | |
177 | enum r5l_io_unit_state state) | |
178 | { | |
179 | struct r5l_io_unit *io; | |
180 | ||
181 | while (!list_empty(from)) { | |
182 | io = list_first_entry(from, struct r5l_io_unit, log_sibling); | |
183 | /* don't change list order */ | |
184 | if (io->state >= state) | |
185 | list_move_tail(&io->log_sibling, to); | |
186 | else | |
187 | break; | |
188 | } | |
189 | } | |
190 | ||
f6bed0ef SL |
191 | static void __r5l_set_io_unit_state(struct r5l_io_unit *io, |
192 | enum r5l_io_unit_state state) | |
193 | { | |
f6bed0ef SL |
194 | if (WARN_ON(io->state >= state)) |
195 | return; | |
196 | io->state = state; | |
f6bed0ef SL |
197 | } |
198 | ||
d8858f43 CH |
199 | static void r5l_io_run_stripes(struct r5l_io_unit *io) |
200 | { | |
201 | struct stripe_head *sh, *next; | |
202 | ||
203 | list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) { | |
204 | list_del_init(&sh->log_list); | |
205 | set_bit(STRIPE_HANDLE, &sh->state); | |
206 | raid5_release_stripe(sh); | |
207 | } | |
208 | } | |
209 | ||
f6bed0ef | 210 | /* XXX: totally ignores I/O errors */ |
56fef7c6 CH |
211 | static void r5l_log_run_stripes(struct r5l_log *log) |
212 | { | |
213 | struct r5l_io_unit *io, *next; | |
214 | ||
215 | assert_spin_locked(&log->io_list_lock); | |
216 | ||
217 | list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) { | |
218 | /* don't change list order */ | |
219 | if (io->state < IO_UNIT_IO_END) | |
220 | break; | |
221 | ||
222 | list_move_tail(&io->log_sibling, &log->finished_ios); | |
223 | r5l_io_run_stripes(io); | |
224 | } | |
225 | } | |
226 | ||
f6bed0ef SL |
227 | static void r5l_log_endio(struct bio *bio) |
228 | { | |
229 | struct r5l_io_unit *io = bio->bi_private; | |
230 | struct r5l_log *log = io->log; | |
509ffec7 | 231 | unsigned long flags; |
f6bed0ef SL |
232 | |
233 | bio_put(bio); | |
234 | ||
235 | if (!atomic_dec_and_test(&io->pending_io)) | |
236 | return; | |
237 | ||
509ffec7 CH |
238 | spin_lock_irqsave(&log->io_list_lock, flags); |
239 | __r5l_set_io_unit_state(io, IO_UNIT_IO_END); | |
56fef7c6 CH |
240 | if (log->need_cache_flush) |
241 | r5l_move_io_unit_list(&log->running_ios, &log->io_end_ios, | |
242 | IO_UNIT_IO_END); | |
243 | else | |
244 | r5l_log_run_stripes(log); | |
509ffec7 CH |
245 | spin_unlock_irqrestore(&log->io_list_lock, flags); |
246 | ||
56fef7c6 CH |
247 | if (log->need_cache_flush) |
248 | md_wakeup_thread(log->rdev->mddev->thread); | |
f6bed0ef SL |
249 | } |
250 | ||
251 | static void r5l_submit_current_io(struct r5l_log *log) | |
252 | { | |
253 | struct r5l_io_unit *io = log->current_io; | |
254 | struct r5l_meta_block *block; | |
255 | struct bio *bio; | |
509ffec7 | 256 | unsigned long flags; |
f6bed0ef SL |
257 | u32 crc; |
258 | ||
259 | if (!io) | |
260 | return; | |
261 | ||
262 | block = page_address(io->meta_page); | |
263 | block->meta_size = cpu_to_le32(io->meta_offset); | |
5cb2fbd6 | 264 | crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE); |
f6bed0ef SL |
265 | block->checksum = cpu_to_le32(crc); |
266 | ||
267 | log->current_io = NULL; | |
509ffec7 CH |
268 | spin_lock_irqsave(&log->io_list_lock, flags); |
269 | __r5l_set_io_unit_state(io, IO_UNIT_IO_START); | |
270 | spin_unlock_irqrestore(&log->io_list_lock, flags); | |
f6bed0ef SL |
271 | |
272 | while ((bio = bio_list_pop(&io->bios))) { | |
273 | /* all IO must start from rdev->data_offset */ | |
274 | bio->bi_iter.bi_sector += log->rdev->data_offset; | |
275 | submit_bio(WRITE, bio); | |
276 | } | |
277 | } | |
278 | ||
279 | static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log) | |
280 | { | |
281 | struct r5l_io_unit *io; | |
282 | struct r5l_meta_block *block; | |
283 | struct bio *bio; | |
284 | ||
285 | io = r5l_alloc_io_unit(log); | |
286 | ||
287 | block = page_address(io->meta_page); | |
288 | block->magic = cpu_to_le32(R5LOG_MAGIC); | |
289 | block->version = R5LOG_VERSION; | |
290 | block->seq = cpu_to_le64(log->seq); | |
291 | block->position = cpu_to_le64(log->log_start); | |
292 | ||
293 | io->log_start = log->log_start; | |
294 | io->meta_offset = sizeof(struct r5l_meta_block); | |
295 | io->seq = log->seq; | |
296 | ||
297 | bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES); | |
298 | io->current_bio = bio; | |
299 | bio->bi_rw = WRITE; | |
300 | bio->bi_bdev = log->rdev->bdev; | |
301 | bio->bi_iter.bi_sector = log->log_start; | |
302 | bio_add_page(bio, io->meta_page, PAGE_SIZE, 0); | |
303 | bio->bi_end_io = r5l_log_endio; | |
304 | bio->bi_private = io; | |
305 | ||
306 | bio_list_add(&io->bios, bio); | |
307 | atomic_inc(&io->pending_io); | |
308 | ||
309 | log->seq++; | |
310 | log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS); | |
311 | io->log_end = log->log_start; | |
312 | /* current bio hit disk end */ | |
313 | if (log->log_start == 0) | |
314 | io->current_bio = NULL; | |
315 | ||
316 | spin_lock_irq(&log->io_list_lock); | |
317 | list_add_tail(&io->log_sibling, &log->running_ios); | |
318 | spin_unlock_irq(&log->io_list_lock); | |
319 | ||
320 | return io; | |
321 | } | |
322 | ||
323 | static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size) | |
324 | { | |
325 | struct r5l_io_unit *io; | |
326 | ||
327 | io = log->current_io; | |
328 | if (io && io->meta_offset + payload_size > PAGE_SIZE) | |
329 | r5l_submit_current_io(log); | |
330 | io = log->current_io; | |
331 | if (io) | |
332 | return 0; | |
333 | ||
334 | log->current_io = r5l_new_meta(log); | |
335 | return 0; | |
336 | } | |
337 | ||
338 | static void r5l_append_payload_meta(struct r5l_log *log, u16 type, | |
339 | sector_t location, | |
340 | u32 checksum1, u32 checksum2, | |
341 | bool checksum2_valid) | |
342 | { | |
343 | struct r5l_io_unit *io = log->current_io; | |
344 | struct r5l_payload_data_parity *payload; | |
345 | ||
346 | payload = page_address(io->meta_page) + io->meta_offset; | |
347 | payload->header.type = cpu_to_le16(type); | |
348 | payload->header.flags = cpu_to_le16(0); | |
349 | payload->size = cpu_to_le32((1 + !!checksum2_valid) << | |
350 | (PAGE_SHIFT - 9)); | |
351 | payload->location = cpu_to_le64(location); | |
352 | payload->checksum[0] = cpu_to_le32(checksum1); | |
353 | if (checksum2_valid) | |
354 | payload->checksum[1] = cpu_to_le32(checksum2); | |
355 | ||
356 | io->meta_offset += sizeof(struct r5l_payload_data_parity) + | |
357 | sizeof(__le32) * (1 + !!checksum2_valid); | |
358 | } | |
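
/*
 * Worked example for the payload bookkeeping above (illustration only,
 * assuming the usual 16-byte struct r5l_payload_data_parity before its
 * flexible checksum array): a data payload carries one checksum, so it
 * advances meta_offset by 16 + 4 = 20 bytes and records payload->size as
 * 1 << (PAGE_SHIFT - 9) = 8 sectors (one 4k page); a RAID6 parity payload
 * carries two checksums, advancing meta_offset by 16 + 8 = 24 bytes with a
 * size of 16 sectors (the P and Q pages).
 */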
359 | ||
360 | static void r5l_append_payload_page(struct r5l_log *log, struct page *page) | |
361 | { | |
362 | struct r5l_io_unit *io = log->current_io; | |
363 | ||
364 | alloc_bio: | |
365 | if (!io->current_bio) { | |
366 | struct bio *bio; | |
367 | ||
368 | bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES); | |
369 | bio->bi_rw = WRITE; | |
370 | bio->bi_bdev = log->rdev->bdev; | |
371 | bio->bi_iter.bi_sector = log->log_start; | |
372 | bio->bi_end_io = r5l_log_endio; | |
373 | bio->bi_private = io; | |
374 | bio_list_add(&io->bios, bio); | |
375 | atomic_inc(&io->pending_io); | |
376 | io->current_bio = bio; | |
377 | } | |
378 | if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0)) { | |
379 | io->current_bio = NULL; | |
380 | goto alloc_bio; | |
381 | } | |
382 | log->log_start = r5l_ring_add(log, log->log_start, | |
383 | BLOCK_SECTORS); | |
384 | /* current bio hit disk end */ | |
385 | if (log->log_start == 0) | |
386 | io->current_bio = NULL; | |
387 | ||
388 | io->log_end = log->log_start; | |
389 | } | |
390 | ||
391 | static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh, | |
392 | int data_pages, int parity_pages) | |
393 | { | |
394 | int i; | |
395 | int meta_size; | |
396 | struct r5l_io_unit *io; | |
397 | ||
398 | meta_size = | |
399 | ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) | |
400 | * data_pages) + | |
401 | sizeof(struct r5l_payload_data_parity) + | |
402 | sizeof(__le32) * parity_pages; | |
403 | ||
404 | r5l_get_meta(log, meta_size); | |
405 | io = log->current_io; | |
406 | ||
407 | for (i = 0; i < sh->disks; i++) { | |
408 | if (!test_bit(R5_Wantwrite, &sh->dev[i].flags)) | |
409 | continue; | |
410 | if (i == sh->pd_idx || i == sh->qd_idx) | |
411 | continue; | |
412 | r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA, | |
413 | raid5_compute_blocknr(sh, i, 0), | |
414 | sh->dev[i].log_checksum, 0, false); | |
415 | r5l_append_payload_page(log, sh->dev[i].page); | |
416 | } | |
417 | ||
418 | if (sh->qd_idx >= 0) { | |
419 | r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY, | |
420 | sh->sector, sh->dev[sh->pd_idx].log_checksum, | |
421 | sh->dev[sh->qd_idx].log_checksum, true); | |
422 | r5l_append_payload_page(log, sh->dev[sh->pd_idx].page); | |
423 | r5l_append_payload_page(log, sh->dev[sh->qd_idx].page); | |
424 | } else { | |
425 | r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY, | |
426 | sh->sector, sh->dev[sh->pd_idx].log_checksum, | |
427 | 0, false); | |
428 | r5l_append_payload_page(log, sh->dev[sh->pd_idx].page); | |
429 | } | |
430 | ||
431 | list_add_tail(&sh->log_list, &io->stripe_list); | |
432 | atomic_inc(&io->pending_stripe); | |
433 | sh->log_io = io; | |
434 | } | |
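
/*
 * Worked example for meta_size (illustration only, again assuming a 16-byte
 * struct r5l_payload_data_parity): a full-stripe write on a 4-disk RAID5
 * logs 3 data pages and 1 parity page, so meta_size =
 * 3 * (16 + 4) + 16 + 4 = 80 bytes, which easily fits in the 4k meta block
 * after the r5l_meta_block header.
 */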
435 | ||
509ffec7 | 436 | static void r5l_wake_reclaim(struct r5l_log *log, sector_t space); |
f6bed0ef SL |
437 | /* |
438 | * running in raid5d, where reclaim could wait for raid5d too (when it flushes | |
439 | * data from log to raid disks), so we shouldn't wait for reclaim here | |
440 | */ | |
441 | int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh) | |
442 | { | |
443 | int write_disks = 0; | |
444 | int data_pages, parity_pages; | |
445 | int meta_size; | |
446 | int reserve; | |
447 | int i; | |
448 | ||
449 | if (!log) | |
450 | return -EAGAIN; | |
451 | /* Don't support stripe batch */ | |
452 | if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) || | |
453 | test_bit(STRIPE_SYNCING, &sh->state)) { | |
454 | /* the stripe is written to log, we start writing it to raid */ | |
455 | clear_bit(STRIPE_LOG_TRAPPED, &sh->state); | |
456 | return -EAGAIN; | |
457 | } | |
458 | ||
459 | for (i = 0; i < sh->disks; i++) { | |
460 | void *addr; | |
461 | ||
462 | if (!test_bit(R5_Wantwrite, &sh->dev[i].flags)) | |
463 | continue; | |
464 | write_disks++; | |
465 | /* checksum is already calculated in last run */ | |
466 | if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) | |
467 | continue; | |
468 | addr = kmap_atomic(sh->dev[i].page); | |
5cb2fbd6 SL |
469 | sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum, |
470 | addr, PAGE_SIZE); | |
f6bed0ef SL |
471 | kunmap_atomic(addr); |
472 | } | |
473 | parity_pages = 1 + !!(sh->qd_idx >= 0); | |
474 | data_pages = write_disks - parity_pages; | |
475 | ||
476 | meta_size = | |
477 | ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) | |
478 | * data_pages) + | |
479 | sizeof(struct r5l_payload_data_parity) + | |
480 | sizeof(__le32) * parity_pages; | |
481 | /* Doesn't work with very big raid array */ | |
482 | if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE) | |
483 | return -EINVAL; | |
484 | ||
485 | set_bit(STRIPE_LOG_TRAPPED, &sh->state); | |
253f9fd4 SL |
486 | /* |
487 | * The stripe must enter state machine again to finish the write, so | |
488 | * don't delay. | |
489 | */ | |
490 | clear_bit(STRIPE_DELAYED, &sh->state); | |
f6bed0ef SL |
491 | atomic_inc(&sh->count); |
492 | ||
493 | mutex_lock(&log->io_mutex); | |
494 | /* meta + data */ | |
495 | reserve = (1 + write_disks) << (PAGE_SHIFT - 9); | |
496 | if (r5l_has_free_space(log, reserve)) | |
497 | r5l_log_stripe(log, sh, data_pages, parity_pages); | |
498 | else { | |
499 | spin_lock(&log->no_space_stripes_lock); | |
500 | list_add_tail(&sh->log_list, &log->no_space_stripes); | |
501 | spin_unlock(&log->no_space_stripes_lock); | |
502 | ||
503 | r5l_wake_reclaim(log, reserve); | |
504 | } | |
505 | mutex_unlock(&log->io_mutex); | |
506 | ||
507 | return 0; | |
508 | } | |
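
/*
 * Worked example for the space reservation above (illustration only): with
 * 4k pages, (PAGE_SHIFT - 9) == 3, so a write touching 4 disks reserves
 * (1 + 4) << 3 = 40 sectors: one meta block plus one 4k block per
 * data/parity page.
 */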
509 | ||
510 | void r5l_write_stripe_run(struct r5l_log *log) | |
511 | { | |
512 | if (!log) | |
513 | return; | |
514 | mutex_lock(&log->io_mutex); | |
515 | r5l_submit_current_io(log); | |
516 | mutex_unlock(&log->io_mutex); | |
517 | } | |
518 | ||
828cbe98 SL |
519 | int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio) |
520 | { | |
521 | if (!log) | |
522 | return -ENODEV; | |
523 | /* | |
524 | * we flush log disk cache first, then write stripe data to raid disks. | |
525 | * So if bio is finished, the log disk cache is flushed already. The | |
526 | * recovery guarantees we can recovery the bio from log disk, so we | |
527 | * don't need to flush again | |
528 | */ | |
529 | if (bio->bi_iter.bi_size == 0) { | |
530 | bio_endio(bio); | |
531 | return 0; | |
532 | } | |
533 | bio->bi_rw &= ~REQ_FLUSH; | |
534 | return -EAGAIN; | |
535 | } | |
536 | ||
f6bed0ef SL |
537 | /* This will run after log space is reclaimed */ |
538 | static void r5l_run_no_space_stripes(struct r5l_log *log) | |
539 | { | |
540 | struct stripe_head *sh; | |
541 | ||
542 | spin_lock(&log->no_space_stripes_lock); | |
543 | while (!list_empty(&log->no_space_stripes)) { | |
544 | sh = list_first_entry(&log->no_space_stripes, | |
545 | struct stripe_head, log_list); | |
546 | list_del_init(&sh->log_list); | |
547 | set_bit(STRIPE_HANDLE, &sh->state); | |
548 | raid5_release_stripe(sh); | |
549 | } | |
550 | spin_unlock(&log->no_space_stripes_lock); | |
551 | } | |
552 | ||
17036461 CH |
553 | static sector_t r5l_reclaimable_space(struct r5l_log *log) |
554 | { | |
555 | return r5l_ring_distance(log, log->last_checkpoint, | |
556 | log->next_checkpoint); | |
557 | } | |
558 | ||
04732f74 | 559 | static bool r5l_complete_finished_ios(struct r5l_log *log) |
17036461 CH |
560 | { |
561 | struct r5l_io_unit *io, *next; | |
562 | bool found = false; | |
563 | ||
564 | assert_spin_locked(&log->io_list_lock); | |
565 | ||
04732f74 | 566 | list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) { |
17036461 CH |
567 | /* don't change list order */ |
568 | if (io->state < IO_UNIT_STRIPE_END) | |
569 | break; | |
570 | ||
571 | log->next_checkpoint = io->log_start; | |
572 | log->next_cp_seq = io->seq; | |
573 | ||
574 | list_del(&io->log_sibling); | |
575 | r5l_free_io_unit(log, io); | |
576 | ||
577 | found = true; | |
578 | } | |
579 | ||
580 | return found; | |
581 | } | |
582 | ||
509ffec7 CH |
583 | static void __r5l_stripe_write_finished(struct r5l_io_unit *io) |
584 | { | |
585 | struct r5l_log *log = io->log; | |
509ffec7 CH |
586 | unsigned long flags; |
587 | ||
588 | spin_lock_irqsave(&log->io_list_lock, flags); | |
589 | __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END); | |
17036461 | 590 | |
04732f74 | 591 | if (!r5l_complete_finished_ios(log)) { |
85f2f9a4 SL |
592 | spin_unlock_irqrestore(&log->io_list_lock, flags); |
593 | return; | |
594 | } | |
509ffec7 | 595 | |
17036461 | 596 | if (r5l_reclaimable_space(log) > log->max_free_space) |
509ffec7 CH |
597 | r5l_wake_reclaim(log, 0); |
598 | ||
509ffec7 CH |
599 | spin_unlock_irqrestore(&log->io_list_lock, flags); |
600 | wake_up(&log->iounit_wait); | |
601 | } | |
602 | ||
0576b1c6 SL |
603 | void r5l_stripe_write_finished(struct stripe_head *sh) |
604 | { | |
605 | struct r5l_io_unit *io; | |
606 | ||
0576b1c6 | 607 | io = sh->log_io; |
0576b1c6 SL |
608 | sh->log_io = NULL; |
609 | ||
509ffec7 CH |
610 | if (io && atomic_dec_and_test(&io->pending_stripe)) |
611 | __r5l_stripe_write_finished(io); | |
0576b1c6 SL |
612 | } |
613 | ||
a8c34f91 SL |
614 | static void r5l_log_flush_endio(struct bio *bio) |
615 | { | |
616 | struct r5l_log *log = container_of(bio, struct r5l_log, | |
617 | flush_bio); | |
618 | unsigned long flags; | |
619 | struct r5l_io_unit *io; | |
a8c34f91 SL |
620 | |
621 | spin_lock_irqsave(&log->io_list_lock, flags); | |
d8858f43 CH |
622 | list_for_each_entry(io, &log->flushing_ios, log_sibling) |
623 | r5l_io_run_stripes(io); | |
04732f74 | 624 | list_splice_tail_init(&log->flushing_ios, &log->finished_ios); |
a8c34f91 SL |
625 | spin_unlock_irqrestore(&log->io_list_lock, flags); |
626 | } | |
627 | ||
0576b1c6 SL |
628 | /* |
629 | * Starting dispatch IO to raid. | |
630 | * io_unit(meta) consists of a log. There is one situation we want to avoid. A | |
631 | * broken meta in the middle of a log causes recovery can't find meta at the | |
632 | * head of log. If operations require meta at the head persistent in log, we | |
633 | * must make sure meta before it persistent in log too. A case is: | |
634 | * | |
635 | * stripe data/parity is in log, we start write stripe to raid disks. stripe | |
636 | * data/parity must be persistent in log before we do the write to raid disks. | |
637 | * | |
638 | * The solution is we restrictly maintain io_unit list order. In this case, we | |
639 | * only write stripes of an io_unit to raid disks till the io_unit is the first | |
640 | * one whose data/parity is in log. | |
641 | */ | |
642 | void r5l_flush_stripe_to_raid(struct r5l_log *log) | |
643 | { | |
a8c34f91 | 644 | bool do_flush; |
56fef7c6 CH |
645 | |
646 | if (!log || !log->need_cache_flush) | |
0576b1c6 | 647 | return; |
0576b1c6 SL |
648 | |
649 | spin_lock_irq(&log->io_list_lock); | |
a8c34f91 SL |
650 | /* flush bio is running */ |
651 | if (!list_empty(&log->flushing_ios)) { | |
652 | spin_unlock_irq(&log->io_list_lock); | |
653 | return; | |
0576b1c6 | 654 | } |
a8c34f91 SL |
655 | list_splice_tail_init(&log->io_end_ios, &log->flushing_ios); |
656 | do_flush = !list_empty(&log->flushing_ios); | |
0576b1c6 | 657 | spin_unlock_irq(&log->io_list_lock); |
a8c34f91 SL |
658 | |
659 | if (!do_flush) | |
660 | return; | |
661 | bio_reset(&log->flush_bio); | |
662 | log->flush_bio.bi_bdev = log->rdev->bdev; | |
663 | log->flush_bio.bi_end_io = r5l_log_flush_endio; | |
664 | submit_bio(WRITE_FLUSH, &log->flush_bio); | |
0576b1c6 SL |
665 | } |
666 | ||
0576b1c6 SL |
667 | static void r5l_write_super(struct r5l_log *log, sector_t cp); |
668 | static void r5l_do_reclaim(struct r5l_log *log) | |
669 | { | |
0576b1c6 | 670 | sector_t reclaim_target = xchg(&log->reclaim_target, 0); |
17036461 CH |
671 | sector_t reclaimable; |
672 | sector_t next_checkpoint; | |
673 | u64 next_cp_seq; | |
0576b1c6 SL |
674 | |
675 | spin_lock_irq(&log->io_list_lock); | |
676 | /* | |
677 | * move proper io_unit to reclaim list. We should not change the order. | |
678 | * reclaimable/unreclaimable io_unit can be mixed in the list, we | |
679 | * shouldn't reuse space of an unreclaimable io_unit | |
680 | */ | |
681 | while (1) { | |
17036461 CH |
682 | reclaimable = r5l_reclaimable_space(log); |
683 | if (reclaimable >= reclaim_target || | |
0576b1c6 SL |
684 | (list_empty(&log->running_ios) && |
685 | list_empty(&log->io_end_ios) && | |
a8c34f91 | 686 | list_empty(&log->flushing_ios) && |
04732f74 | 687 | list_empty(&log->finished_ios))) |
0576b1c6 SL |
688 | break; |
689 | ||
17036461 CH |
690 | md_wakeup_thread(log->rdev->mddev->thread); |
691 | wait_event_lock_irq(log->iounit_wait, | |
692 | r5l_reclaimable_space(log) > reclaimable, | |
693 | log->io_list_lock); | |
0576b1c6 | 694 | } |
17036461 CH |
695 | |
696 | next_checkpoint = log->next_checkpoint; | |
697 | next_cp_seq = log->next_cp_seq; | |
0576b1c6 SL |
698 | spin_unlock_irq(&log->io_list_lock); |
699 | ||
17036461 CH |
700 | BUG_ON(reclaimable < 0); |
701 | if (reclaimable == 0) | |
0576b1c6 SL |
702 | return; |
703 | ||
0576b1c6 SL |
704 | /* |
705 | * write_super will flush cache of each raid disk. We must write super | |
706 | * here, because the log area might be reused soon and we don't want to | |
707 | * confuse recovery | |
708 | */ | |
17036461 | 709 | r5l_write_super(log, next_checkpoint); |
0576b1c6 SL |
710 | |
711 | mutex_lock(&log->io_mutex); | |
17036461 CH |
712 | log->last_checkpoint = next_checkpoint; |
713 | log->last_cp_seq = next_cp_seq; | |
0576b1c6 | 714 | mutex_unlock(&log->io_mutex); |
0576b1c6 | 715 | |
17036461 | 716 | r5l_run_no_space_stripes(log); |
0576b1c6 SL |
717 | } |
718 | ||
719 | static void r5l_reclaim_thread(struct md_thread *thread) | |
720 | { | |
721 | struct mddev *mddev = thread->mddev; | |
722 | struct r5conf *conf = mddev->private; | |
723 | struct r5l_log *log = conf->log; | |
724 | ||
725 | if (!log) | |
726 | return; | |
727 | r5l_do_reclaim(log); | |
728 | } | |
729 | ||
f6bed0ef SL |
730 | static void r5l_wake_reclaim(struct r5l_log *log, sector_t space) |
731 | { | |
0576b1c6 SL |
732 | unsigned long target; |
733 | unsigned long new = (unsigned long)space; /* overflow in theory */ | |
734 | ||
735 | do { | |
736 | target = log->reclaim_target; | |
737 | if (new < target) | |
738 | return; | |
739 | } while (cmpxchg(&log->reclaim_target, target, new) != target); | |
740 | md_wakeup_thread(log->reclaim_thread); | |
f6bed0ef SL |
741 | } |
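
/*
 * A short note on the loop above (a summary, not from the original source):
 * the cmpxchg() loop implements a lock-free "monotonic max" on
 * reclaim_target, so concurrent callers can only raise the pending reclaim
 * goal, never lower it; r5l_do_reclaim() consumes the goal with
 * xchg(..., 0). r5l_quiesce() passes -1L, which becomes the largest possible
 * target and forces everything reclaimable to be reclaimed.
 */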
742 | ||
e6c033f7 SL |
743 | void r5l_quiesce(struct r5l_log *log, int state) |
744 | { | |
745 | if (!log || state == 2) | |
746 | return; | |
747 | if (state == 0) { | |
748 | log->reclaim_thread = md_register_thread(r5l_reclaim_thread, | |
749 | log->rdev->mddev, "reclaim"); | |
750 | } else if (state == 1) { | |
751 | /* | |
752 | * at this point all stripes are finished, so io_unit is at | |
753 | * least in STRIPE_END state | |
754 | */ | |
755 | r5l_wake_reclaim(log, -1L); | |
756 | md_unregister_thread(&log->reclaim_thread); | |
757 | r5l_do_reclaim(log); | |
758 | } | |
759 | } | |
760 | ||
355810d1 SL |
761 | struct r5l_recovery_ctx { |
762 | struct page *meta_page; /* current meta */ | |
763 | sector_t meta_total_blocks; /* total size of current meta and data */ | |
764 | sector_t pos; /* recovery position */ | |
765 | u64 seq; /* recovery position seq */ | |
766 | }; | |
767 | ||
768 | static int r5l_read_meta_block(struct r5l_log *log, | |
769 | struct r5l_recovery_ctx *ctx) | |
770 | { | |
771 | struct page *page = ctx->meta_page; | |
772 | struct r5l_meta_block *mb; | |
773 | u32 crc, stored_crc; | |
774 | ||
775 | if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false)) | |
776 | return -EIO; | |
777 | ||
778 | mb = page_address(page); | |
779 | stored_crc = le32_to_cpu(mb->checksum); | |
780 | mb->checksum = 0; | |
781 | ||
782 | if (le32_to_cpu(mb->magic) != R5LOG_MAGIC || | |
783 | le64_to_cpu(mb->seq) != ctx->seq || | |
784 | mb->version != R5LOG_VERSION || | |
785 | le64_to_cpu(mb->position) != ctx->pos) | |
786 | return -EINVAL; | |
787 | ||
5cb2fbd6 | 788 | crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); |
355810d1 SL |
789 | if (stored_crc != crc) |
790 | return -EINVAL; | |
791 | ||
792 | if (le32_to_cpu(mb->meta_size) > PAGE_SIZE) | |
793 | return -EINVAL; | |
794 | ||
795 | ctx->meta_total_blocks = BLOCK_SECTORS; | |
796 | ||
797 | return 0; | |
798 | } | |
799 | ||
800 | static int r5l_recovery_flush_one_stripe(struct r5l_log *log, | |
801 | struct r5l_recovery_ctx *ctx, | |
802 | sector_t stripe_sect, | |
803 | int *offset, sector_t *log_offset) | |
804 | { | |
805 | struct r5conf *conf = log->rdev->mddev->private; | |
806 | struct stripe_head *sh; | |
807 | struct r5l_payload_data_parity *payload; | |
808 | int disk_index; | |
809 | ||
810 | sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0); | |
811 | while (1) { | |
812 | payload = page_address(ctx->meta_page) + *offset; | |
813 | ||
814 | if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) { | |
815 | raid5_compute_sector(conf, | |
816 | le64_to_cpu(payload->location), 0, | |
817 | &disk_index, sh); | |
818 | ||
819 | sync_page_io(log->rdev, *log_offset, PAGE_SIZE, | |
820 | sh->dev[disk_index].page, READ, false); | |
821 | sh->dev[disk_index].log_checksum = | |
822 | le32_to_cpu(payload->checksum[0]); | |
823 | set_bit(R5_Wantwrite, &sh->dev[disk_index].flags); | |
824 | ctx->meta_total_blocks += BLOCK_SECTORS; | |
825 | } else { | |
826 | disk_index = sh->pd_idx; | |
827 | sync_page_io(log->rdev, *log_offset, PAGE_SIZE, | |
828 | sh->dev[disk_index].page, READ, false); | |
829 | sh->dev[disk_index].log_checksum = | |
830 | le32_to_cpu(payload->checksum[0]); | |
831 | set_bit(R5_Wantwrite, &sh->dev[disk_index].flags); | |
832 | ||
833 | if (sh->qd_idx >= 0) { | |
834 | disk_index = sh->qd_idx; | |
835 | sync_page_io(log->rdev, | |
836 | r5l_ring_add(log, *log_offset, BLOCK_SECTORS), | |
837 | PAGE_SIZE, sh->dev[disk_index].page, | |
838 | READ, false); | |
839 | sh->dev[disk_index].log_checksum = | |
840 | le32_to_cpu(payload->checksum[1]); | |
841 | set_bit(R5_Wantwrite, | |
842 | &sh->dev[disk_index].flags); | |
843 | } | |
844 | ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded; | |
845 | } | |
846 | ||
847 | *log_offset = r5l_ring_add(log, *log_offset, | |
848 | le32_to_cpu(payload->size)); | |
849 | *offset += sizeof(struct r5l_payload_data_parity) + | |
850 | sizeof(__le32) * | |
851 | (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9)); | |
852 | if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) | |
853 | break; | |
854 | } | |
855 | ||
856 | for (disk_index = 0; disk_index < sh->disks; disk_index++) { | |
857 | void *addr; | |
858 | u32 checksum; | |
859 | ||
860 | if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags)) | |
861 | continue; | |
862 | addr = kmap_atomic(sh->dev[disk_index].page); | |
5cb2fbd6 | 863 | checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE); |
355810d1 SL |
864 | kunmap_atomic(addr); |
865 | if (checksum != sh->dev[disk_index].log_checksum) | |
866 | goto error; | |
867 | } | |
868 | ||
869 | for (disk_index = 0; disk_index < sh->disks; disk_index++) { | |
870 | struct md_rdev *rdev, *rrdev; | |
871 | ||
872 | if (!test_and_clear_bit(R5_Wantwrite, | |
873 | &sh->dev[disk_index].flags)) | |
874 | continue; | |
875 | ||
876 | /* in case device is broken */ | |
877 | rdev = rcu_dereference(conf->disks[disk_index].rdev); | |
878 | if (rdev) | |
879 | sync_page_io(rdev, stripe_sect, PAGE_SIZE, | |
880 | sh->dev[disk_index].page, WRITE, false); | |
881 | rrdev = rcu_dereference(conf->disks[disk_index].replacement); | |
882 | if (rrdev) | |
883 | sync_page_io(rrdev, stripe_sect, PAGE_SIZE, | |
884 | sh->dev[disk_index].page, WRITE, false); | |
885 | } | |
886 | raid5_release_stripe(sh); | |
887 | return 0; | |
888 | ||
889 | error: | |
890 | for (disk_index = 0; disk_index < sh->disks; disk_index++) | |
891 | sh->dev[disk_index].flags = 0; | |
892 | raid5_release_stripe(sh); | |
893 | return -EINVAL; | |
894 | } | |
895 | ||
896 | static int r5l_recovery_flush_one_meta(struct r5l_log *log, | |
897 | struct r5l_recovery_ctx *ctx) | |
898 | { | |
899 | struct r5conf *conf = log->rdev->mddev->private; | |
900 | struct r5l_payload_data_parity *payload; | |
901 | struct r5l_meta_block *mb; | |
902 | int offset; | |
903 | sector_t log_offset; | |
904 | sector_t stripe_sector; | |
905 | ||
906 | mb = page_address(ctx->meta_page); | |
907 | offset = sizeof(struct r5l_meta_block); | |
908 | log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); | |
909 | ||
910 | while (offset < le32_to_cpu(mb->meta_size)) { | |
911 | int dd; | |
912 | ||
913 | payload = (void *)mb + offset; | |
914 | stripe_sector = raid5_compute_sector(conf, | |
915 | le64_to_cpu(payload->location), 0, &dd, NULL); | |
916 | if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector, | |
917 | &offset, &log_offset)) | |
918 | return -EINVAL; | |
919 | } | |
920 | return 0; | |
921 | } | |
922 | ||
923 | /* copy data/parity from log to raid disks */ | |
924 | static void r5l_recovery_flush_log(struct r5l_log *log, | |
925 | struct r5l_recovery_ctx *ctx) | |
926 | { | |
927 | while (1) { | |
928 | if (r5l_read_meta_block(log, ctx)) | |
929 | return; | |
930 | if (r5l_recovery_flush_one_meta(log, ctx)) | |
931 | return; | |
932 | ctx->seq++; | |
933 | ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks); | |
934 | } | |
935 | } | |
936 | ||
937 | static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos, | |
938 | u64 seq) | |
939 | { | |
940 | struct page *page; | |
941 | struct r5l_meta_block *mb; | |
942 | u32 crc; | |
943 | ||
944 | page = alloc_page(GFP_KERNEL | __GFP_ZERO); | |
945 | if (!page) | |
946 | return -ENOMEM; | |
947 | mb = page_address(page); | |
948 | mb->magic = cpu_to_le32(R5LOG_MAGIC); | |
949 | mb->version = R5LOG_VERSION; | |
950 | mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block)); | |
951 | mb->seq = cpu_to_le64(seq); | |
952 | mb->position = cpu_to_le64(pos); | |
5cb2fbd6 | 953 | crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); |
355810d1 SL |
954 | mb->checksum = cpu_to_le32(crc); |
955 | ||
956 | if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) { | |
957 | __free_page(page); | |
958 | return -EIO; | |
959 | } | |
960 | __free_page(page); | |
961 | return 0; | |
962 | } | |
963 | ||
f6bed0ef SL |
964 | static int r5l_recovery_log(struct r5l_log *log) |
965 | { | |
355810d1 SL |
966 | struct r5l_recovery_ctx ctx; |
967 | ||
968 | ctx.pos = log->last_checkpoint; | |
969 | ctx.seq = log->last_cp_seq; | |
970 | ctx.meta_page = alloc_page(GFP_KERNEL); | |
971 | if (!ctx.meta_page) | |
972 | return -ENOMEM; | |
973 | ||
974 | r5l_recovery_flush_log(log, &ctx); | |
975 | __free_page(ctx.meta_page); | |
976 | ||
977 | /* | |
978 | * we did a recovery. Now ctx.pos points to an invalid meta block. New | |
979 | * log will start here. but we can't let superblock point to last valid | |
980 | * meta block. The log might looks like: | |
981 | * | meta 1| meta 2| meta 3| | |
982 | * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If | |
983 | * superblock points to meta 1, we write a new valid meta 2n. if crash | |
984 | * happens again, new recovery will start from meta 1. Since meta 2n is | |
985 | * valid now, recovery will think meta 3 is valid, which is wrong. | |
986 | * The solution is we create a new meta in meta2 with its seq == meta | |
987 | * 1's seq + 10 and let superblock points to meta2. The same recovery will | |
988 | * not think meta 3 is a valid meta, because its seq doesn't match | |
989 | */ | |
990 | if (ctx.seq > log->last_cp_seq + 1) { | |
991 | int ret; | |
992 | ||
993 | ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10); | |
994 | if (ret) | |
995 | return ret; | |
996 | log->seq = ctx.seq + 11; | |
997 | log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS); | |
998 | r5l_write_super(log, ctx.pos); | |
999 | } else { | |
1000 | log->log_start = ctx.pos; | |
1001 | log->seq = ctx.seq; | |
1002 | } | |
f6bed0ef SL |
1003 | return 0; |
1004 | } | |
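
/*
 * Worked example for the seq bump above (illustration only): suppose
 * recovery replays meta blocks with seqs 100..104 and stops at the first
 * invalid block, leaving ctx.seq == 105. The empty meta block is written at
 * ctx.pos with seq 105 + 10 = 115 and the in-memory head becomes
 * log->seq = 116, so a stale block after it carrying seq 106 can never be
 * mistaken for valid log content.
 */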
1005 | ||
1006 | static void r5l_write_super(struct r5l_log *log, sector_t cp) | |
1007 | { | |
1008 | struct mddev *mddev = log->rdev->mddev; | |
1009 | ||
1010 | log->rdev->journal_tail = cp; | |
1011 | set_bit(MD_CHANGE_DEVS, &mddev->flags); | |
1012 | } | |
1013 | ||
1014 | static int r5l_load_log(struct r5l_log *log) | |
1015 | { | |
1016 | struct md_rdev *rdev = log->rdev; | |
1017 | struct page *page; | |
1018 | struct r5l_meta_block *mb; | |
1019 | sector_t cp = log->rdev->journal_tail; | |
1020 | u32 stored_crc, expected_crc; | |
1021 | bool create_super = false; | |
1022 | int ret; | |
1023 | ||
1024 | /* Make sure it's valid */ | |
1025 | if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp) | |
1026 | cp = 0; | |
1027 | page = alloc_page(GFP_KERNEL); | |
1028 | if (!page) | |
1029 | return -ENOMEM; | |
1030 | ||
1031 | if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) { | |
1032 | ret = -EIO; | |
1033 | goto ioerr; | |
1034 | } | |
1035 | mb = page_address(page); | |
1036 | ||
1037 | if (le32_to_cpu(mb->magic) != R5LOG_MAGIC || | |
1038 | mb->version != R5LOG_VERSION) { | |
1039 | create_super = true; | |
1040 | goto create; | |
1041 | } | |
1042 | stored_crc = le32_to_cpu(mb->checksum); | |
1043 | mb->checksum = 0; | |
5cb2fbd6 | 1044 | expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); |
f6bed0ef SL |
1045 | if (stored_crc != expected_crc) { |
1046 | create_super = true; | |
1047 | goto create; | |
1048 | } | |
1049 | if (le64_to_cpu(mb->position) != cp) { | |
1050 | create_super = true; | |
1051 | goto create; | |
1052 | } | |
1053 | create: | |
1054 | if (create_super) { | |
1055 | log->last_cp_seq = prandom_u32(); | |
1056 | cp = 0; | |
1057 | /* | |
1058 | * Make sure super points to correct address. Log might have | |
1059 | * data very soon. If super hasn't correct log tail address, | |
1060 | * recovery can't find the log | |
1061 | */ | |
1062 | r5l_write_super(log, cp); | |
1063 | } else | |
1064 | log->last_cp_seq = le64_to_cpu(mb->seq); | |
1065 | ||
1066 | log->device_size = round_down(rdev->sectors, BLOCK_SECTORS); | |
0576b1c6 SL |
1067 | log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT; |
1068 | if (log->max_free_space > RECLAIM_MAX_FREE_SPACE) | |
1069 | log->max_free_space = RECLAIM_MAX_FREE_SPACE; | |
f6bed0ef SL |
1070 | log->last_checkpoint = cp; |
1071 | ||
1072 | __free_page(page); | |
1073 | ||
1074 | return r5l_recovery_log(log); | |
1075 | ioerr: | |
1076 | __free_page(page); | |
1077 | return ret; | |
1078 | } | |
1079 | ||
1080 | int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) | |
1081 | { | |
1082 | struct r5l_log *log; | |
1083 | ||
1084 | if (PAGE_SIZE != 4096) | |
1085 | return -EINVAL; | |
1086 | log = kzalloc(sizeof(*log), GFP_KERNEL); | |
1087 | if (!log) | |
1088 | return -ENOMEM; | |
1089 | log->rdev = rdev; | |
1090 | ||
56fef7c6 CH |
1091 | log->need_cache_flush = (rdev->bdev->bd_disk->queue->flush_flags != 0); |
1092 | ||
5cb2fbd6 SL |
1093 | log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid, |
1094 | sizeof(rdev->mddev->uuid)); | |
f6bed0ef SL |
1095 | |
1096 | mutex_init(&log->io_mutex); | |
1097 | ||
1098 | spin_lock_init(&log->io_list_lock); | |
1099 | INIT_LIST_HEAD(&log->running_ios); | |
0576b1c6 | 1100 | INIT_LIST_HEAD(&log->io_end_ios); |
a8c34f91 | 1101 | INIT_LIST_HEAD(&log->flushing_ios); |
04732f74 | 1102 | INIT_LIST_HEAD(&log->finished_ios); |
a8c34f91 | 1103 | bio_init(&log->flush_bio); |
f6bed0ef SL |
1104 | |
1105 | log->io_kc = KMEM_CACHE(r5l_io_unit, 0); | |
1106 | if (!log->io_kc) | |
1107 | goto io_kc; | |
1108 | ||
0576b1c6 SL |
1109 | log->reclaim_thread = md_register_thread(r5l_reclaim_thread, |
1110 | log->rdev->mddev, "reclaim"); | |
1111 | if (!log->reclaim_thread) | |
1112 | goto reclaim_thread; | |
0fd22b45 | 1113 | init_waitqueue_head(&log->iounit_wait); |
0576b1c6 | 1114 | |
f6bed0ef SL |
1115 | INIT_LIST_HEAD(&log->no_space_stripes); |
1116 | spin_lock_init(&log->no_space_stripes_lock); | |
1117 | ||
1118 | if (r5l_load_log(log)) | |
1119 | goto error; | |
1120 | ||
1121 | conf->log = log; | |
1122 | return 0; | |
1123 | error: | |
0576b1c6 SL |
1124 | md_unregister_thread(&log->reclaim_thread); |
1125 | reclaim_thread: | |
f6bed0ef SL |
1126 | kmem_cache_destroy(log->io_kc); |
1127 | io_kc: | |
1128 | kfree(log); | |
1129 | return -EINVAL; | |
1130 | } | |
1131 | ||
1132 | void r5l_exit_log(struct r5l_log *log) | |
1133 | { | |
0576b1c6 | 1134 | md_unregister_thread(&log->reclaim_thread); |
f6bed0ef SL |
1135 | kmem_cache_destroy(log->io_kc); |
1136 | kfree(log); | |
1137 | } |