/*
 * Partial Parity Log for closing the RAID5 write hole
 * Copyright (c) 2017, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/crc32c.h>
#include <linux/async_tx.h>
#include <linux/raid/md_p.h>
#include "md.h"
#include "raid5.h"
#include "raid5-log.h"
/*
 * PPL consists of a 4KB header (struct ppl_header) and at least 128KB for
 * partial parity data. The header contains an array of entries
 * (struct ppl_header_entry) which describe the logged write requests.
 * Partial parity for the entries comes after the header, written in the same
 * sequence as the entries:
 *
 * Header
 *   entry0
 *   ...
 *   entryN
 * PP data
 *   PP for entry0
 *   ...
 *   PP for entryN
 *
 * An entry describes one or more consecutive stripe_heads, up to a full
 * stripe. The modified raid data chunks form an m-by-n matrix, where m is the
 * number of stripe_heads in the entry and n is the number of modified data
 * disks. Every stripe_head in the entry must write to the same data disks.
 * An example of a valid case described by a single entry (writes to the first
 * stripe of a 4 disk array, 16k chunk size):
 *
 * sh->sector   dd0   dd1   dd2    ppl
 *            +-----+-----+-----+
 * 0          | --- | --- | --- | +----+
 * 8          | -W- | -W- | --- | | pp |   data_sector = 8
 * 16         | -W- | -W- | --- | | pp |   data_size = 3 * 2 * 4k
 * 24         | -W- | -W- | --- | | pp |   pp_size = 3 * 4k
 *            +-----+-----+-----+ +----+
 *
 * data_sector is the first raid sector of the modified data, data_size is the
 * total size of modified data and pp_size is the size of partial parity for
 * this entry. Entries for full stripe writes contain no partial parity
 * (pp_size = 0), they only mark the stripes for which parity should be
 * recalculated after an unclean shutdown. Every entry holds a checksum of its
 * partial parity, the header also has a checksum of the header itself.
 *
 * A write request is always logged to the PPL instance stored on the parity
 * disk of the corresponding stripe. For each member disk there is one ppl_log
 * used to handle logging for this disk, independently from others. They are
 * grouped in child_logs array in struct ppl_conf, which is assigned to
 * r5conf->log_private.
 *
 * ppl_io_unit represents a full PPL write, header_page contains the ppl_header.
 * PPL entries for logged stripes are added in ppl_log_stripe(). A stripe_head
 * can be appended to the last entry if it meets the conditions for a valid
 * entry described above, otherwise a new entry is added. Checksums of entries
 * are calculated incrementally as stripes containing partial parity are being
 * added. ppl_submit_iounit() calculates the checksum of the header and submits
 * a bio containing the header page and partial parity pages (sh->ppl_page) for
 * all stripes of the io_unit. When the PPL write completes, the stripes
 * associated with the io_unit are released and raid5d starts writing their data
 * and parity. When all stripes are written, the io_unit is freed and the next
 * can be submitted.
 *
 * An io_unit is used to gather stripes until it is submitted or becomes full
 * (if the maximum number of entries or size of PPL is reached). Another io_unit
 * can't be submitted until the previous has completed (PPL and stripe
 * data+parity is written). The log->io_list tracks all io_units of a log
 * (for a single member disk). New io_units are added to the end of the list
 * and the first io_unit is submitted, if it is not submitted already.
 * The current io_unit accepting new stripes is always at the end of the list.
 *
 * If write-back cache is enabled for any of the disks in the array, its data
 * must be flushed before next io_unit is submitted.
 */
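/*
 * Editorial illustration (not part of the driver): given the layout above,
 * the partial parity for entry k starts right after the 4KB header plus the
 * pp_size of every preceding entry. A minimal sketch, assuming a ppl_header
 * that has already been read from disk:
 *
 *	static u64 pp_byte_offset(const struct ppl_header *hdr, int k)
 *	{
 *		u64 off = PPL_HEADER_SIZE;	// partial parity follows the header
 *		int i;
 *
 *		for (i = 0; i < k; i++)
 *			off += le32_to_cpu(hdr->entries[i].pp_size);
 *		return off;
 *	}
 *
 * This mirrors how ppl_recover() below walks the entries, advancing by
 * pp_size >> 9 sectors per entry.
 */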
#define PPL_SPACE_SIZE (128 * 1024)

struct ppl_conf {
	struct mddev *mddev;

	/* array of child logs, one for each raid disk */
	struct ppl_log *child_logs;
	int count;

	int block_size;		/* the logical block size used for data_sector
				 * in ppl_header_entry */
	u32 signature;		/* raid array identifier */
	atomic64_t seq;		/* current log write sequence number */

	struct kmem_cache *io_kc;
	mempool_t io_pool;
	struct bio_set bs;
	struct bio_set flush_bs;

	/* used only for recovery */
	int recovered_entries;
	int mismatch_count;

	/* stripes to retry if failed to allocate io_unit */
	struct list_head no_mem_stripes;
	spinlock_t no_mem_stripes_lock;

	unsigned short write_hint;
};
struct ppl_log {
	struct ppl_conf *ppl_conf;	/* shared between all log instances */

	struct md_rdev *rdev;		/* array member disk associated with
					 * this log instance */
	struct mutex io_mutex;
	struct ppl_io_unit *current_io;	/* current io_unit accepting new data
					 * always at the end of io_list */
	spinlock_t io_list_lock;
	struct list_head io_list;	/* all io_units of this log */

	sector_t next_io_sector;
	unsigned int entry_space;
	bool use_multippl;
	bool wb_cache_on;
	unsigned long disk_flush_bitmap;
};
#define PPL_IO_INLINE_BVECS 32

struct ppl_io_unit {
	struct ppl_log *log;

	struct page *header_page;	/* for ppl_header */

	unsigned int entries_count;	/* number of entries in ppl_header */
	unsigned int pp_size;		/* current total size of partial parity */

	u64 seq;			/* sequence number of this log write */
	struct list_head log_sibling;	/* log->io_list */

	struct list_head stripe_list;	/* stripes added to the io_unit */
	atomic_t pending_stripes;	/* how many stripes not written to raid */
	atomic_t pending_flushes;	/* how many disk flushes are in progress */

	bool submitted;			/* true if write to log started */

	/* inline bio and its biovec for submitting the iounit */
	struct bio bio;
	struct bio_vec biovec[PPL_IO_INLINE_BVECS];
};
struct dma_async_tx_descriptor *
ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
		       struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **srcs = percpu->scribble;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct async_submit_ctl submit;

	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

	/*
	 * Partial parity is the XOR of stripe data chunks that are not changed
	 * during the write request. Depending on available data
	 * (read-modify-write vs. reconstruct-write case) we calculate it
	 * differently.
	 */
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		/*
		 * rmw: xor old data and parity from updated disks
		 * This is calculated earlier by ops_run_prexor5() so just copy
		 * the parity dev page.
		 */
		srcs[count++] = sh->dev[pd_idx].page;
	} else if (sh->reconstruct_state == reconstruct_state_drain_run) {
		/* rcw: xor data from all not updated disks */
		for (i = disks; i--;) {
			struct r5dev *dev = &sh->dev[i];
			if (test_bit(R5_UPTODATE, &dev->flags))
				srcs[count++] = dev->page;
		}
	} else {
		return tx;
	}

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, tx,
			  NULL, sh, (void *) (srcs + sh->disks + 2));

	if (count == 1)
		tx = async_memcpy(sh->ppl_page, srcs[0], 0, 0, PAGE_SIZE,
				  &submit);
	else
		tx = async_xor(sh->ppl_page, srcs, 0, count, PAGE_SIZE,
			       &submit);

	return tx;
}
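/*
 * Editorial note: the rmw and rcw branches above compute the same value.
 * With RAID5 parity = d_0 ^ ... ^ d_{n-1}, the partial parity is the xor of
 * the chunks that are *not* being rewritten. In the rcw case it is built
 * directly from the up to date, unmodified chunks. In the rmw case,
 * ops_run_prexor5() has already xored the old data of every modified chunk
 * into the parity page, and xoring a chunk into the full parity cancels it:
 *
 *	parity ^ d_m = (d_0 ^ ... ^ d_{n-1}) ^ d_m = xor of all d_i, i != m
 *
 * so copying the prexor'd parity page yields exactly the xor of the
 * unmodified chunks.
 */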
static void *ppl_io_pool_alloc(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *kc = pool_data;
	struct ppl_io_unit *io;

	io = kmem_cache_alloc(kc, gfp_mask);
	if (!io)
		return NULL;

	io->header_page = alloc_page(gfp_mask);
	if (!io->header_page) {
		kmem_cache_free(kc, io);
		return NULL;
	}

	return io;
}
static void ppl_io_pool_free(void *element, void *pool_data)
{
	struct kmem_cache *kc = pool_data;
	struct ppl_io_unit *io = element;

	__free_page(io->header_page);
	kmem_cache_free(kc, io);
}
static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
					  struct stripe_head *sh)
{
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct ppl_io_unit *io;
	struct ppl_header *pplhdr;
	struct page *header_page;

	io = mempool_alloc(&ppl_conf->io_pool, GFP_NOWAIT);
	if (!io)
		return NULL;

	header_page = io->header_page;
	memset(io, 0, sizeof(*io));
	io->header_page = header_page;

	io->log = log;
	INIT_LIST_HEAD(&io->log_sibling);
	INIT_LIST_HEAD(&io->stripe_list);
	atomic_set(&io->pending_stripes, 0);
	atomic_set(&io->pending_flushes, 0);
	bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS);

	pplhdr = page_address(io->header_page);
	clear_page(pplhdr);
	memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
	pplhdr->signature = cpu_to_le32(ppl_conf->signature);

	io->seq = atomic64_add_return(1, &ppl_conf->seq);
	pplhdr->generation = cpu_to_le64(io->seq);

	return io;
}
static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh)
{
	struct ppl_io_unit *io = log->current_io;
	struct ppl_header_entry *e = NULL;
	struct ppl_header *pplhdr;
	int i;
	sector_t data_sector = 0;
	int data_disks = 0;
	struct r5conf *conf = sh->raid_conf;

	pr_debug("%s: stripe: %llu\n", __func__, (unsigned long long)sh->sector);

	/* check if current io_unit is full */
	if (io && (io->pp_size == log->entry_space ||
		   io->entries_count == PPL_HDR_MAX_ENTRIES)) {
		pr_debug("%s: add io_unit blocked by seq: %llu\n",
			 __func__, io->seq);
		io = NULL;
	}

	/* add a new unit if there is none or the current is full */
	if (!io) {
		io = ppl_new_iounit(log, sh);
		if (!io)
			return -ENOMEM;
		spin_lock_irq(&log->io_list_lock);
		list_add_tail(&io->log_sibling, &log->io_list);
		spin_unlock_irq(&log->io_list_lock);

		log->current_io = io;
	}

	for (i = 0; i < sh->disks; i++) {
		struct r5dev *dev = &sh->dev[i];

		if (i != sh->pd_idx && test_bit(R5_Wantwrite, &dev->flags)) {
			if (!data_disks || dev->sector < data_sector)
				data_sector = dev->sector;
			data_disks++;
		}
	}
	BUG_ON(!data_disks);

	pr_debug("%s: seq: %llu data_sector: %llu data_disks: %d\n", __func__,
		 io->seq, (unsigned long long)data_sector, data_disks);

	pplhdr = page_address(io->header_page);

	if (io->entries_count > 0) {
		struct ppl_header_entry *last =
				&pplhdr->entries[io->entries_count - 1];
		struct stripe_head *sh_last = list_last_entry(
				&io->stripe_list, struct stripe_head, log_list);
		u64 data_sector_last = le64_to_cpu(last->data_sector);
		u32 data_size_last = le32_to_cpu(last->data_size);

		/*
		 * Check if we can append the stripe to the last entry. It must
		 * be just after the last logged stripe and write to the same
		 * disks. Use bit shift and logarithm to avoid 64-bit division.
		 */
		if ((sh->sector == sh_last->sector + STRIPE_SECTORS) &&
		    (data_sector >> ilog2(conf->chunk_sectors) ==
		     data_sector_last >> ilog2(conf->chunk_sectors)) &&
		    ((data_sector - data_sector_last) * data_disks ==
		     data_size_last >> 9))
			e = last;
	}

	if (!e) {
		e = &pplhdr->entries[io->entries_count++];
		e->data_sector = cpu_to_le64(data_sector);
		e->parity_disk = cpu_to_le32(sh->pd_idx);
		e->checksum = cpu_to_le32(~0);
	}

	le32_add_cpu(&e->data_size, data_disks << PAGE_SHIFT);

	/* don't write any PP if full stripe write */
	if (!test_bit(STRIPE_FULL_WRITE, &sh->state)) {
		le32_add_cpu(&e->pp_size, PAGE_SIZE);
		io->pp_size += PAGE_SIZE;
		e->checksum = cpu_to_le32(crc32c_le(le32_to_cpu(e->checksum),
						    page_address(sh->ppl_page),
						    PAGE_SIZE));
	}

	list_add_tail(&sh->log_list, &io->stripe_list);
	atomic_inc(&io->pending_stripes);
	sh->ppl_io = io;

	return 0;
}
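/*
 * Editorial worked example for the append check above (illustrative
 * numbers, assuming STRIPE_SECTORS = 8): if the last entry already covers
 * s stripe_heads written to data_disks disks, then data_size_last is
 * s * data_disks * 4k, so data_size_last >> 9 == s * 8 * data_disks. A
 * stripe_head directly following the last one gives
 * data_sector - data_sector_last == s * 8, hence
 * (data_sector - data_sector_last) * data_disks == data_size_last >> 9
 * holds exactly when the new stripe is consecutive and touches the same
 * number of data disks, while the ilog2(chunk_sectors) comparison checks
 * that it stays within the same chunk.
 */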
int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh)
{
	struct ppl_conf *ppl_conf = conf->log_private;
	struct ppl_io_unit *io = sh->ppl_io;
	struct ppl_log *log;

	if (io || test_bit(STRIPE_SYNCING, &sh->state) || !sh->ppl_page ||
	    !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
	    !test_bit(R5_Insync, &sh->dev[sh->pd_idx].flags)) {
		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
		return -EAGAIN;
	}

	log = &ppl_conf->child_logs[sh->pd_idx];

	mutex_lock(&log->io_mutex);

	if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
		mutex_unlock(&log->io_mutex);
		return -EAGAIN;
	}

	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	if (ppl_log_stripe(log, sh)) {
		spin_lock_irq(&ppl_conf->no_mem_stripes_lock);
		list_add_tail(&sh->log_list, &ppl_conf->no_mem_stripes);
		spin_unlock_irq(&ppl_conf->no_mem_stripes_lock);
	}

	mutex_unlock(&log->io_mutex);

	return 0;
}
static void ppl_log_endio(struct bio *bio)
{
	struct ppl_io_unit *io = bio->bi_private;
	struct ppl_log *log = io->log;
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct stripe_head *sh, *next;

	pr_debug("%s: seq: %llu\n", __func__, io->seq);

	if (bio->bi_status)
		md_error(ppl_conf->mddev, log->rdev);

	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
		list_del_init(&sh->log_list);

		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}
static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio)
{
	char b[BDEVNAME_SIZE];

	pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n",
		 __func__, io->seq, bio->bi_iter.bi_size,
		 (unsigned long long)bio->bi_iter.bi_sector,
		 bio_devname(bio, b));

	submit_bio(bio);
}
static void ppl_submit_iounit(struct ppl_io_unit *io)
{
	struct ppl_log *log = io->log;
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct ppl_header *pplhdr = page_address(io->header_page);
	struct bio *bio = &io->bio;
	struct stripe_head *sh;
	int i;

	bio->bi_private = io;

	if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
		ppl_log_endio(bio);
		return;
	}

	for (i = 0; i < io->entries_count; i++) {
		struct ppl_header_entry *e = &pplhdr->entries[i];

		pr_debug("%s: seq: %llu entry: %d data_sector: %llu pp_size: %u data_size: %u\n",
			 __func__, io->seq, i, le64_to_cpu(e->data_sector),
			 le32_to_cpu(e->pp_size), le32_to_cpu(e->data_size));

		e->data_sector = cpu_to_le64(le64_to_cpu(e->data_sector) >>
					     ilog2(ppl_conf->block_size >> 9));
		e->checksum = cpu_to_le32(~le32_to_cpu(e->checksum));
	}

	pplhdr->entries_count = cpu_to_le32(io->entries_count);
	pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PPL_HEADER_SIZE));

	/* Rewind the buffer if current PPL is larger than remaining space */
	if (log->use_multippl &&
	    log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector <
	    (PPL_HEADER_SIZE + io->pp_size) >> 9)
		log->next_io_sector = log->rdev->ppl.sector;

	bio->bi_end_io = ppl_log_endio;
	bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
	bio_set_dev(bio, log->rdev->bdev);
	bio->bi_iter.bi_sector = log->next_io_sector;
	bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
	bio->bi_write_hint = ppl_conf->write_hint;

	pr_debug("%s: log->current_io_sector: %llu\n", __func__,
	    (unsigned long long)log->next_io_sector);

	if (log->use_multippl)
		log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9;

	WARN_ON(log->disk_flush_bitmap != 0);

	list_for_each_entry(sh, &io->stripe_list, log_list) {
		for (i = 0; i < sh->disks; i++) {
			struct r5dev *dev = &sh->dev[i];

			if ((ppl_conf->child_logs[i].wb_cache_on) &&
			    (test_bit(R5_Wantwrite, &dev->flags))) {
				set_bit(i, &log->disk_flush_bitmap);
			}
		}

		/* entries for full stripe writes have no partial parity */
		if (test_bit(STRIPE_FULL_WRITE, &sh->state))
			continue;

		if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
			struct bio *prev = bio;

			bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES,
					       &ppl_conf->bs);
			bio->bi_opf = prev->bi_opf;
			bio->bi_write_hint = prev->bi_write_hint;
			bio_copy_dev(bio, prev);
			bio->bi_iter.bi_sector = bio_end_sector(prev);
			bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);

			bio_chain(bio, prev);
			ppl_submit_iounit_bio(io, prev);
		}
	}

	ppl_submit_iounit_bio(io, bio);
}
static void ppl_submit_current_io(struct ppl_log *log)
{
	struct ppl_io_unit *io;

	spin_lock_irq(&log->io_list_lock);

	io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
				      log_sibling);
	if (io && io->submitted)
		io = NULL;

	spin_unlock_irq(&log->io_list_lock);

	if (io) {
		io->submitted = true;

		if (io == log->current_io)
			log->current_io = NULL;

		ppl_submit_iounit(io);
	}
}
void ppl_write_stripe_run(struct r5conf *conf)
{
	struct ppl_conf *ppl_conf = conf->log_private;
	struct ppl_log *log;
	int i;

	for (i = 0; i < ppl_conf->count; i++) {
		log = &ppl_conf->child_logs[i];

		mutex_lock(&log->io_mutex);
		ppl_submit_current_io(log);
		mutex_unlock(&log->io_mutex);
	}
}
static void ppl_io_unit_finished(struct ppl_io_unit *io)
{
	struct ppl_log *log = io->log;
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct r5conf *conf = ppl_conf->mddev->private;
	unsigned long flags;

	pr_debug("%s: seq: %llu\n", __func__, io->seq);

	local_irq_save(flags);

	spin_lock(&log->io_list_lock);
	list_del(&io->log_sibling);
	spin_unlock(&log->io_list_lock);

	mempool_free(io, &ppl_conf->io_pool);

	spin_lock(&ppl_conf->no_mem_stripes_lock);
	if (!list_empty(&ppl_conf->no_mem_stripes)) {
		struct stripe_head *sh;

		sh = list_first_entry(&ppl_conf->no_mem_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
	spin_unlock(&ppl_conf->no_mem_stripes_lock);

	local_irq_restore(flags);

	wake_up(&conf->wait_for_quiescent);
}
static void ppl_flush_endio(struct bio *bio)
{
	struct ppl_io_unit *io = bio->bi_private;
	struct ppl_log *log = io->log;
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct r5conf *conf = ppl_conf->mddev->private;
	char b[BDEVNAME_SIZE];

	pr_debug("%s: dev: %s\n", __func__, bio_devname(bio, b));

	if (bio->bi_status) {
		struct md_rdev *rdev;

		rcu_read_lock();
		rdev = md_find_rdev_rcu(conf->mddev, bio_dev(bio));
		if (rdev)
			md_error(rdev->mddev, rdev);
		rcu_read_unlock();
	}

	bio_put(bio);

	if (atomic_dec_and_test(&io->pending_flushes)) {
		ppl_io_unit_finished(io);
		md_wakeup_thread(conf->mddev->thread);
	}
}
static void ppl_do_flush(struct ppl_io_unit *io)
{
	struct ppl_log *log = io->log;
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct r5conf *conf = ppl_conf->mddev->private;
	int raid_disks = conf->raid_disks;
	int flushed_disks = 0;
	int i;

	atomic_set(&io->pending_flushes, raid_disks);

	for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) {
		struct md_rdev *rdev;
		struct block_device *bdev = NULL;

		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags))
			bdev = rdev->bdev;
		rcu_read_unlock();

		if (bdev) {
			struct bio *bio;
			char b[BDEVNAME_SIZE];

			bio = bio_alloc_bioset(GFP_NOIO, 0, &ppl_conf->flush_bs);
			bio_set_dev(bio, bdev);
			bio->bi_private = io;
			bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
			bio->bi_end_io = ppl_flush_endio;

			pr_debug("%s: dev: %s\n", __func__,
				 bio_devname(bio, b));

			submit_bio(bio);
			flushed_disks++;
		}
	}

	log->disk_flush_bitmap = 0;

	for (i = flushed_disks ; i < raid_disks; i++) {
		if (atomic_dec_and_test(&io->pending_flushes))
			ppl_io_unit_finished(io);
	}
}
static inline bool ppl_no_io_unit_submitted(struct r5conf *conf,
					    struct ppl_log *log)
{
	struct ppl_io_unit *io;

	io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
				      log_sibling);

	return !io || !io->submitted;
}
void ppl_quiesce(struct r5conf *conf, int quiesce)
{
	struct ppl_conf *ppl_conf = conf->log_private;
	int i;

	if (quiesce) {
		for (i = 0; i < ppl_conf->count; i++) {
			struct ppl_log *log = &ppl_conf->child_logs[i];

			spin_lock_irq(&log->io_list_lock);
			wait_event_lock_irq(conf->wait_for_quiescent,
					    ppl_no_io_unit_submitted(conf, log),
					    log->io_list_lock);
			spin_unlock_irq(&log->io_list_lock);
		}
	}
}
int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
	if (bio->bi_iter.bi_size == 0) {
		bio_endio(bio);
		return 0;
	}
	bio->bi_opf &= ~REQ_PREFLUSH;
	return -EAGAIN;
}
void ppl_stripe_write_finished(struct stripe_head *sh)
{
	struct ppl_io_unit *io;

	io = sh->ppl_io;
	sh->ppl_io = NULL;

	if (io && atomic_dec_and_test(&io->pending_stripes)) {
		if (io->log->disk_flush_bitmap)
			ppl_do_flush(io);
		else
			ppl_io_unit_finished(io);
	}
}
static void ppl_xor(int size, struct page *page1, struct page *page2)
{
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;
	struct page *xor_srcs[] = { page1, page2 };

	init_async_submit(&submit, ASYNC_TX_ACK|ASYNC_TX_XOR_DROP_DST,
			  NULL, NULL, NULL, NULL);
	tx = async_xor(page1, xor_srcs, 0, 2, size, &submit);

	async_tx_quiesce(&tx);
}
/*
 * PPL recovery strategy: xor partial parity and data from all modified data
 * disks within a stripe and write the result as the new stripe parity. If all
 * stripe data disks are modified (full stripe write), no partial parity is
 * available, so just xor the data disks.
 *
 * Recovery of a PPL entry shall occur only if all modified data disks are
 * available and read from all of them succeeds.
 *
 * A PPL entry applies to a stripe, partial parity size for an entry is at most
 * the size of the chunk. Examples of possible cases for a single entry:
 *
 * case 0: single data disk write:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+           +--------------------+
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * | ------ | -data- | ------ | | pp | -> | data1 ^ pp         |
 * | ------ | -data- | ------ | | pp | -> | data1 ^ pp         |
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * +--------+--------+--------+           +--------------------+
 * pp_size = data_size
 *
 * case 1: more than one data disk write:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+           +--------------------+
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
 * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * +--------+--------+--------+           +--------------------+
 * pp_size = data_size / modified_data_disks
 *
 * case 2: write to all data disks (also full stripe write):
 *   data0    data1    data2                parity
 * +--------+--------+--------+           +--------------------+
 * | ------ | ------ | ------ |           | (no change)        |
 * | -data- | -data- | -data- | --------> | xor all data       |
 * | ------ | ------ | ------ | --------> | (no change)        |
 * | ------ | ------ | ------ |           | (no change)        |
 * +--------+--------+--------+           +--------------------+
 *
 * The following cases are possible only in other implementations. The recovery
 * code can handle them, but they are not generated at runtime because they can
 * be reduced to cases 0, 1 and 2:
 *
 * case 3:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+ +----+    +--------------------+
 * | ------ | -data- | -data- | | pp |    | data1 ^ data2 ^ pp |
 * | ------ | -data- | -data- | | pp | -> | data1 ^ data2 ^ pp |
 * | -data- | -data- | -data- | | -- | -> | xor all data       |
 * | -data- | -data- | ------ | | pp |    | data0 ^ data1 ^ pp |
 * +--------+--------+--------+ +----+    +--------------------+
 * pp_size = chunk_size
 *
 * case 4:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+ +----+    +--------------------+
 * | ------ | -data- | ------ | | pp |    | data1 ^ pp         |
 * | ------ | ------ | ------ | | -- | -> | (no change)        |
 * | ------ | ------ | ------ | | -- | -> | (no change)        |
 * | -data- | ------ | ------ | | pp |    | data0 ^ pp         |
 * +--------+--------+--------+ +----+    +--------------------+
 * pp_size = chunk_size
 */
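/*
 * Editorial sketch of the per-block recovery xor described above
 * (illustrative only; the driver uses ppl_xor()/sync_page_io() instead).
 * For case 1, the new parity block is the partial parity xored with the
 * corresponding block of every modified data disk:
 *
 *	static void recover_parity_block(u8 *parity, const u8 *pp,
 *					 const u8 * const *mod_data,
 *					 int ndisks, size_t len)
 *	{
 *		size_t i;
 *		int d;
 *
 *		memcpy(parity, pp, len);	// seed with partial parity
 *		for (d = 0; d < ndisks; d++)	// xor in each modified disk
 *			for (i = 0; i < len; i++)
 *				parity[i] ^= mod_data[d][i];
 *	}
 *
 * For a full stripe write (case 2) there is no pp, so the same loops run
 * with the parity buffer zeroed instead of seeded from pp.
 */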
static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
			     sector_t ppl_sector)
{
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct mddev *mddev = ppl_conf->mddev;
	struct r5conf *conf = mddev->private;
	int block_size = ppl_conf->block_size;
	struct page *page1;
	struct page *page2;
	sector_t r_sector_first;
	sector_t r_sector_last;
	int strip_sectors;
	int data_disks;
	int i;
	int ret = 0;
	char b[BDEVNAME_SIZE];
	unsigned int pp_size = le32_to_cpu(e->pp_size);
	unsigned int data_size = le32_to_cpu(e->data_size);

	page1 = alloc_page(GFP_KERNEL);
	page2 = alloc_page(GFP_KERNEL);

	if (!page1 || !page2) {
		ret = -ENOMEM;
		goto out;
	}

	r_sector_first = le64_to_cpu(e->data_sector) * (block_size >> 9);

	if ((pp_size >> 9) < conf->chunk_sectors) {
		if (pp_size > 0) {
			data_disks = data_size / pp_size;
			strip_sectors = pp_size >> 9;
		} else {
			data_disks = conf->raid_disks - conf->max_degraded;
			strip_sectors = (data_size >> 9) / data_disks;
		}
		r_sector_last = r_sector_first +
				(data_disks - 1) * conf->chunk_sectors +
				strip_sectors;
	} else {
		data_disks = conf->raid_disks - conf->max_degraded;
		strip_sectors = conf->chunk_sectors;
		r_sector_last = r_sector_first + (data_size >> 9);
	}

	pr_debug("%s: array sector first: %llu last: %llu\n", __func__,
		 (unsigned long long)r_sector_first,
		 (unsigned long long)r_sector_last);

	/* if start and end is 4k aligned, use a 4k block */
	if (block_size == 512 &&
	    (r_sector_first & (STRIPE_SECTORS - 1)) == 0 &&
	    (r_sector_last & (STRIPE_SECTORS - 1)) == 0)
		block_size = STRIPE_SIZE;

	/* iterate through blocks in strip */
	for (i = 0; i < strip_sectors; i += (block_size >> 9)) {
		bool update_parity = false;
		sector_t parity_sector;
		struct md_rdev *parity_rdev;
		struct stripe_head sh;
		int disk;
		int indent = 0;

		pr_debug("%s:%*s iter %d start\n", __func__, indent, "", i);
		indent += 2;

		memset(page_address(page1), 0, PAGE_SIZE);

		/* iterate through data member disks */
		for (disk = 0; disk < data_disks; disk++) {
			int dd_idx;
			struct md_rdev *rdev;
			sector_t sector;
			sector_t r_sector = r_sector_first + i +
					    (disk * conf->chunk_sectors);

			pr_debug("%s:%*s data member disk %d start\n",
				 __func__, indent, "", disk);
			indent += 2;

			if (r_sector >= r_sector_last) {
				pr_debug("%s:%*s array sector %llu doesn't need parity update\n",
					 __func__, indent, "",
					 (unsigned long long)r_sector);
				indent -= 2;
				continue;
			}

			update_parity = true;

			/* map raid sector to member disk */
			sector = raid5_compute_sector(conf, r_sector, 0,
						      &dd_idx, NULL);
			pr_debug("%s:%*s processing array sector %llu => data member disk %d, sector %llu\n",
				 __func__, indent, "",
				 (unsigned long long)r_sector, dd_idx,
				 (unsigned long long)sector);

			rdev = conf->disks[dd_idx].rdev;
			if (!rdev || (!test_bit(In_sync, &rdev->flags) &&
				      sector >= rdev->recovery_offset)) {
				pr_debug("%s:%*s data member disk %d missing\n",
					 __func__, indent, "", dd_idx);
				update_parity = false;
				break;
			}

			pr_debug("%s:%*s reading data member disk %s sector %llu\n",
				 __func__, indent, "", bdevname(rdev->bdev, b),
				 (unsigned long long)sector);
			if (!sync_page_io(rdev, sector, block_size, page2,
					REQ_OP_READ, 0, false)) {
				md_error(mddev, rdev);
				pr_debug("%s:%*s read failed!\n", __func__,
					 indent, "");
				ret = -EIO;
				goto out;
			}

			ppl_xor(block_size, page1, page2);

			indent -= 2;
		}

		if (!update_parity)
			continue;

		if (pp_size > 0) {
			pr_debug("%s:%*s reading pp disk sector %llu\n",
				 __func__, indent, "",
				 (unsigned long long)(ppl_sector + i));
			if (!sync_page_io(log->rdev,
					ppl_sector - log->rdev->data_offset + i,
					block_size, page2, REQ_OP_READ, 0,
					false)) {
				pr_debug("%s:%*s read failed!\n", __func__,
					 indent, "");
				md_error(mddev, log->rdev);
				ret = -EIO;
				goto out;
			}

			ppl_xor(block_size, page1, page2);
		}

		/* map raid sector to parity disk */
		parity_sector = raid5_compute_sector(conf, r_sector_first + i,
				0, &disk, &sh);
		BUG_ON(sh.pd_idx != le32_to_cpu(e->parity_disk));
		parity_rdev = conf->disks[sh.pd_idx].rdev;

		BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev);
		pr_debug("%s:%*s write parity at sector %llu, disk %s\n",
			 __func__, indent, "",
			 (unsigned long long)parity_sector,
			 bdevname(parity_rdev->bdev, b));
		if (!sync_page_io(parity_rdev, parity_sector, block_size,
				page1, REQ_OP_WRITE, 0, false)) {
			pr_debug("%s:%*s parity write error!\n", __func__,
				 indent, "");
			md_error(mddev, parity_rdev);
			ret = -EIO;
			goto out;
		}
	}
out:
	if (page1)
		__free_page(page1);
	if (page2)
		__free_page(page2);
	return ret;
}

static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
		       sector_t offset)
{
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct md_rdev *rdev = log->rdev;
	struct mddev *mddev = rdev->mddev;
	sector_t ppl_sector = rdev->ppl.sector + offset +
			      (PPL_HEADER_SIZE >> 9);
	struct page *page;
	int i;
	int ret = 0;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	/* iterate through all PPL entries saved */
	for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++) {
		struct ppl_header_entry *e = &pplhdr->entries[i];
		u32 pp_size = le32_to_cpu(e->pp_size);
		sector_t sector = ppl_sector;
		int ppl_entry_sectors = pp_size >> 9;
		u32 crc, crc_stored;

		pr_debug("%s: disk: %d entry: %d ppl_sector: %llu pp_size: %u\n",
			 __func__, rdev->raid_disk, i,
			 (unsigned long long)ppl_sector, pp_size);

		crc = ~0;
		crc_stored = le32_to_cpu(e->checksum);

		/* read partial parity for this entry and calculate its checksum */
		while (pp_size) {
			int s = pp_size > PAGE_SIZE ? PAGE_SIZE : pp_size;

			if (!sync_page_io(rdev, sector - rdev->data_offset,
					s, page, REQ_OP_READ, 0, false)) {
				md_error(mddev, rdev);
				ret = -EIO;
				goto out;
			}

			crc = crc32c_le(crc, page_address(page), s);

			pp_size -= s;
			sector += s >> 9;
		}

		crc = ~crc;

		if (crc != crc_stored) {
			/*
			 * Don't recover this entry if the checksum does not
			 * match, but keep going and try to recover other
			 * entries.
			 */
			pr_debug("%s: ppl entry crc does not match: stored: 0x%x calculated: 0x%x\n",
				 __func__, crc_stored, crc);
			ppl_conf->mismatch_count++;
		} else {
			ret = ppl_recover_entry(log, e, ppl_sector);
			if (ret)
				goto out;
			ppl_conf->recovered_entries++;
		}

		ppl_sector += ppl_entry_sectors;
	}

	/* flush the disk cache after recovery if necessary */
	ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL, NULL);
out:
	__free_page(page);
	return ret;
}

static int ppl_write_empty_header(struct ppl_log *log)
{
	struct page *page;
	struct ppl_header *pplhdr;
	struct md_rdev *rdev = log->rdev;
	int ret = 0;

	pr_debug("%s: disk: %d ppl_sector: %llu\n", __func__,
		 rdev->raid_disk, (unsigned long long)rdev->ppl.sector);

	page = alloc_page(GFP_NOIO | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	pplhdr = page_address(page);
	/* zero out PPL space to avoid collision with old PPLs */
	blkdev_issue_zeroout(rdev->bdev, rdev->ppl.sector,
			    log->rdev->ppl.size, GFP_NOIO, 0);
	memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
	pplhdr->signature = cpu_to_le32(log->ppl_conf->signature);
	pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE));

	if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
			  PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
			  REQ_FUA, 0, false)) {
		md_error(rdev->mddev, rdev);
		ret = -EIO;
	}

	__free_page(page);
	return ret;
}

static int ppl_load_distributed(struct ppl_log *log)
{
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct md_rdev *rdev = log->rdev;
	struct mddev *mddev = rdev->mddev;
	struct page *page, *page2, *tmp;
	struct ppl_header *pplhdr = NULL, *prev_pplhdr = NULL;
	u32 crc, crc_stored;
	u32 signature;
	int ret = 0, i;
	sector_t pplhdr_offset = 0, prev_pplhdr_offset = 0;

	pr_debug("%s: disk: %d\n", __func__, rdev->raid_disk);
	/* read PPL headers, find the recent one */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	page2 = alloc_page(GFP_KERNEL);
	if (!page2) {
		__free_page(page);
		return -ENOMEM;
	}

	/* searching ppl area for latest ppl */
	while (pplhdr_offset < rdev->ppl.size - (PPL_HEADER_SIZE >> 9)) {
		if (!sync_page_io(rdev,
				  rdev->ppl.sector - rdev->data_offset +
				  pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ,
				  0, false)) {
			md_error(mddev, rdev);
			ret = -EIO;
			/* if not able to read - don't recover any PPL */
			pplhdr = NULL;
			break;
		}
		pplhdr = page_address(page);

		/* check header validity */
		crc_stored = le32_to_cpu(pplhdr->checksum);
		pplhdr->checksum = 0;
		crc = ~crc32c_le(~0, pplhdr, PAGE_SIZE);

		if (crc_stored != crc) {
			pr_debug("%s: ppl header crc does not match: stored: 0x%x calculated: 0x%x (offset: %llu)\n",
				 __func__, crc_stored, crc,
				 (unsigned long long)pplhdr_offset);
			pplhdr = prev_pplhdr;
			pplhdr_offset = prev_pplhdr_offset;
			break;
		}

		signature = le32_to_cpu(pplhdr->signature);

		if (mddev->external) {
			/*
			 * For external metadata the header signature is set and
			 * validated in userspace.
			 */
			ppl_conf->signature = signature;
		} else if (ppl_conf->signature != signature) {
			pr_debug("%s: ppl header signature does not match: stored: 0x%x configured: 0x%x (offset: %llu)\n",
				 __func__, signature, ppl_conf->signature,
				 (unsigned long long)pplhdr_offset);
			pplhdr = prev_pplhdr;
			pplhdr_offset = prev_pplhdr_offset;
			break;
		}

		if (prev_pplhdr && le64_to_cpu(prev_pplhdr->generation) >
		    le64_to_cpu(pplhdr->generation)) {
			/* previous was newest */
			pplhdr = prev_pplhdr;
			pplhdr_offset = prev_pplhdr_offset;
			break;
		}

		prev_pplhdr_offset = pplhdr_offset;
		prev_pplhdr = pplhdr;

		tmp = page;
		page = page2;
		page2 = tmp;

		/* calculate next potential ppl offset */
		for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++)
			pplhdr_offset +=
			    le32_to_cpu(pplhdr->entries[i].pp_size) >> 9;
		pplhdr_offset += PPL_HEADER_SIZE >> 9;
	}

	/* no valid ppl found */
	if (!pplhdr)
		ppl_conf->mismatch_count++;
	else
		pr_debug("%s: latest PPL found at offset: %llu, with generation: %llu\n",
		    __func__, (unsigned long long)pplhdr_offset,
		    le64_to_cpu(pplhdr->generation));

	/* attempt to recover from log if we are starting a dirty array */
	if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector)
		ret = ppl_recover(log, pplhdr, pplhdr_offset);

	/* write empty header if we are starting the array */
	if (!ret && !mddev->pers)
		ret = ppl_write_empty_header(log);

	__free_page(page);
	__free_page(page2);

	pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
		 __func__, ret, ppl_conf->mismatch_count,
		 ppl_conf->recovered_entries);
	return ret;
}
static int ppl_load(struct ppl_conf *ppl_conf)
{
	int ret = 0;
	u32 signature = 0;
	bool signature_set = false;
	int i;

	for (i = 0; i < ppl_conf->count; i++) {
		struct ppl_log *log = &ppl_conf->child_logs[i];

		/* skip missing drive */
		if (!log->rdev)
			continue;

		ret = ppl_load_distributed(log);
		if (ret)
			break;

		/*
		 * For external metadata we can't check if the signature is
		 * correct on a single drive, but we can check if it is the same
		 * for all.
		 */
		if (ppl_conf->mddev->external) {
			if (!signature_set) {
				signature = ppl_conf->signature;
				signature_set = true;
			} else if (signature != ppl_conf->signature) {
				pr_warn("md/raid:%s: PPL header signature does not match on all member drives\n",
					mdname(ppl_conf->mddev));
				ret = -EINVAL;
				break;
			}
		}
	}

	pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
		 __func__, ret, ppl_conf->mismatch_count,
		 ppl_conf->recovered_entries);
	return ret;
}
static void __ppl_exit_log(struct ppl_conf *ppl_conf)
{
	clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
	clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags);

	kfree(ppl_conf->child_logs);

	bioset_exit(&ppl_conf->bs);
	bioset_exit(&ppl_conf->flush_bs);
	mempool_exit(&ppl_conf->io_pool);
	kmem_cache_destroy(ppl_conf->io_kc);

	kfree(ppl_conf);
}
void ppl_exit_log(struct r5conf *conf)
{
	struct ppl_conf *ppl_conf = conf->log_private;

	if (ppl_conf) {
		__ppl_exit_log(ppl_conf);
		conf->log_private = NULL;
	}
}
static int ppl_validate_rdev(struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];
	int ppl_data_sectors;
	int ppl_size_new;

	/*
	 * The configured PPL size must be enough to store
	 * the header and (at the very least) partial parity
	 * for one stripe. Round it down to ensure the data
	 * space is cleanly divisible by stripe size.
	 */
	ppl_data_sectors = rdev->ppl.size - (PPL_HEADER_SIZE >> 9);

	if (ppl_data_sectors > 0)
		ppl_data_sectors = rounddown(ppl_data_sectors, STRIPE_SECTORS);

	if (ppl_data_sectors <= 0) {
		pr_warn("md/raid:%s: PPL space too small on %s\n",
			mdname(rdev->mddev), bdevname(rdev->bdev, b));
		return -ENOSPC;
	}

	ppl_size_new = ppl_data_sectors + (PPL_HEADER_SIZE >> 9);

	if ((rdev->ppl.sector < rdev->data_offset &&
	     rdev->ppl.sector + ppl_size_new > rdev->data_offset) ||
	    (rdev->ppl.sector >= rdev->data_offset &&
	     rdev->data_offset + rdev->sectors > rdev->ppl.sector)) {
		pr_warn("md/raid:%s: PPL space overlaps with data on %s\n",
			mdname(rdev->mddev), bdevname(rdev->bdev, b));
		return -EINVAL;
	}

	if (!rdev->mddev->external &&
	    ((rdev->ppl.offset > 0 && rdev->ppl.offset < (rdev->sb_size >> 9)) ||
	     (rdev->ppl.offset <= 0 && rdev->ppl.offset + ppl_size_new > 0))) {
		pr_warn("md/raid:%s: PPL space overlaps with superblock on %s\n",
			mdname(rdev->mddev), bdevname(rdev->bdev, b));
		return -EINVAL;
	}

	rdev->ppl.size = ppl_size_new;

	return 0;
}
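/*
 * Editorial example of the arithmetic above (illustrative numbers): with
 * rdev->ppl.size = 2048 sectors (1MB) and a 4KB header (8 sectors),
 * ppl_data_sectors = 2048 - 8 = 2040, which rounddown() to
 * STRIPE_SECTORS = 8 leaves unchanged, so ppl_size_new = 2040 + 8 = 2048.
 * A PPL area of 8 sectors or less leaves no room for partial parity and is
 * rejected with -ENOSPC.
 */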
static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
{
	struct request_queue *q;

	if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE +
				      PPL_HEADER_SIZE) * 2) {
		log->use_multippl = true;
		set_bit(MD_HAS_MULTIPLE_PPLS,
			&log->ppl_conf->mddev->flags);
		log->entry_space = PPL_SPACE_SIZE;
	} else {
		log->use_multippl = false;
		log->entry_space = (log->rdev->ppl.size << 9) -
				   PPL_HEADER_SIZE;
	}
	log->next_io_sector = rdev->ppl.sector;

	q = bdev_get_queue(rdev->bdev);
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		log->wb_cache_on = true;
}
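/*
 * Editorial note on the threshold above (illustrative numbers):
 * (PPL_SPACE_SIZE + PPL_HEADER_SIZE) * 2 is (128k + 4k) * 2 = 264k, i.e.
 * 528 sectors, so a 1MB PPL area (2048 sectors) selects multi-PPL mode and
 * each io_unit may then use up to 128k of entry space before a new header
 * must be written; smaller areas fall back to a single PPL whose entry
 * space is whatever remains after the 4KB header.
 */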
int ppl_init_log(struct r5conf *conf)
{
	struct ppl_conf *ppl_conf;
	struct mddev *mddev = conf->mddev;
	int ret = 0;
	int max_disks;
	int i;

	pr_debug("md/raid:%s: enabling distributed Partial Parity Log\n",
		 mdname(conf->mddev));

	if (PAGE_SIZE != 4096)
		return -EINVAL;

	if (mddev->level != 5) {
		pr_warn("md/raid:%s PPL is not compatible with raid level %d\n",
			mdname(mddev), mddev->level);
		return -EINVAL;
	}

	if (mddev->bitmap_info.file || mddev->bitmap_info.offset) {
		pr_warn("md/raid:%s PPL is not compatible with bitmap\n",
			mdname(mddev));
		return -EINVAL;
	}

	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
		pr_warn("md/raid:%s PPL is not compatible with journal\n",
			mdname(mddev));
		return -EINVAL;
	}

	max_disks = FIELD_SIZEOF(struct ppl_log, disk_flush_bitmap) *
		BITS_PER_BYTE;
	if (conf->raid_disks > max_disks) {
		pr_warn("md/raid:%s PPL doesn't support over %d disks in the array\n",
			mdname(mddev), max_disks);
		return -EINVAL;
	}

	ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL);
	if (!ppl_conf)
		return -ENOMEM;

	ppl_conf->mddev = mddev;

	ppl_conf->io_kc = KMEM_CACHE(ppl_io_unit, 0);
	if (!ppl_conf->io_kc) {
		ret = -ENOMEM;
		goto err;
	}

	ret = mempool_init(&ppl_conf->io_pool, conf->raid_disks, ppl_io_pool_alloc,
			   ppl_io_pool_free, ppl_conf->io_kc);
	if (ret)
		goto err;

	ret = bioset_init(&ppl_conf->bs, conf->raid_disks, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto err;

	ret = bioset_init(&ppl_conf->flush_bs, conf->raid_disks, 0, 0);
	if (ret)
		goto err;

	ppl_conf->count = conf->raid_disks;
	ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log),
				       GFP_KERNEL);
	if (!ppl_conf->child_logs) {
		ret = -ENOMEM;
		goto err;
	}

	atomic64_set(&ppl_conf->seq, 0);
	INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
	spin_lock_init(&ppl_conf->no_mem_stripes_lock);
	ppl_conf->write_hint = RWF_WRITE_LIFE_NOT_SET;

	if (!mddev->external) {
		ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
		ppl_conf->block_size = 512;
	} else {
		ppl_conf->block_size = queue_logical_block_size(mddev->queue);
	}

	for (i = 0; i < ppl_conf->count; i++) {
		struct ppl_log *log = &ppl_conf->child_logs[i];
		struct md_rdev *rdev = conf->disks[i].rdev;

		mutex_init(&log->io_mutex);
		spin_lock_init(&log->io_list_lock);
		INIT_LIST_HEAD(&log->io_list);

		log->ppl_conf = ppl_conf;
		log->rdev = rdev;

		if (rdev) {
			ret = ppl_validate_rdev(rdev);
			if (ret)
				goto err;

			ppl_init_child_log(log, rdev);
		}
	}

	/* load and possibly recover the logs from the member disks */
	ret = ppl_load(ppl_conf);

	if (ret) {
		goto err;
	} else if (!mddev->pers && mddev->recovery_cp == 0 &&
		   ppl_conf->recovered_entries > 0 &&
		   ppl_conf->mismatch_count == 0) {
		/*
		 * If we are starting a dirty array and the recovery succeeds
		 * without any issues, set the array as clean.
		 */
		mddev->recovery_cp = MaxSector;
		set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
	} else if (mddev->pers && ppl_conf->mismatch_count > 0) {
		/* no mismatch allowed when enabling PPL for a running array */
		ret = -EINVAL;
		goto err;
	}

	conf->log_private = ppl_conf;
	set_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);

	return 0;
err:
	__ppl_exit_log(ppl_conf);
	return ret;
}
*conf
, struct md_rdev
*rdev
, bool add
)
1475 struct ppl_conf
*ppl_conf
= conf
->log_private
;
1476 struct ppl_log
*log
;
1478 char b
[BDEVNAME_SIZE
];
1483 pr_debug("%s: disk: %d operation: %s dev: %s\n",
1484 __func__
, rdev
->raid_disk
, add
? "add" : "remove",
1485 bdevname(rdev
->bdev
, b
));
1487 if (rdev
->raid_disk
< 0)
1490 if (rdev
->raid_disk
>= ppl_conf
->count
)
1493 log
= &ppl_conf
->child_logs
[rdev
->raid_disk
];
1495 mutex_lock(&log
->io_mutex
);
1497 ret
= ppl_validate_rdev(rdev
);
1500 ret
= ppl_write_empty_header(log
);
1501 ppl_init_child_log(log
, rdev
);
1506 mutex_unlock(&log
->io_mutex
);
1512 ppl_write_hint_show(struct mddev
*mddev
, char *buf
)
1515 struct r5conf
*conf
;
1516 struct ppl_conf
*ppl_conf
= NULL
;
1518 spin_lock(&mddev
->lock
);
1519 conf
= mddev
->private;
1520 if (conf
&& raid5_has_ppl(conf
))
1521 ppl_conf
= conf
->log_private
;
1522 ret
= sprintf(buf
, "%d\n", ppl_conf
? ppl_conf
->write_hint
: 0);
1523 spin_unlock(&mddev
->lock
);
1529 ppl_write_hint_store(struct mddev
*mddev
, const char *page
, size_t len
)
1531 struct r5conf
*conf
;
1532 struct ppl_conf
*ppl_conf
;
1536 if (len
>= PAGE_SIZE
)
1538 if (kstrtou16(page
, 10, &new))
1541 err
= mddev_lock(mddev
);
1545 conf
= mddev
->private;
1548 } else if (raid5_has_ppl(conf
)) {
1549 ppl_conf
= conf
->log_private
;
1553 ppl_conf
->write_hint
= new;
1558 mddev_unlock(mddev
);
1563 struct md_sysfs_entry
1564 ppl_write_hint
= __ATTR(ppl_write_hint
, S_IRUGO
| S_IWUSR
,
1565 ppl_write_hint_show
,
1566 ppl_write_hint_store
);