4 * Copyright (c) Intel Corporation.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include "spdk/stdinc.h"
36 #include "spdk/util.h"
37 #include "spdk/likely.h"
38 #include "spdk/string.h"
44 struct ftl_restore_band
{
45 struct ftl_restore
*parent
;
47 struct ftl_band
*band
;
49 enum ftl_md_status md_status
;
51 STAILQ_ENTRY(ftl_restore_band
) stailq
;
55 struct spdk_ftl_dev
*dev
;
63 struct ftl_restore_band
*bands
;
65 STAILQ_HEAD(, ftl_restore_band
) pad_bands
;
/* Forward declarations for the pad/tail-md state machine below. */
static int ftl_restore_tail_md(struct ftl_restore_band *rband);
static void ftl_pad_chunk_cb(struct ftl_io *io, void *arg, int status);
static void ftl_restore_pad_band(struct ftl_restore_band *rband);
86 ftl_restore_free(struct ftl_restore
*restore
)
92 spdk_dma_free(restore
->md_buf
);
93 free(restore
->lba_map
);
98 static struct ftl_restore
*
99 ftl_restore_init(struct spdk_ftl_dev
*dev
, ftl_restore_fn cb
)
101 struct ftl_restore
*restore
;
102 struct ftl_restore_band
*rband
;
105 restore
= calloc(1, sizeof(*restore
));
112 restore
->l2p_phase
= false;
114 restore
->bands
= calloc(ftl_dev_num_bands(dev
), sizeof(*restore
->bands
));
115 if (!restore
->bands
) {
119 STAILQ_INIT(&restore
->pad_bands
);
121 for (i
= 0; i
< ftl_dev_num_bands(dev
); ++i
) {
122 rband
= &restore
->bands
[i
];
123 rband
->band
= &dev
->bands
[i
];
124 rband
->parent
= restore
;
125 rband
->md_status
= FTL_MD_NO_MD
;
128 /* Allocate buffer capable of holding either tail md or head mds of all bands */
129 md_size
= spdk_max(ftl_dev_num_bands(dev
) * ftl_head_md_num_lbks(dev
) * FTL_BLOCK_SIZE
,
130 ftl_tail_md_num_lbks(dev
) * FTL_BLOCK_SIZE
);
132 restore
->md_buf
= spdk_dma_zmalloc(md_size
, FTL_BLOCK_SIZE
, NULL
);
133 if (!restore
->md_buf
) {
137 restore
->lba_map
= calloc(ftl_num_band_lbks(dev
), sizeof(uint64_t));
138 if (!restore
->lba_map
) {
144 ftl_restore_free(restore
);
149 ftl_restore_complete(struct ftl_restore
*restore
, int status
)
151 struct ftl_restore
*ctx
= status
? NULL
: restore
;
152 bool l2p_phase
= restore
->l2p_phase
;
154 restore
->cb(restore
->dev
, ctx
, status
);
155 if (status
|| l2p_phase
) {
156 ftl_restore_free(restore
);
161 ftl_band_cmp(const void *lband
, const void *rband
)
163 uint64_t lseq
= ((struct ftl_restore_band
*)lband
)->band
->seq
;
164 uint64_t rseq
= ((struct ftl_restore_band
*)rband
)->band
->seq
;
174 ftl_restore_check_seq(const struct ftl_restore
*restore
)
176 const struct spdk_ftl_dev
*dev
= restore
->dev
;
177 const struct ftl_restore_band
*rband
;
178 const struct ftl_band
*next_band
;
181 for (i
= 0; i
< ftl_dev_num_bands(dev
); ++i
) {
182 rband
= &restore
->bands
[i
];
184 if (rband
->md_status
!= FTL_MD_SUCCESS
) {
188 next_band
= LIST_NEXT(rband
->band
, list_entry
);
189 if (next_band
&& rband
->band
->seq
== next_band
->seq
) {
198 ftl_restore_head_valid(struct spdk_ftl_dev
*dev
, struct ftl_restore
*restore
, size_t *num_valid
)
200 struct ftl_restore_band
*rband
;
203 for (i
= 0; i
< ftl_dev_num_bands(dev
); ++i
) {
204 rband
= &restore
->bands
[i
];
206 if (rband
->md_status
!= FTL_MD_SUCCESS
&&
207 rband
->md_status
!= FTL_MD_NO_MD
&&
208 rband
->md_status
!= FTL_MD_IO_FAILURE
) {
209 SPDK_ERRLOG("Inconsistent head metadata found on band %u\n",
214 if (rband
->md_status
== FTL_MD_SUCCESS
) {
223 ftl_restore_head_complete(struct ftl_restore
*restore
)
225 struct spdk_ftl_dev
*dev
= restore
->dev
;
226 size_t num_valid
= 0;
229 if (!ftl_restore_head_valid(dev
, restore
, &num_valid
)) {
233 if (num_valid
== 0) {
234 SPDK_ERRLOG("Couldn't find any valid bands\n");
238 /* Sort bands in sequence number ascending order */
239 qsort(restore
->bands
, ftl_dev_num_bands(dev
), sizeof(struct ftl_restore_band
),
242 if (ftl_restore_check_seq(restore
)) {
243 SPDK_ERRLOG("Band sequence consistency failed\n");
247 dev
->num_lbas
= dev
->global_md
.num_lbas
;
250 ftl_restore_complete(restore
, status
);
254 ftl_restore_head_cb(struct ftl_io
*io
, void *ctx
, int status
)
256 struct ftl_restore_band
*rband
= ctx
;
257 struct ftl_restore
*restore
= rband
->parent
;
258 unsigned int num_ios
;
260 rband
->md_status
= status
;
261 num_ios
= __atomic_fetch_sub(&restore
->num_ios
, 1, __ATOMIC_SEQ_CST
);
265 ftl_restore_head_complete(restore
);
270 ftl_restore_head_md(struct ftl_restore
*restore
)
272 struct spdk_ftl_dev
*dev
= restore
->dev
;
273 struct ftl_restore_band
*rband
;
274 struct ftl_lba_map
*lba_map
;
275 unsigned int num_failed
= 0, num_ios
;
278 restore
->num_ios
= ftl_dev_num_bands(dev
);
280 for (i
= 0; i
< ftl_dev_num_bands(dev
); ++i
) {
281 rband
= &restore
->bands
[i
];
282 lba_map
= &rband
->band
->lba_map
;
284 lba_map
->dma_buf
= restore
->md_buf
+ i
* ftl_head_md_num_lbks(dev
) * FTL_BLOCK_SIZE
;
286 if (ftl_band_read_head_md(rband
->band
, ftl_restore_head_cb
, rband
)) {
287 if (spdk_likely(rband
->band
->num_chunks
)) {
288 SPDK_ERRLOG("Failed to read metadata on band %zu\n", i
);
290 rband
->md_status
= FTL_MD_INVALID_CRC
;
292 /* If the first IO fails, don't bother sending anything else */
294 ftl_restore_free(restore
);
303 if (spdk_unlikely(num_failed
> 0)) {
304 num_ios
= __atomic_fetch_sub(&restore
->num_ios
, num_failed
, __ATOMIC_SEQ_CST
);
305 if (num_ios
== num_failed
) {
306 ftl_restore_free(restore
);
315 ftl_restore_md(struct spdk_ftl_dev
*dev
, ftl_restore_fn cb
)
317 struct ftl_restore
*restore
;
319 restore
= ftl_restore_init(dev
, cb
);
324 return ftl_restore_head_md(restore
);
328 ftl_restore_l2p(struct ftl_band
*band
)
330 struct spdk_ftl_dev
*dev
= band
->dev
;
335 for (i
= 0; i
< ftl_num_band_lbks(band
->dev
); ++i
) {
336 if (!spdk_bit_array_get(band
->lba_map
.vld
, i
)) {
340 lba
= band
->lba_map
.map
[i
];
341 if (lba
>= dev
->num_lbas
) {
345 ppa
= ftl_l2p_get(dev
, lba
);
346 if (!ftl_ppa_invalid(ppa
)) {
347 ftl_invalidate_addr(dev
, ppa
);
350 ppa
= ftl_band_ppa_from_lbkoff(band
, i
);
352 ftl_band_set_addr(band
, lba
, ppa
);
353 ftl_l2p_set(dev
, lba
, ppa
);
356 band
->lba_map
.map
= NULL
;
360 static struct ftl_restore_band
*
361 ftl_restore_next_band(struct ftl_restore
*restore
)
363 struct ftl_restore_band
*rband
;
365 for (; restore
->current
< ftl_dev_num_bands(restore
->dev
); ++restore
->current
) {
366 rband
= &restore
->bands
[restore
->current
];
368 if (spdk_likely(rband
->band
->num_chunks
) &&
369 rband
->md_status
== FTL_MD_SUCCESS
) {
379 ftl_pad_chunk_pad_finish(struct ftl_restore_band
*rband
, bool direct_access
)
381 struct ftl_restore
*restore
= rband
->parent
;
382 size_t i
, num_pad_chunks
= 0;
384 if (spdk_unlikely(restore
->pad_status
&& !restore
->num_ios
)) {
386 /* In case of any errors found we want to clear direct access. */
387 /* Direct access bands have their own allocated md, which would be lost */
388 /* on restore complete otherwise. */
389 rband
->band
->state
= FTL_BAND_STATE_CLOSED
;
390 ftl_band_set_direct_access(rband
->band
, false);
392 ftl_restore_complete(restore
, restore
->pad_status
);
396 for (i
= 0; i
< rband
->band
->num_chunks
; ++i
) {
397 if (rband
->band
->chunk_buf
[i
].state
!= FTL_CHUNK_STATE_CLOSED
) {
402 /* Finished all chunks in a band, check if all bands are done */
403 if (num_pad_chunks
== 0) {
405 rband
->band
->state
= FTL_BAND_STATE_CLOSED
;
406 ftl_band_set_direct_access(rband
->band
, false);
408 if (--restore
->num_pad_bands
== 0) {
409 ftl_restore_complete(restore
, restore
->pad_status
);
412 /* Start off padding in the next band */
413 ftl_restore_pad_band(STAILQ_NEXT(rband
, stailq
));
421 static struct ftl_io
*
422 ftl_restore_init_pad_io(struct ftl_restore_band
*rband
, void *buffer
,
425 struct ftl_band
*band
= rband
->band
;
426 struct spdk_ftl_dev
*dev
= band
->dev
;
427 int flags
= FTL_IO_PAD
| FTL_IO_INTERNAL
| FTL_IO_PPA_MODE
| FTL_IO_MD
|
428 FTL_IO_DIRECT_ACCESS
;
429 struct ftl_io_init_opts opts
= {
434 .size
= sizeof(struct ftl_io
),
436 .type
= FTL_IO_WRITE
,
437 .lbk_cnt
= dev
->xfer_size
,
438 .cb_fn
= ftl_pad_chunk_cb
,
445 io
= ftl_io_init_internal(&opts
);
446 if (spdk_unlikely(!io
)) {
451 rband
->parent
->num_ios
++;
457 ftl_pad_chunk_cb(struct ftl_io
*io
, void *arg
, int status
)
459 struct ftl_restore_band
*rband
= arg
;
460 struct ftl_restore
*restore
= rband
->parent
;
461 struct ftl_band
*band
= io
->band
;
462 struct ftl_chunk
*chunk
;
463 struct ftl_io
*new_io
;
466 /* TODO check for next unit error vs early close error */
468 restore
->pad_status
= status
;
472 if (io
->ppa
.lbk
+ io
->lbk_cnt
== band
->dev
->geo
.clba
) {
473 chunk
= ftl_band_chunk_from_ppa(band
, io
->ppa
);
474 chunk
->state
= FTL_CHUNK_STATE_CLOSED
;
476 struct ftl_ppa ppa
= io
->ppa
;
477 ppa
.lbk
+= io
->lbk_cnt
;
478 new_io
= ftl_restore_init_pad_io(rband
, io
->iov
[0].iov_base
, ppa
);
479 if (spdk_unlikely(!new_io
)) {
480 restore
->pad_status
= -ENOMEM
;
484 ftl_io_write(new_io
);
489 spdk_dma_free(io
->iov
[0].iov_base
);
490 ftl_pad_chunk_pad_finish(rband
, true);
494 ftl_restore_pad_band(struct ftl_restore_band
*rband
)
496 struct spdk_ocssd_chunk_information_entry info
;
497 struct ftl_restore
*restore
= rband
->parent
;
498 struct ftl_band
*band
= rband
->band
;
499 struct spdk_ftl_dev
*dev
= band
->dev
;
506 /* Check if some chunks are not closed */
507 if (ftl_pad_chunk_pad_finish(rband
, false)) {
508 /* If we're here, end meta wasn't recognized, but the whole band is written */
509 /* Assume the band was padded and ignore it */
513 /* The LBA map was assigned from restore pool */
514 band
->lba_map
.map
= NULL
;
515 band
->state
= FTL_BAND_STATE_OPEN
;
516 rc
= ftl_band_set_direct_access(band
, true);
518 restore
->pad_status
= rc
;
519 if (--restore
->num_pad_bands
== 0) {
520 ftl_restore_complete(restore
, restore
->pad_status
);
525 for (i
= 0; i
< band
->num_chunks
; ++i
) {
526 if (band
->chunk_buf
[i
].state
== FTL_CHUNK_STATE_CLOSED
) {
530 rc
= ftl_retrieve_chunk_info(dev
, band
->chunk_buf
[i
].start_ppa
, &info
, 1);
531 if (spdk_unlikely(rc
)) {
534 ppa
= band
->chunk_buf
[i
].start_ppa
;
537 buffer
= spdk_dma_zmalloc(FTL_BLOCK_SIZE
* dev
->xfer_size
, sizeof(uint32_t), NULL
);
538 if (spdk_unlikely(!buffer
)) {
543 io
= ftl_restore_init_pad_io(rband
, buffer
, ppa
);
544 if (spdk_unlikely(!io
)) {
546 spdk_dma_free(buffer
);
556 restore
->pad_status
= rc
;
557 ftl_pad_chunk_pad_finish(rband
, true);
561 ftl_restore_pad_open_bands(void *ctx
)
563 struct ftl_restore
*restore
= ctx
;
565 ftl_restore_pad_band(STAILQ_FIRST(&restore
->pad_bands
));
569 ftl_restore_tail_md_cb(struct ftl_io
*io
, void *ctx
, int status
)
571 struct ftl_restore_band
*rband
= ctx
;
572 struct ftl_restore
*restore
= rband
->parent
;
573 struct spdk_ftl_dev
*dev
= rband
->band
->dev
;
576 if (!dev
->conf
.allow_open_bands
) {
577 SPDK_ERRLOG("%s while restoring tail md in band %u.\n",
578 spdk_strerror(-status
), rband
->band
->id
);
579 ftl_restore_complete(restore
, status
);
582 SPDK_ERRLOG("%s while restoring tail md. Will attempt to pad band %u.\n",
583 spdk_strerror(-status
), rband
->band
->id
);
584 STAILQ_INSERT_TAIL(&restore
->pad_bands
, rband
, stailq
);
585 restore
->num_pad_bands
++;
589 if (!status
&& ftl_restore_l2p(rband
->band
)) {
590 ftl_restore_complete(restore
, -ENOTRECOVERABLE
);
594 rband
= ftl_restore_next_band(restore
);
596 if (!STAILQ_EMPTY(&restore
->pad_bands
)) {
597 spdk_thread_send_msg(ftl_get_core_thread(dev
), ftl_restore_pad_open_bands
,
600 ftl_restore_complete(restore
, status
);
605 ftl_restore_tail_md(rband
);
609 ftl_restore_tail_md(struct ftl_restore_band
*rband
)
611 struct ftl_restore
*restore
= rband
->parent
;
612 struct ftl_band
*band
= rband
->band
;
614 band
->tail_md_ppa
= ftl_band_tail_md_ppa(band
);
615 band
->lba_map
.map
= restore
->lba_map
;
616 band
->lba_map
.dma_buf
= restore
->md_buf
;
618 if (ftl_band_read_tail_md(band
, band
->tail_md_ppa
, ftl_restore_tail_md_cb
, rband
)) {
619 SPDK_ERRLOG("Failed to send tail metadata read\n");
620 ftl_restore_complete(restore
, -EIO
);
628 ftl_restore_device(struct ftl_restore
*restore
, ftl_restore_fn cb
)
630 struct ftl_restore_band
*rband
;
632 restore
->l2p_phase
= true;
633 restore
->current
= 0;
636 /* If restore_device is called, there must be at least one valid band */
637 rband
= ftl_restore_next_band(restore
);
638 return ftl_restore_tail_md(rband
);