/*
 * Copyright (C) 2016 CNEX Labs
 * Initial: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-recovery.c - pblk's recovery path
 */

#include "pblk.h"
void pblk_submit_rec(struct work_struct *work)
{
	struct pblk_rec_ctx *recovery =
			container_of(work, struct pblk_rec_ctx, ws_rec);
	struct pblk *pblk = recovery->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_rq *rqd = recovery->rqd;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	int max_secs = nvm_max_phys_sects(dev);
	struct bio *bio;
	unsigned int nr_rec_secs;
	unsigned int pgs_read;
	int ret;

	nr_rec_secs = bitmap_weight((unsigned long int *)&rqd->ppa_status,
								max_secs);

	bio = bio_alloc(GFP_KERNEL, nr_rec_secs);
	if (!bio) {
		pr_err("pblk: not able to create recovery bio\n");
		return;
	}

	bio->bi_iter.bi_sector = 0;
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;
	rqd->nr_ppas = nr_rec_secs;

	pgs_read = pblk_rb_read_to_bio_list(&pblk->rwb, bio, &recovery->failed,
								nr_rec_secs);
	if (pgs_read != nr_rec_secs) {
		pr_err("pblk: could not read recovery entries\n");
		goto err;
	}

	if (pblk_setup_w_rec_rq(pblk, rqd, c_ctx)) {
		pr_err("pblk: could not setup recovery request\n");
		goto err;
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_rec_secs, &pblk->recov_writes);
#endif

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		goto err;
	}

	mempool_free(recovery, pblk->rec_pool);
	return;

err:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, WRITE);
}
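
/* Prepare a recovery write for the entries of a failed user write: keep the
 * first 'comp' completed entries in @c_ctx and set up a new write request on
 * @recovery->rqd for the entries that still need to be rewritten.
 */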
int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
			struct pblk_rec_ctx *recovery, u64 *comp_bits,
			unsigned int comp)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	int max_secs = nvm_max_phys_sects(dev);
	struct nvm_rq *rec_rqd;
	struct pblk_c_ctx *rec_ctx;
	int nr_entries = c_ctx->nr_valid + c_ctx->nr_padded;

	rec_rqd = pblk_alloc_rqd(pblk, WRITE);
	if (IS_ERR(rec_rqd)) {
		pr_err("pblk: could not create recovery req.\n");
		return -ENOMEM;
	}

	rec_ctx = nvm_rq_to_pdu(rec_rqd);

	/* Copy completion bitmap, but exclude the first X completed entries */
	bitmap_shift_right((unsigned long int *)&rec_rqd->ppa_status,
				(unsigned long int *)comp_bits,
				comp, max_secs);

	/* Save the context for the entries that need to be re-written and
	 * update current context with the completed entries.
	 */
	rec_ctx->sentry = pblk_rb_wrap_pos(&pblk->rwb, c_ctx->sentry + comp);
	if (comp >= c_ctx->nr_valid) {
		rec_ctx->nr_valid = 0;
		rec_ctx->nr_padded = nr_entries - comp;

		c_ctx->nr_padded = comp - c_ctx->nr_valid;
	} else {
		rec_ctx->nr_valid = c_ctx->nr_valid - comp;
		rec_ctx->nr_padded = c_ctx->nr_padded;

		c_ctx->nr_valid = comp;
		c_ctx->nr_padded = 0;
	}

	recovery->rqd = rec_rqd;
	recovery->pblk = pblk;

	return 0;
}
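
/* Return the lba list stored in the line's end metadata, or NULL if the
 * emeta CRC or the PBLK_MAGIC identifier does not check out.
 */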
__le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta_buf)
{
	u32 crc;

	crc = pblk_calc_emeta_crc(pblk, emeta_buf);
	if (le32_to_cpu(emeta_buf->crc) != crc)
		return NULL;

	if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
		return NULL;

	return emeta_to_lbas(pblk, emeta_buf);
}
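
/* Recover the L2P mappings of a fully written line from its end metadata:
 * walk the emeta lba list, skip sectors on bad blocks, account empty
 * entries as invalidated sectors and map the valid ones.
 */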
static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	__le64 *lba_list;
	int data_start;
	int nr_data_lbas, nr_valid_lbas, nr_lbas = 0;
	int i;

	lba_list = pblk_recov_get_lba_list(pblk, emeta_buf);
	if (!lba_list)
		return 1;

	data_start = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
	nr_data_lbas = lm->sec_per_line - lm->emeta_sec[0];
	nr_valid_lbas = le64_to_cpu(emeta_buf->nr_valid_lbas);

	for (i = data_start; i < nr_data_lbas && nr_lbas < nr_valid_lbas; i++) {
		struct ppa_addr ppa;
		int pos;

		ppa = addr_to_pblk_ppa(pblk, i, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		/* Do not update bad blocks */
		if (test_bit(pos, line->blk_bitmap))
			continue;

		if (le64_to_cpu(lba_list[i]) == ADDR_EMPTY) {
			spin_lock(&line->lock);
			if (test_and_set_bit(i, line->invalid_bitmap))
				WARN_ONCE(1, "pblk: rec. double invalidate:\n");
			else
				le32_add_cpu(line->vsc, -1);
			spin_unlock(&line->lock);

			continue;
		}

		pblk_update_map(pblk, le64_to_cpu(lba_list[i]), ppa);
		nr_lbas++;
	}

	if (nr_valid_lbas != nr_lbas)
		pr_err("pblk: line %d - inconsistent lba list(%llu/%d)\n",
				line->id, emeta_buf->nr_valid_lbas, nr_lbas);

	line->left_msecs = 0;

	return 0;
}
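
/* Number of usable data sectors in a line: total sectors minus start/end
 * metadata and the sectors lost to bad blocks.
 */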
static int pblk_calc_sec_in_line(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);

	return lm->sec_per_line - lm->smeta_sec - lm->emeta_sec[0] -
				nr_bb * geo->sec_per_blk;
}
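
/* Resources shared by the OOB recovery helpers below, so each scan reuses
 * one request, data buffer and DMA-mapped metadata/ppa lists.
 */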
struct pblk_recov_alloc {
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	void *data;
	dma_addr_t dma_ppa_list;
	dma_addr_t dma_meta_list;
};
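
/* Read the written sectors between @r_ptr and the line's current write
 * pointer in min_write_pgs-sized chunks, synchronously, and rebuild the
 * L2P table from the lbas found in the out-of-band metadata.
 */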
static int pblk_recov_read_oob(struct pblk *pblk, struct pblk_line *line,
			       struct pblk_recov_alloc p, u64 r_ptr)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	u64 r_ptr_int;
	int left_ppas;
	int rq_ppas, rq_len;
	int i, j;
	int ret = 0;
	DECLARE_COMPLETION_ONSTACK(wait);

	ppa_list = p.ppa_list;
	meta_list = p.meta_list;
	rqd = p.rqd;
	data = p.data;
	dma_ppa_list = p.dma_ppa_list;
	dma_meta_list = p.dma_meta_list;

	left_ppas = line->cur_sec - r_ptr;
	if (!left_ppas)
		return 0;

	r_ptr_int = r_ptr;

next_read_rq:
	memset(rqd, 0, pblk_g_rq_size);

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (!rq_ppas)
		rq_ppas = pblk->min_write_pgs;
	rq_len = rq_ppas * geo->sec_size;

	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PREAD;
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;
	rqd->end_io = pblk_end_io_sync;
	rqd->private = &wait;

	if (pblk_io_aligned(pblk, rq_ppas))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
		pos = pblk_dev_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			r_ptr_int += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
			pos = pblk_dev_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, r_ptr_int++)
			rqd->ppa_list[i] =
				addr_to_gen_ppa(pblk, r_ptr_int, line->id);
	}

	/* If read fails, more padding is needed */
	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		return ret;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: L2P recovery read timed out\n");
		return -EINTR;
	}
	atomic_dec(&pblk->inflight_io);
	reinit_completion(&wait);

	/* At this point, the read should not fail. If it does, it is a problem
	 * we cannot recover from here. Need FTL log.
	 */
	if (rqd->error) {
		pr_err("pblk: L2P recovery failed (%d)\n", rqd->error);
		return -EINTR;
	}

	for (i = 0; i < rqd->nr_ppas; i++) {
		u64 lba = le64_to_cpu(meta_list[i].lba);

		if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
			continue;

		pblk_update_map(pblk, lba, rqd->ppa_list[i]);
	}

	left_ppas -= rq_ppas;
	if (left_ppas > 0)
		goto next_read_rq;

	return 0;
}
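
/* Completion plumbing for padding: every in-flight pad request holds a
 * reference on the shared pblk_pad_rq and the last kref put wakes up the
 * waiter in pblk_recov_pad_oob().
 */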
static void pblk_recov_complete(struct kref *ref)
{
	struct pblk_pad_rq *pad_rq = container_of(ref, struct pblk_pad_rq, ref);

	complete(&pad_rq->wait);
}
static void pblk_end_io_recov(struct nvm_rq *rqd)
{
	struct pblk_pad_rq *pad_rq = rqd->private;
	struct pblk *pblk = pad_rq->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;

	kref_put(&pad_rq->ref, pblk_recov_complete);
	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
	pblk_free_rqd(pblk, rqd, WRITE);
}
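
/* Pad @left_ppas sectors of @line with empty data so the line reaches a
 * readable boundary. Padded sectors are marked ADDR_EMPTY in both the OOB
 * metadata and the emeta lba list, and invalidated in the L2P table.
 */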
static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
			      int left_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct pblk_pad_rq *pad_rq;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	u64 w_ptr = line->cur_sec;
	int left_line_ppas, rq_ppas, rq_len;
	int i, j;
	int ret = 0;

	spin_lock(&line->lock);
	left_line_ppas = line->left_msecs;
	spin_unlock(&line->lock);

	pad_rq = kmalloc(sizeof(struct pblk_pad_rq), GFP_KERNEL);
	if (!pad_rq)
		return -ENOMEM;

	data = vzalloc(pblk->max_write_pgs * geo->sec_size);
	if (!data) {
		ret = -ENOMEM;
		goto free_rq;
	}

	pad_rq->pblk = pblk;
	init_completion(&pad_rq->wait);
	kref_init(&pad_rq->ref);

next_pad_rq:
	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (rq_ppas < pblk->min_write_pgs) {
		pr_err("pblk: corrupted pad line %d\n", line->id);
		goto fail_free_pad;
	}

	rq_len = rq_ppas * geo->sec_size;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
	if (!meta_list) {
		ret = -ENOMEM;
		goto fail_free_pad;
	}

	ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

	rqd = pblk_alloc_rqd(pblk, WRITE);
	if (IS_ERR(rqd)) {
		ret = PTR_ERR(rqd);
		goto fail_free_meta;
	}
	memset(rqd, 0, pblk_w_rq_size);

	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto fail_free_rqd;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PWRITE;
	rqd->flags = pblk_set_progr_mode(pblk, WRITE);
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;
	rqd->end_io = pblk_end_io_recov;
	rqd->private = pad_rq;

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
		ppa = addr_to_pblk_ppa(pblk, w_ptr, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			w_ptr += pblk->min_write_pgs;
			ppa = addr_to_pblk_ppa(pblk, w_ptr, line->id);
			pos = pblk_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++) {
			struct ppa_addr dev_ppa;
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			dev_ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);

			pblk_map_invalidate(pblk, dev_ppa);
			lba_list[w_ptr] = meta_list[i].lba = addr_empty;
			rqd->ppa_list[i] = dev_ppa;
		}
	}

	kref_get(&pad_rq->ref);

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		goto fail_free_bio;
	}

	atomic_dec(&pblk->inflight_io);

	left_line_ppas -= rq_ppas;
	left_ppas -= rq_ppas;
	if (left_ppas && left_line_ppas)
		goto next_pad_rq;

	kref_put(&pad_rq->ref, pblk_recov_complete);

	if (!wait_for_completion_io_timeout(&pad_rq->wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: pad write timed out\n");
		ret = -ETIME;
	}

	vfree(data);
free_rq:
	kfree(pad_rq);
	return ret;

fail_free_bio:
	bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, WRITE);
fail_free_meta:
	nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
fail_free_pad:
	kfree(pad_rq);
	vfree(data);
	return ret;
}
/* When this function is called, it means that not all upper pages have been
 * written in a page that contains valid data. In order to recover this data,
 * we first find the write pointer on the device, then we pad all necessary
 * sectors, and finally attempt to read the valid data.
 */
static int pblk_recov_scan_all_oob(struct pblk *pblk, struct pblk_line *line,
				   struct pblk_recov_alloc p)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	u64 w_ptr = 0, r_ptr;
	int rq_ppas, rq_len;
	int i, j;
	int ret = 0;
	int rec_round;
	int left_ppas = pblk_calc_sec_in_line(pblk, line) - line->cur_sec;
	DECLARE_COMPLETION_ONSTACK(wait);

	ppa_list = p.ppa_list;
	meta_list = p.meta_list;
	rqd = p.rqd;
	data = p.data;
	dma_ppa_list = p.dma_ppa_list;
	dma_meta_list = p.dma_meta_list;

	/* we could recover up until the line write pointer */
	r_ptr = line->cur_sec;
	rec_round = 0;

next_rq:
	memset(rqd, 0, pblk_g_rq_size);

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (!rq_ppas)
		rq_ppas = pblk->min_write_pgs;
	rq_len = rq_ppas * geo->sec_size;

	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PREAD;
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;
	rqd->end_io = pblk_end_io_sync;
	rqd->private = &wait;

	if (pblk_io_aligned(pblk, rq_ppas))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
		ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
		pos = pblk_dev_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			w_ptr += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
			pos = pblk_dev_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++)
			rqd->ppa_list[i] =
				addr_to_gen_ppa(pblk, w_ptr, line->id);
	}

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		return ret;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: L2P recovery read timed out\n");
	}
	atomic_dec(&pblk->inflight_io);
	reinit_completion(&wait);

	/* This should not happen since the read failed during normal recovery,
	 * but the media works funny sometimes...
	 */
	if (!rec_round++ && !rqd->error) {
		rec_round = 0;
		for (i = 0; i < rqd->nr_ppas; i++, r_ptr++) {
			u64 lba = le64_to_cpu(meta_list[i].lba);

			if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
				continue;

			pblk_update_map(pblk, lba, rqd->ppa_list[i]);
		}
	}

	/* Reached the end of the written line */
	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
		int pad_secs, nr_error_bits, bit;

		bit = find_first_bit((void *)&rqd->ppa_status, rqd->nr_ppas);
		nr_error_bits = rqd->nr_ppas - bit;

		/* Roll back failed sectors */
		line->cur_sec -= nr_error_bits;
		line->left_msecs += nr_error_bits;
		bitmap_clear(line->map_bitmap, line->cur_sec, nr_error_bits);

		pad_secs = pblk_pad_distance(pblk);
		if (pad_secs > line->left_msecs)
			pad_secs = line->left_msecs;

		ret = pblk_recov_pad_oob(pblk, line, pad_secs);
		if (ret)
			pr_err("pblk: OOB padding failed (err:%d)\n", ret);

		ret = pblk_recov_read_oob(pblk, line, p, r_ptr);
		if (ret)
			pr_err("pblk: OOB read failed (err:%d)\n", ret);

		left_ppas = 0;
	}

	left_ppas -= rq_ppas;
	if (left_ppas > 0)
		goto next_rq;

	return ret;
}
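
/* First recovery pass over a line: scan the written sectors, roll back the
 * sectors that fail to read and report through @done whether the line ended
 * cleanly (empty page) or needs the full scan with padding above.
 */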
static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
			       struct pblk_recov_alloc p, int *done)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	u64 paddr;
	int rq_ppas, rq_len;
	int i, j;
	int ret = 0;
	int left_ppas = pblk_calc_sec_in_line(pblk, line);
	DECLARE_COMPLETION_ONSTACK(wait);

	ppa_list = p.ppa_list;
	meta_list = p.meta_list;
	rqd = p.rqd;
	data = p.data;
	dma_ppa_list = p.dma_ppa_list;
	dma_meta_list = p.dma_meta_list;

	*done = 1;

next_rq:
	memset(rqd, 0, pblk_g_rq_size);

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (!rq_ppas)
		rq_ppas = pblk->min_write_pgs;
	rq_len = rq_ppas * geo->sec_size;

	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PREAD;
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;
	rqd->end_io = pblk_end_io_sync;
	rqd->private = &wait;

	if (pblk_io_aligned(pblk, rq_ppas))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		paddr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
		ppa = addr_to_gen_ppa(pblk, paddr, line->id);
		pos = pblk_dev_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			paddr += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, paddr, line->id);
			pos = pblk_dev_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, paddr++)
			rqd->ppa_list[i] =
				addr_to_gen_ppa(pblk, paddr, line->id);
	}

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		return ret;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: L2P recovery read timed out\n");
	}
	atomic_dec(&pblk->inflight_io);
	reinit_completion(&wait);

	/* Reached the end of the written line */
	if (rqd->error) {
		int nr_error_bits, bit;

		bit = find_first_bit((void *)&rqd->ppa_status, rqd->nr_ppas);
		nr_error_bits = rqd->nr_ppas - bit;

		/* Roll back failed sectors */
		line->cur_sec -= nr_error_bits;
		line->left_msecs += nr_error_bits;
		bitmap_clear(line->map_bitmap, line->cur_sec, nr_error_bits);

		left_ppas = 0;
		rqd->nr_ppas = bit;

		if (rqd->error != NVM_RSP_ERR_EMPTYPAGE)
			*done = 0;
	}

	for (i = 0; i < rqd->nr_ppas; i++) {
		u64 lba = le64_to_cpu(meta_list[i].lba);

		if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
			continue;

		pblk_update_map(pblk, lba, rqd->ppa_list[i]);
	}

	left_ppas -= rq_ppas;
	if (left_ppas > 0)
		goto next_rq;

	return ret;
}
/* Scan line for lbas on out of bound area */
static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_rq *rqd;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct pblk_recov_alloc p;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	int done, ret = 0;

	rqd = pblk_alloc_rqd(pblk, READ);
	if (IS_ERR(rqd))
		return PTR_ERR(rqd);

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
	if (!meta_list) {
		ret = -ENOMEM;
		goto free_rqd;
	}

	ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

	data = kcalloc(pblk->max_write_pgs, geo->sec_size, GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto free_meta_list;
	}

	p.ppa_list = ppa_list;
	p.meta_list = meta_list;
	p.rqd = rqd;
	p.data = data;
	p.dma_ppa_list = dma_ppa_list;
	p.dma_meta_list = dma_meta_list;

	ret = pblk_recov_scan_oob(pblk, line, p, &done);
	if (ret) {
		pr_err("pblk: could not recover L2P from OOB\n");
		goto out;
	}

	if (!done) {
		ret = pblk_recov_scan_all_oob(pblk, line, p);
		if (ret) {
			pr_err("pblk: could not recover L2P from OOB\n");
			goto out;
		}
	}

	if (pblk_line_is_full(line))
		pblk_line_recov_close(pblk, line);

out:
	kfree(data);
free_meta_list:
	nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
free_rqd:
	pblk_free_rqd(pblk, rqd, READ);

	return ret;
}
/* Insert lines ordered by sequence number (seq_num) on list */
static void pblk_recov_line_add_ordered(struct list_head *head,
					struct pblk_line *line)
{
	struct pblk_line *t = NULL;

	list_for_each_entry(t, head, list)
		if (t->seq_nr > line->seq_nr)
			break;

	__list_add(&line->list, t->list.prev, &t->list);
}
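
/* Scan recovery entry point: read smeta from every line, order the valid
 * lines by sequence number, recover each line's L2P mappings from emeta
 * when available and from the OOB area otherwise, and return the still
 * open data line (if any) so writes can resume on it.
 */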
struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line, *tline, *data_line = NULL;
	struct pblk_smeta *smeta;
	struct pblk_emeta *emeta;
	struct line_smeta *smeta_buf;
	int found_lines = 0, recovered_lines = 0, open_lines = 0;
	int is_next = 0;
	int meta_line;
	int i, valid_uuid = 0;
	LIST_HEAD(recov_list);

	/* TODO: Implement FTL snapshot */

	/* Scan recovery - takes place when FTL snapshot fails */
	spin_lock(&l_mg->free_lock);
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	set_bit(meta_line, &l_mg->meta_bitmap);
	smeta = l_mg->sline_meta[meta_line];
	emeta = l_mg->eline_meta[meta_line];
	smeta_buf = (struct line_smeta *)smeta;
	spin_unlock(&l_mg->free_lock);

	/* Order data lines using their sequence number */
	for (i = 0; i < l_mg->nr_lines; i++) {
		u32 crc;

		line = &pblk->lines[i];

		memset(smeta, 0, lm->smeta_len);
		line->smeta = smeta;
		line->lun_bitmap = ((void *)(smeta_buf)) +
						sizeof(struct line_smeta);

		/* Lines that cannot be read are assumed as not written here */
		if (pblk_line_read_smeta(pblk, line))
			continue;

		crc = pblk_calc_smeta_crc(pblk, smeta_buf);
		if (le32_to_cpu(smeta_buf->crc) != crc)
			continue;

		if (le32_to_cpu(smeta_buf->header.identifier) != PBLK_MAGIC)
			continue;

		if (le16_to_cpu(smeta_buf->header.version) != 1) {
			pr_err("pblk: found incompatible line version %u\n",
					smeta_buf->header.version);
			return ERR_PTR(-EINVAL);
		}

		/* The first valid instance uuid is used for initialization */
		if (!valid_uuid) {
			memcpy(pblk->instance_uuid, smeta_buf->header.uuid, 16);
			valid_uuid = 1;
		}

		if (memcmp(pblk->instance_uuid, smeta_buf->header.uuid, 16)) {
			pr_debug("pblk: ignore line %u due to uuid mismatch\n",
					i);
			continue;
		}

		/* Update line metadata */
		spin_lock(&line->lock);
		line->id = le32_to_cpu(smeta_buf->header.id);
		line->type = le16_to_cpu(smeta_buf->header.type);
		line->seq_nr = le64_to_cpu(smeta_buf->seq_nr);
		spin_unlock(&line->lock);

		/* Update general metadata */
		spin_lock(&l_mg->free_lock);
		if (line->seq_nr >= l_mg->d_seq_nr)
			l_mg->d_seq_nr = line->seq_nr + 1;
		l_mg->nr_free_lines--;
		spin_unlock(&l_mg->free_lock);

		if (pblk_line_recov_alloc(pblk, line))
			goto out;

		pblk_recov_line_add_ordered(&recov_list, line);
		found_lines++;
		pr_debug("pblk: recovering data line %d, seq:%llu\n",
						line->id, smeta_buf->seq_nr);
	}

	if (!found_lines) {
		pblk_setup_uuid(pblk);

		spin_lock(&l_mg->free_lock);
		WARN_ON_ONCE(!test_and_clear_bit(meta_line,
							&l_mg->meta_bitmap));
		spin_unlock(&l_mg->free_lock);

		goto out;
	}

	/* Verify closed blocks and recover this portion of L2P table */
	list_for_each_entry_safe(line, tline, &recov_list, list) {
		int off, nr_bb;

		recovered_lines++;

		/* Calculate where emeta starts based on the line bb */
		off = lm->sec_per_line - lm->emeta_sec[0];
		nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
		off -= nr_bb * geo->sec_per_pl;

		line->emeta_ssec = off;
		line->emeta = emeta;
		memset(line->emeta->buf, 0, lm->emeta_len[0]);

		if (pblk_line_read_emeta(pblk, line, line->emeta->buf)) {
			pblk_recov_l2p_from_oob(pblk, line);
			goto next;
		}

		if (pblk_recov_l2p_from_emeta(pblk, line))
			pblk_recov_l2p_from_oob(pblk, line);

next:
		if (pblk_line_is_full(line)) {
			struct list_head *move_list;

			spin_lock(&line->lock);
			line->state = PBLK_LINESTATE_CLOSED;
			move_list = pblk_line_gc_list(pblk, line);
			spin_unlock(&line->lock);

			spin_lock(&l_mg->gc_lock);
			list_move_tail(&line->list, move_list);
			spin_unlock(&l_mg->gc_lock);

			mempool_free(line->map_bitmap, pblk->line_meta_pool);
			line->map_bitmap = NULL;
			line->smeta = NULL;
			line->emeta = NULL;
		} else {
			if (open_lines > 1)
				pr_err("pblk: failed to recover L2P\n");

			open_lines++;
			line->meta_line = meta_line;
			data_line = line;
		}
	}

	spin_lock(&l_mg->free_lock);
	if (!open_lines) {
		WARN_ON_ONCE(!test_and_clear_bit(meta_line,
							&l_mg->meta_bitmap));
		pblk_line_replace_data(pblk);
	} else {
		/* Allocate next line for preparation */
		l_mg->data_next = pblk_line_get(pblk);
		if (l_mg->data_next) {
			l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
			l_mg->data_next->type = PBLK_LINETYPE_DATA;
			is_next = 1;
		}
	}
	spin_unlock(&l_mg->free_lock);

	if (is_next) {
		pblk_line_erase(pblk, l_mg->data_next);
		pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
	}

out:
	if (found_lines != recovered_lines)
		pr_err("pblk: failed to recover all found lines %d/%d\n",
						found_lines, recovered_lines);

	return data_line;
}
int pblk_recov_pad(struct pblk *pblk)
{
	struct pblk_line *line;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int left_msecs;
	int ret = 0;

	spin_lock(&l_mg->free_lock);
	line = l_mg->data_line;
	left_msecs = line->left_msecs;
	spin_unlock(&l_mg->free_lock);

	ret = pblk_recov_pad_oob(pblk, line, left_msecs);
	if (ret) {
		pr_err("pblk: Tear down padding failed (%d)\n", ret);
		return ret;
	}

	pblk_line_close_meta(pblk, line);
	return ret;
}