struct pblk *pblk = pad_rq->pblk;
struct nvm_tgt_dev *dev = pblk->dev;
- kref_put(&pad_rq->ref, pblk_recov_complete);
+ pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
+
+ bio_put(rqd->bio);
nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
pblk_free_rqd(pblk, rqd, WRITE);
+
+ atomic_dec(&pblk->inflight_io);
+ kref_put(&pad_rq->ref, pblk_recov_complete);
}
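
The reordering above matters: pblk_up_page() releases the per-page write semaphore taken with pblk_down_page() before submission, and kref_put() now comes last because dropping the final reference invokes pblk_recov_complete() and may free pad_rq, so the bio, the DMA metadata list, the rqd and the inflight counter must be torn down first. As a rough illustration of that last point only (not kernel code; pad_ref, pad_ref_get/pad_ref_put, pad_done and end_io are hypothetical stand-ins for the kref and completion machinery), a minimal user-space sketch:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct pblk_pad_rq: a refcounted pad request shared by
 * the submitter and every in-flight padding I/O. */
struct pad_ref {
        atomic_int refs;
};

/* Runs exactly once, when the last reference is dropped. */
static void pad_done(struct pad_ref *p)
{
        printf("pad request complete, freeing\n");
        free(p);
}

static void pad_ref_get(struct pad_ref *p)
{
        atomic_fetch_add(&p->refs, 1);
}

static void pad_ref_put(struct pad_ref *p)
{
        if (atomic_fetch_sub(&p->refs, 1) == 1)
                pad_done(p);
}

/* Per-I/O completion: tear down this I/O's own resources first, then
 * drop the shared reference; after the put, p may already be freed. */
static void end_io(struct pad_ref *p, void *io_buf)
{
        free(io_buf);           /* plays the role of bio_put()/dma free/rqd free */
        pad_ref_put(p);         /* plays the role of the final kref_put() */
}

int main(void)
{
        struct pad_ref *p = malloc(sizeof(*p));

        if (!p)
                return 1;

        atomic_init(&p->refs, 1);       /* submitter's reference */

        pad_ref_get(p);                 /* one extra reference per in-flight I/O */
        end_io(p, malloc(16));          /* the I/O completes */

        pad_ref_put(p);                 /* submitter drops its reference */
        return 0;
}
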
static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
int left_ppas)
rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
if (rq_ppas < pblk->min_write_pgs) {
pr_err("pblk: corrupted pad line %d\n", line->id);
- goto free_rq;
+ goto fail_free_pad;
}
rq_len = rq_ppas * geo->sec_size;
meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
if (!meta_list) {
ret = -ENOMEM;
- goto free_data;
+ goto fail_free_pad;
}
ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
ret = PTR_ERR(rqd);
goto fail_free_meta;
}
- memset(rqd, 0, pblk_w_rq_size);
- bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
+ bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
+ PBLK_VMALLOC_META, GFP_KERNEL);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
goto fail_free_rqd;
}
kref_get(&pad_rq->ref);
+ pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);
ret = pblk_submit_io(pblk, rqd);
if (ret) {
pr_err("pblk: I/O submission failed: %d\n", ret);
- goto free_data;
+ pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
+ goto fail_free_bio;
}
- atomic_dec(&pblk->inflight_io);
-
left_line_ppas -= rq_ppas;
left_ppas -= rq_ppas;
if (left_ppas && left_line_ppas)
ret = -ETIME;
}
+ if (!pblk_line_is_full(line))
+ pr_err("pblk: corrupted padded line: %d\n", line->id);
+
+ vfree(data);
free_rq:
kfree(pad_rq);
-free_data:
- vfree(data);
return ret;
+fail_free_bio:
+ bio_put(bio);
fail_free_rqd:
pblk_free_rqd(pblk, rqd, WRITE);
fail_free_meta:
nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
+fail_free_pad:
kfree(pad_rq);
+ vfree(data);
return ret;
}
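
The reworked cleanup labels follow the usual layered goto-unwind convention: fail_free_bio, fail_free_rqd, fail_free_meta and fail_free_pad each release one resource and fall through to the labels below, so a failure at any stage (including a submission failure, which releases the write semaphore and jumps to fail_free_bio) frees everything acquired so far in reverse order. A small self-contained sketch of that pattern, using hypothetical malloc'd resources rather than the pblk ones:

#include <stdlib.h>

/* Layered goto unwind: each fail_* label frees one resource and falls
 * through to the labels below it, so every earlier acquisition is also
 * released, in reverse order. */
static int setup_chain(void)
{
        void *pad, *meta, *rqd, *bio;
        int ret = 0;

        pad = malloc(32);
        if (!pad)
                return -1;

        meta = malloc(32);
        if (!meta) {
                ret = -1;
                goto fail_free_pad;
        }

        rqd = malloc(32);
        if (!rqd) {
                ret = -1;
                goto fail_free_meta;
        }

        bio = malloc(32);
        if (!bio) {
                ret = -1;
                goto fail_free_rqd;
        }

        /* ... use the resources; the success path releases them here ... */
        free(bio);
        free(rqd);
        free(meta);
        free(pad);
        return 0;

fail_free_rqd:
        free(rqd);
fail_free_meta:
        free(meta);
fail_free_pad:
        free(pad);
        return ret;
}

int main(void)
{
        return setup_chain() ? 1 : 0;
}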