/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from cache has not been updated
 * and resides at another location in the cache. We guarantee though that if
 * the value is read from the cache, it belongs to the mapped lba. In order
 * to guarantee that writes and reads are ordered, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
                                sector_t lba, struct ppa_addr ppa,
                                int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(pblk_ppa_empty(ppa));
        BUG_ON(!pblk_addr_in_cache(ppa));
#endif

        return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
                                        bio_iter, advanced_bio);
}
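
/*
 * Resolve a multi-sector read against the L2P table. Sectors satisfied from
 * the write buffer (or mapped to an empty ppa) are marked in read_bitmap;
 * the rest are collected in rqd->ppa_list for a device read.
 *
 * Illustrative example: for a 4-sector request where sectors 0 and 2 hit
 * the write buffer, read_bitmap ends up as 0b0101 and only the two holes
 * (sectors 1 and 3) are added to the ppa list.
 */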
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
                                 unsigned long *read_bitmap)
{
        struct bio *bio = rqd->bio;
        struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
        sector_t blba = pblk_get_lba(bio);
        int nr_secs = rqd->nr_ppas;
        bool advanced_bio = false;
        int i, j = 0;

        /* logic error: lba out-of-bounds. Ignore read request */
        if (blba + nr_secs >= pblk->rl.nr_secs) {
                WARN(1, "pblk: read lbas out of bounds\n");
                return;
        }

        pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                struct ppa_addr p = ppas[i];
                sector_t lba = blba + i;

retry:
                if (pblk_ppa_empty(p)) {
                        WARN_ON(test_and_set_bit(i, read_bitmap));

                        if (unlikely(!advanced_bio)) {
                                bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
                                advanced_bio = true;
                        }

                        goto next;
                }

                /* Try to read from write buffer. The address is later checked
                 * on the write buffer to prevent retrieving overwritten data.
                 */
                if (pblk_addr_in_cache(p)) {
                        if (!pblk_read_from_cache(pblk, bio, lba, p, i,
                                                        advanced_bio)) {
                                pblk_lookup_l2p_seq(pblk, &p, lba, 1);
                                goto retry;
                        }
                        WARN_ON(test_and_set_bit(i, read_bitmap));
                        advanced_bio = true;
#ifdef CONFIG_NVM_DEBUG
                        atomic_long_inc(&pblk->cache_reads);
#endif
                } else {
                        /* Read from media non-cached sectors */
                        rqd->ppa_list[j++] = p;
                }

next:
                if (advanced_bio)
                        bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
        }

        if (pblk_io_aligned(pblk, nr_secs))
                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
        else
                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}
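
/* Thin wrapper mapping the pblk_submit_io() result onto NVM_IO_* codes */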
static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd)
{
        int err;

        err = pblk_submit_io(pblk, rqd);
        if (err)
                return NVM_IO_ERR;

        return NVM_IO_OK;
}
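
/*
 * Completion path for device reads: log errors, free the DMA-mapped
 * metadata, complete the original bio when the request carried an internal
 * clone (r_ctx->private), update the debug counters and return the rqd to
 * its pool.
 */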
static void pblk_end_io_read(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
        struct bio *bio = rqd->bio;

        if (rqd->error)
                pblk_log_read_err(pblk, rqd);
#ifdef CONFIG_NVM_DEBUG
        else
                WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n");
#endif

        nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);

        bio_put(bio);
        if (r_ctx->private) {
                struct bio *orig_bio = r_ctx->private;

#ifdef CONFIG_NVM_DEBUG
                WARN_ONCE(orig_bio->bi_status, "pblk: corrupted read bio\n");
#endif
                bio_endio(orig_bio);
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
        atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

        pblk_free_rqd(pblk, rqd, READ);
        atomic_dec(&pblk->inflight_io);
}
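
/*
 * Serve the holes of a read that was only partially hit in the write
 * buffer: allocate an internal bio covering just the sectors whose bit is
 * clear in read_bitmap, read them synchronously from the device, then copy
 * each page back into the original bio at the slot recorded via
 * bio_init_idx.
 */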
static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
                                      unsigned int bio_init_idx,
                                      unsigned long *read_bitmap)
{
        struct bio *new_bio, *bio = rqd->bio;
        struct bio_vec src_bv, dst_bv;
        void *ppa_ptr = NULL;
        void *src_p, *dst_p;
        dma_addr_t dma_ppa_list = 0;
        int nr_secs = rqd->nr_ppas;
        int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
        int i, ret, hole;
        DECLARE_COMPLETION_ONSTACK(wait);

        new_bio = bio_alloc(GFP_KERNEL, nr_holes);
        if (!new_bio) {
                pr_err("pblk: could not alloc read bio\n");
                return NVM_IO_ERR;
        }

        if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
                goto err;

        if (nr_holes != new_bio->bi_vcnt) {
                pr_err("pblk: malformed bio\n");
                goto err;
        }

        new_bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
        new_bio->bi_private = &wait;
        new_bio->bi_end_io = pblk_end_bio_sync;

        rqd->bio = new_bio;
        rqd->nr_ppas = nr_holes;
        rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
        rqd->end_io = NULL;

        if (unlikely(nr_secs > 1 && nr_holes == 1)) {
                ppa_ptr = rqd->ppa_list;
                dma_ppa_list = rqd->dma_ppa_list;
                rqd->ppa_addr = rqd->ppa_list[0];
        }

        ret = pblk_submit_read_io(pblk, rqd);
        if (ret) {
                bio_put(rqd->bio);
                pr_err("pblk: read IO submission failed\n");
                goto err;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: partial read I/O timed out\n");
        }

        if (rqd->error) {
                atomic_long_inc(&pblk->read_failed);
#ifdef CONFIG_NVM_DEBUG
                pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
        }

        if (unlikely(nr_secs > 1 && nr_holes == 1)) {
                rqd->ppa_list = ppa_ptr;
                rqd->dma_ppa_list = dma_ppa_list;
        }

        /* Fill the holes in the original bio */
        i = 0;
        hole = find_first_zero_bit(read_bitmap, nr_secs);
        do {
                src_bv = new_bio->bi_io_vec[i++];
                dst_bv = bio->bi_io_vec[bio_init_idx + hole];

                src_p = kmap_atomic(src_bv.bv_page);
                dst_p = kmap_atomic(dst_bv.bv_page);

                memcpy(dst_p + dst_bv.bv_offset,
                        src_p + src_bv.bv_offset,
                        PBLK_EXPOSED_PAGE_SIZE);

                kunmap_atomic(src_p);
                kunmap_atomic(dst_p);

                mempool_free(src_bv.bv_page, pblk->page_pool);

                hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
        } while (hole < nr_secs);

        bio_put(new_bio);

        /* Complete the original bio and associated request */
        rqd->bio = bio;
        rqd->nr_ppas = nr_secs;
        rqd->private = pblk;

        bio_endio(bio);
        pblk_end_io_read(rqd);
        return NVM_IO_OK;

err:
        /* Free allocated pages in new bio */
        pblk_bio_free_pages(pblk, bio, 0, new_bio->bi_vcnt);
        pblk_end_io_read(rqd);
        return NVM_IO_ERR;
}
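
/*
 * Single-sector counterpart of pblk_read_ppalist_rq(): resolve one lba,
 * set bit 0 of read_bitmap if it is served from the write buffer or maps
 * to an empty ppa, otherwise target the device through rqd->ppa_addr.
 */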
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
                         unsigned long *read_bitmap)
{
        struct bio *bio = rqd->bio;
        struct ppa_addr ppa;
        sector_t lba = pblk_get_lba(bio);

        /* logic error: lba out-of-bounds. Ignore read request */
        if (lba >= pblk->rl.nr_secs) {
                WARN(1, "pblk: read lba out of bounds\n");
                return;
        }

        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
        if (pblk_ppa_empty(ppa)) {
                WARN_ON(test_and_set_bit(0, read_bitmap));
                return;
        }

        /* Try to read from write buffer. The address is later checked on the
         * write buffer to prevent retrieving overwritten data.
         */
        if (pblk_addr_in_cache(ppa)) {
                if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
                        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
                        goto retry;
                }
                WARN_ON(test_and_set_bit(0, read_bitmap));
#ifdef CONFIG_NVM_DEBUG
                atomic_long_inc(&pblk->cache_reads);
#endif
        } else {
                rqd->ppa_addr = ppa;
        }

        rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
}
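
/*
 * Entry point of the read path. Three outcomes are possible:
 * 1. All sectors hit the write buffer: the bio is completed right away.
 * 2. No sector hits the write buffer: the request goes to the device; the
 *    original bio is cloned so read errors can be handled internally.
 * 3. A mix of both: pblk_fill_partial_read_bio() reads the holes from the
 *    device and stitches the result into the original bio.
 */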
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        unsigned int nr_secs = pblk_get_secs(bio);
        struct nvm_rq *rqd;
        unsigned long read_bitmap; /* Max 64 ppas per request */
        unsigned int bio_init_idx;
        int ret = NVM_IO_ERR;

        if (nr_secs > PBLK_MAX_REQ_ADDRS)
                return NVM_IO_ERR;

        bitmap_zero(&read_bitmap, nr_secs);

        rqd = pblk_alloc_rqd(pblk, READ);
        if (IS_ERR(rqd)) {
                pr_err_ratelimited("pblk: not able to alloc rqd");
                return NVM_IO_ERR;
        }

        rqd->opcode = NVM_OP_PREAD;
        rqd->bio = bio;
        rqd->nr_ppas = nr_secs;
        rqd->private = pblk;
        rqd->end_io = pblk_end_io_read;

        /* Save the index for this bio's start. This is needed in case
         * we need to fill a partial read.
         */
        bio_init_idx = pblk_get_bi_idx(bio);

        rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                &rqd->dma_meta_list);
        if (!rqd->meta_list) {
                pr_err("pblk: not able to allocate ppa list\n");
                goto fail_rqd_free;
        }

        if (nr_secs > 1) {
                rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
                rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

                pblk_read_ppalist_rq(pblk, rqd, &read_bitmap);
        } else {
                pblk_read_rq(pblk, rqd, &read_bitmap);
        }

        bio_get(bio);
        if (bitmap_full(&read_bitmap, nr_secs)) {
                bio_endio(bio);
                atomic_inc(&pblk->inflight_io);
                pblk_end_io_read(rqd);
                return NVM_IO_OK;
        }

        /* All sectors are to be read from the device */
        if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
                struct bio *int_bio = NULL;
                struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);

                /* Clone read bio to deal with read errors internally */
                int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
                if (!int_bio) {
                        pr_err("pblk: could not clone read bio\n");
                        return NVM_IO_ERR;
                }

                rqd->bio = int_bio;
                r_ctx->private = bio;

                ret = pblk_submit_read_io(pblk, rqd);
                if (ret) {
                        pr_err("pblk: read IO submission failed\n");
                        if (int_bio)
                                bio_put(int_bio);
                        return ret;
                }

                return NVM_IO_OK;
        }

        /* The read bio request could be partially filled by the write buffer,
         * but there are some holes that need to be read from the drive.
         */
        ret = pblk_fill_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);
        if (ret) {
                pr_err("pblk: failed to perform partial read\n");
                return ret;
        }

        return NVM_IO_OK;

fail_rqd_free:
        pblk_free_rqd(pblk, rqd, READ);
        return ret;
}
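
/*
 * GC read helpers. An lba is only worth collecting if it still maps to the
 * line being garbage collected; mappings that have moved to the write
 * buffer, to another line, or have become empty are skipped by setting the
 * lba slot to ADDR_EMPTY.
 */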
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                              struct pblk_line *line, u64 *lba_list,
                              unsigned int nr_secs)
{
        struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
        int valid_secs = 0;
        int i;

        pblk_lookup_l2p_rand(pblk, ppas, lba_list, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                if (pblk_addr_in_cache(ppas[i]) || ppas[i].g.blk != line->id ||
                                                pblk_ppa_empty(ppas[i])) {
                        lba_list[i] = ADDR_EMPTY;
                        continue;
                }

                rqd->ppa_list[valid_secs++] = ppas[i];
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

        return valid_secs;
}
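
/* Single-sector GC lookup, done under trans_lock; returns the number of
 * valid sectors (0 or 1).
 */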
static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                      struct pblk_line *line, sector_t lba)
{
        struct ppa_addr ppa;
        int valid_secs = 0;

        if (lba == ADDR_EMPTY)
                goto out;

        /* logic error: lba out-of-bounds */
        if (lba >= pblk->rl.nr_secs) {
                WARN(1, "pblk: read lba out of bounds\n");
                goto out;
        }

        spin_lock(&pblk->trans_lock);
        ppa = pblk_trans_map_get(pblk, lba);
        spin_unlock(&pblk->trans_lock);

        /* Ignore mappings that have been updated in the meantime */
        if (pblk_addr_in_cache(ppa) || ppa.g.blk != line->id ||
                                                        pblk_ppa_empty(ppa))
                goto out;

        rqd->ppa_addr = ppa;
        valid_secs = 1;

#ifdef CONFIG_NVM_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
#endif

out:
        return valid_secs;
}
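
/*
 * Synchronous read of victim sectors on behalf of the garbage collector.
 * The caller's buffer (data) is mapped into a bio, completion is waited
 * for on-stack with a PBLK_COMMAND_TIMEOUT_MS timeout, and *secs_to_gc
 * reports how many sectors were actually valid for collection.
 */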
int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
                        unsigned int nr_secs, unsigned int *secs_to_gc,
                        struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct bio *bio;
        struct nvm_rq rqd;
        int ret, data_len;
        DECLARE_COMPLETION_ONSTACK(wait);

        memset(&rqd, 0, sizeof(struct nvm_rq));

        rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd.dma_meta_list);
        if (!rqd.meta_list)
                return NVM_IO_ERR;

        if (nr_secs > 1) {
                rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
                rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

                *secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, line, lba_list,
                                                                nr_secs);
                if (*secs_to_gc == 1)
                        rqd.ppa_addr = rqd.ppa_list[0];
        } else {
                *secs_to_gc = read_rq_gc(pblk, &rqd, line, lba_list[0]);
        }

        if (!(*secs_to_gc))
                goto out;

        data_len = (*secs_to_gc) * geo->sec_size;
        bio = pblk_bio_map_addr(pblk, data, *secs_to_gc, data_len,
                                                PBLK_KMALLOC_META, GFP_KERNEL);
        if (IS_ERR(bio)) {
                pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
                goto err_free_dma;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);

        rqd.opcode = NVM_OP_PREAD;
        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;
        rqd.bio = bio;
        rqd.nr_ppas = *secs_to_gc;
        rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

        ret = pblk_submit_read_io(pblk, &rqd);
        if (ret) {
                bio_endio(bio);
                pr_err("pblk: GC read request failed\n");
                goto err_free_dma;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: GC read I/O timed out\n");
        }
        atomic_dec(&pblk->inflight_io);

        if (rqd.error) {
                atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_DEBUG
                pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(*secs_to_gc, &pblk->sync_reads);
        atomic_long_add(*secs_to_gc, &pblk->recov_gc_reads);
        atomic_long_sub(*secs_to_gc, &pblk->inflight_reads);
#endif

out:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return NVM_IO_OK;

err_free_dma:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return NVM_IO_ERR;
}