/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
                                    struct pblk_c_ctx *c_ctx)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct bio *original_bio;
        unsigned long ret;
        int i;

        for (i = 0; i < c_ctx->nr_valid; i++) {
                struct pblk_w_ctx *w_ctx;

                w_ctx = pblk_rb_w_ctx(&pblk->rwb, c_ctx->sentry + i);
                while ((original_bio = bio_list_pop(&w_ctx->bios)))
                        bio_endio(original_bio);
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(c_ctx->nr_valid, &pblk->sync_writes);
#endif

        ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

        nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);

        bio_put(rqd->bio);
        pblk_free_rqd(pblk, rqd, WRITE);

        return ret;
}
static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
                                           struct nvm_rq *rqd,
                                           struct pblk_c_ctx *c_ctx)
{
        list_del(&c_ctx->list);
        return pblk_end_w_bio(pblk, rqd, c_ctx);
}
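/* Completion path for user data writes. The ring buffer sync pointer only
 * advances in submission order; requests that complete out of order are
 * parked on pblk->compl_list until the request at the current sync position
 * has completed.
 */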
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
                                struct pblk_c_ctx *c_ctx)
{
        struct pblk_c_ctx *c, *r;
        unsigned long flags;
        unsigned long pos;

#ifdef CONFIG_NVM_DEBUG
        atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif

        pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);

        pos = pblk_rb_sync_init(&pblk->rwb, &flags);
        if (pos == c_ctx->sentry) {
                pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
                list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
                        rqd = nvm_rq_from_c_ctx(c);
                        if (c->sentry == pos) {
                                pos = pblk_end_queued_w_bio(pblk, rqd, c);
                                goto retry;
                        }
                }
        } else {
                WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
                list_add_tail(&c_ctx->list, &pblk->compl_list);
        }
        pblk_rb_sync_end(&pblk->rwb, &flags);
}
/* When a write fails, we are not sure whether the block has grown bad or a page
 * range is more susceptible to write errors. If a high number of pages fail, we
 * assume that the block is bad and we mark it accordingly. In all cases, we
 * remap and resubmit the failed entries as fast as possible; if a flush is
 * waiting on a completion, the whole stack would stall otherwise.
 */
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
        void *comp_bits = &rqd->ppa_status;
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        struct pblk_rec_ctx *recovery;
        struct ppa_addr *ppa_list = rqd->ppa_list;
        int nr_ppas = rqd->nr_ppas;
        unsigned int c_entries;
        int bit, ret;

        if (unlikely(nr_ppas == 1))
                ppa_list = &rqd->ppa_addr;

        recovery = mempool_alloc(pblk->rec_pool, GFP_ATOMIC);
        if (!recovery) {
                pr_err("pblk: could not allocate recovery context\n");
                return;
        }
        INIT_LIST_HEAD(&recovery->failed);

        bit = -1;
        while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
                struct pblk_rb_entry *entry;
                struct ppa_addr ppa;

                /* Logic error */
                if (bit > c_ctx->nr_valid) {
                        WARN_ONCE(1, "pblk: corrupted write request\n");
                        mempool_free(recovery, pblk->rec_pool);
                        goto out;
                }

                ppa = ppa_list[bit];
                entry = pblk_rb_sync_scan_entry(&pblk->rwb, &ppa);
                if (!entry) {
                        pr_err("pblk: could not scan entry on write failure\n");
                        mempool_free(recovery, pblk->rec_pool);
                        goto out;
                }

                /* The list is filled first and emptied afterwards. No need for
                 * protecting it with a lock
                 */
                list_add_tail(&entry->index, &recovery->failed);
        }

        c_entries = find_first_bit(comp_bits, nr_ppas);
        ret = pblk_recov_setup_rq(pblk, c_ctx, recovery, comp_bits, c_entries);
        if (ret) {
                pr_err("pblk: could not recover from write failure\n");
                mempool_free(recovery, pblk->rec_pool);
                goto out;
        }

        INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
        queue_work(pblk->close_wq, &recovery->ws_rec);

out:
        pblk_complete_write(pblk, rqd, c_ctx);
}
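/* end_io callback for user data writes (installed by pblk_alloc_w_rq) */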
static void pblk_end_io_write(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

        if (rqd->error) {
                pblk_log_write_err(pblk, rqd);
                return pblk_end_w_fail(pblk, rqd);
        }
#ifdef CONFIG_NVM_DEBUG
        else
                WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

        pblk_complete_write(pblk, rqd, c_ctx);
        atomic_dec(&pblk->inflight_io);
}
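/* end_io callback for emeta (line metadata) writes. It releases the LUN write
 * semaphore taken in pblk_submit_meta_io and schedules the line close work
 * once all emeta sectors for the line have synced.
 */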
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
        struct pblk_line *line = m_ctx->private;
        struct pblk_emeta *emeta = line->emeta;
        int pos = pblk_ppa_to_pos(geo, rqd->ppa_list[0]);
        struct pblk_lun *rlun = &pblk->luns[pos];
        int sync;

        up(&rlun->wr_sem);

        if (rqd->error) {
                pblk_log_write_err(pblk, rqd);
                pr_err("pblk: metadata I/O failed\n");
        }
#ifdef CONFIG_NVM_DEBUG
        else
                WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

        sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
        if (sync == emeta->nr_entries)
                pblk_line_run_ws(pblk, line, NULL, pblk_line_close_ws,
                                                        pblk->close_wq);

        bio_put(rqd->bio);
        pblk_free_rqd(pblk, rqd, READ);

        atomic_dec(&pblk->inflight_io);
}
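/* Fill in the fields common to all write requests and allocate the DMA-able
 * metadata list (plus the PPA list for multi-sector requests).
 */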
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
                           unsigned int nr_secs,
                           nvm_end_io_fn(*end_io))
{
        struct nvm_tgt_dev *dev = pblk->dev;

        /* Setup write request */
        rqd->opcode = NVM_OP_PWRITE;
        rqd->nr_ppas = nr_secs;
        rqd->flags = pblk_set_progr_mode(pblk, WRITE);
        rqd->private = pblk;
        rqd->end_io = end_io;

        rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd->dma_meta_list);
        if (!rqd->meta_list)
                return -ENOMEM;

        if (unlikely(nr_secs == 1))
                return 0;

        rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
        rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

        return 0;
}
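/* Map the sectors of a user write request to physical addresses. If the
 * current erase line still has blocks pending erase, the mapping goes through
 * pblk_map_erase_rq so an erase can be scheduled alongside the write.
 */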
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
                           struct pblk_c_ctx *c_ctx, struct ppa_addr *erase_ppa)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *e_line = pblk_line_get_erase(pblk);
        unsigned int valid = c_ctx->nr_valid;
        unsigned int padded = c_ctx->nr_padded;
        unsigned int nr_secs = valid + padded;
        unsigned long *lun_bitmap;
        int ret;

        lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
        if (!lun_bitmap)
                return -ENOMEM;
        c_ctx->lun_bitmap = lun_bitmap;

        ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
        if (ret) {
                kfree(lun_bitmap);
                return ret;
        }

        if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
                pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
        else
                pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
                                                        valid, erase_ppa);

        return 0;
}
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
                        struct pblk_c_ctx *c_ctx)
{
        struct pblk_line_meta *lm = &pblk->lm;
        unsigned long *lun_bitmap;
        int ret;

        lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
        if (!lun_bitmap)
                return -ENOMEM;

        c_ctx->lun_bitmap = lun_bitmap;

        ret = pblk_alloc_w_rq(pblk, rqd, rqd->nr_ppas, pblk_end_io_write);
        if (ret)
                return ret;

        pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, c_ctx->nr_valid, 0);

        rqd->ppa_status = (u64)0;
        rqd->flags = pblk_set_progr_mode(pblk, WRITE);

        return ret;
}
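/* Wrapper around pblk_calc_secs() with debug-only sanity checks on the
 * returned sector count.
 */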
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
                                  unsigned int secs_to_flush)
{
        int secs_to_sync;

        secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);

#ifdef CONFIG_NVM_DEBUG
        if ((!secs_to_sync && secs_to_flush)
                        || (secs_to_sync < 0)
                        || (secs_to_sync > secs_avail && !secs_to_flush)) {
                pr_err("pblk: bad sector calculation (a:%d,s:%d,f:%d)\n",
                                secs_avail, secs_to_sync, secs_to_flush);
        }
#endif

        return secs_to_sync;
}
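/* Decide whether a metadata I/O for @meta_line can be scheduled now without
 * creating a LUN conflict (and potential deadlock) with the data I/O
 * described by @ppa_list. Returns 1 when the metadata I/O should be issued.
 */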
static inline int pblk_valid_meta_ppa(struct pblk *pblk,
                                      struct pblk_line *meta_line,
                                      struct ppa_addr *ppa_list, int nr_ppas)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line *data_line;
        struct ppa_addr ppa, ppa_opt;
        u64 paddr;
        int i;

        data_line = &pblk->lines[pblk_dev_ppa_to_line(ppa_list[0])];
        paddr = pblk_lookup_page(pblk, meta_line);
        ppa = addr_to_gen_ppa(pblk, paddr, 0);

        if (test_bit(pblk_ppa_to_pos(geo, ppa), data_line->blk_bitmap))
                return 1;

        /* Schedule a metadata I/O that is half the distance from the data I/O
         * with regards to the number of LUNs forming the pblk instance. This
         * balances LUN conflicts across every I/O.
         *
         * When the LUN configuration changes (e.g., due to GC), this distance
         * can align, which would result on a LUN deadlock. In this case, modify
         * the distance to not be optimal, but allow metadata I/Os to succeed.
         */
        ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
        if (unlikely(ppa_opt.ppa == ppa.ppa)) {
                data_line->meta_distance--;
                return 0;
        }

        for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
                if (ppa_list[i].g.ch == ppa_opt.g.ch &&
                                        ppa_list[i].g.lun == ppa_opt.g.lun)
                        return 1;

        if (test_bit(pblk_ppa_to_pos(geo, ppa_opt), data_line->blk_bitmap)) {
                for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
                        if (ppa_list[i].g.ch == ppa.g.ch &&
                                        ppa_list[i].g.lun == ppa.g.lun)
                                return 0;

                return 1;
        }

        return 0;
}
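/* Build and submit one emeta write (pblk->min_write_pgs sectors) for
 * @meta_line. The request is taken from the READ rqd pool, since its pdu is
 * used as a pblk_g_ctx rather than a write context, and the target LUN's
 * write semaphore is held until pblk_end_io_write_meta releases it.
 */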
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_emeta *emeta = meta_line->emeta;
        struct pblk_g_ctx *m_ctx;
        struct pblk_lun *rlun;
        struct bio *bio;
        struct nvm_rq *rqd;
        void *data;
        u64 paddr;
        int rq_ppas = pblk->min_write_pgs;
        int id = meta_line->id;
        int rq_len;
        int i, j;
        int ret;

        rqd = pblk_alloc_rqd(pblk, READ);
        if (IS_ERR(rqd)) {
                pr_err("pblk: cannot allocate write req.\n");
                return PTR_ERR(rqd);
        }
        m_ctx = nvm_rq_to_pdu(rqd);
        m_ctx->private = meta_line;

        rq_len = rq_ppas * geo->sec_size;
        data = ((void *)emeta->buf) + emeta->mem;

        bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
                                        l_mg->emeta_alloc_type, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto fail_free_rqd;
        }
        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
        rqd->bio = bio;

        ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
        if (ret)
                goto fail_free_bio;

        for (i = 0; i < rqd->nr_ppas; ) {
                spin_lock(&meta_line->lock);
                paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
                spin_unlock(&meta_line->lock);
                for (j = 0; j < rq_ppas; j++, i++, paddr++)
                        rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
        }

        rlun = &pblk->luns[pblk_ppa_to_pos(geo, rqd->ppa_list[0])];
        ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
        if (ret) {
                pr_err("pblk: lun semaphore timed out (%d)\n", ret);
                goto fail_free_bio;
        }

        emeta->mem += rq_len;
        if (emeta->mem >= lm->emeta_len[0]) {
                spin_lock(&l_mg->close_lock);
                list_del(&meta_line->list);
                WARN(!bitmap_full(meta_line->map_bitmap, lm->sec_per_line),
                                "pblk: corrupt meta line %d\n", meta_line->id);
                spin_unlock(&l_mg->close_lock);
        }

        ret = pblk_submit_io(pblk, rqd);
        if (ret) {
                pr_err("pblk: emeta I/O submission failed: %d\n", ret);
                goto fail_rollback;
        }

        return NVM_IO_OK;

fail_rollback:
        spin_lock(&l_mg->close_lock);
        pblk_dealloc_page(pblk, meta_line, rq_ppas);
        list_add(&meta_line->list, &l_mg->emeta_list);
        spin_unlock(&l_mg->close_lock);
fail_free_bio:
        if (likely(l_mg->emeta_alloc_type == PBLK_VMALLOC_META))
                bio_put(bio);
fail_free_rqd:
        pblk_free_rqd(pblk, rqd, READ);
        return ret;
}
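/* Pick the oldest line with outstanding emeta and, if its metadata I/O does
 * not conflict with the data I/O about to be issued, submit it.
 */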
static int pblk_sched_meta_io(struct pblk *pblk, struct ppa_addr *prev_list,
                              int prev_n)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *meta_line;

        spin_lock(&l_mg->close_lock);
retry:
        if (list_empty(&l_mg->emeta_list)) {
                spin_unlock(&l_mg->close_lock);
                return 0;
        }
        meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
        if (bitmap_full(meta_line->map_bitmap, lm->sec_per_line))
                goto retry;
        spin_unlock(&l_mg->close_lock);

        if (!pblk_valid_meta_ppa(pblk, meta_line, prev_list, prev_n))
                return 0;

        return pblk_submit_meta_io(pblk, meta_line);
}
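/* Submit a fully formed user write: set up the request, schedule any pending
 * metadata I/O for a previous line, submit the data I/O and, when a new line
 * is being prepared, account for the asynchronous erase issued for it.
 */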
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        struct ppa_addr erase_ppa;
        int err;

        ppa_set_empty(&erase_ppa);

        /* Assign lbas to ppas and populate request structure */
        err = pblk_setup_w_rq(pblk, rqd, c_ctx, &erase_ppa);
        if (err) {
                pr_err("pblk: could not setup write request: %d\n", err);
                return NVM_IO_ERR;
        }

        if (likely(ppa_empty(erase_ppa))) {
                /* Submit metadata write for previous data line */
                err = pblk_sched_meta_io(pblk, rqd->ppa_list, rqd->nr_ppas);
                if (err) {
                        pr_err("pblk: metadata I/O submission failed: %d", err);
                        return NVM_IO_ERR;
                }

                /* Submit data write for current data line */
                err = pblk_submit_io(pblk, rqd);
                if (err) {
                        pr_err("pblk: data I/O submission failed: %d\n", err);
                        return NVM_IO_ERR;
                }
        } else {
                /* Submit data write for current data line */
                err = pblk_submit_io(pblk, rqd);
                if (err) {
                        pr_err("pblk: data I/O submission failed: %d\n", err);
                        return NVM_IO_ERR;
                }

                /* Submit available erase for next data line */
                if (pblk_blk_erase_async(pblk, erase_ppa)) {
                        struct pblk_line *e_line = pblk_line_get_erase(pblk);
                        struct nvm_tgt_dev *dev = pblk->dev;
                        struct nvm_geo *geo = &dev->geo;
                        int bit;

                        atomic_inc(&e_line->left_eblks);
                        bit = pblk_ppa_to_pos(geo, erase_ppa);
                        WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
                }
        }

        return NVM_IO_OK;
}
static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        struct bio *bio = rqd->bio;

        if (c_ctx->nr_padded)
                pblk_bio_free_pages(pblk, bio, rqd->nr_ppas, c_ctx->nr_padded);
}
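/* Drain the write buffer once: compute how many sectors can be written,
 * commit them in the ring buffer, build the write bio and hand the request
 * to pblk_submit_io_set(). Returns 0 if a request was submitted, 1 if there
 * was nothing to do or submission failed.
 */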
static int pblk_submit_write(struct pblk *pblk)
{
        struct bio *bio;
        struct nvm_rq *rqd;
        unsigned int secs_avail, secs_to_sync, secs_to_com;
        unsigned int secs_to_flush;
        unsigned long pos;

        /* If there are no sectors in the cache, flushes (bios without data)
         * will be cleared on the cache threads
         */
        secs_avail = pblk_rb_read_count(&pblk->rwb);
        if (!secs_avail)
                return 1;

        secs_to_flush = pblk_rb_sync_point_count(&pblk->rwb);
        if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
                return 1;

        rqd = pblk_alloc_rqd(pblk, WRITE);
        if (IS_ERR(rqd)) {
                pr_err("pblk: cannot allocate write req.\n");
                return 1;
        }

        bio = bio_alloc(GFP_KERNEL, pblk->max_write_pgs);
        if (!bio) {
                pr_err("pblk: cannot allocate write bio\n");
                goto fail_free_rqd;
        }
        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
        rqd->bio = bio;

        secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail, secs_to_flush);
        if (secs_to_sync > pblk->max_write_pgs) {
                pr_err("pblk: bad buffer sync calculation\n");
                goto fail_put_bio;
        }

        secs_to_com = (secs_to_sync > secs_avail) ? secs_avail : secs_to_sync;
        pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);

        if (pblk_rb_read_to_bio(&pblk->rwb, rqd, bio, pos, secs_to_sync,
                                                        secs_avail)) {
                pr_err("pblk: corrupted write bio\n");
                goto fail_put_bio;
        }

        if (pblk_submit_io_set(pblk, rqd))
                goto fail_free_bio;

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

        return 0;

fail_free_bio:
        pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
        bio_put(bio);
fail_free_rqd:
        pblk_free_rqd(pblk, rqd, WRITE);

        return 1;
}
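/* Write thread: keep draining the write buffer; sleep when there is nothing
 * (or not enough) to write.
 */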
int pblk_write_ts(void *data)
{
        struct pblk *pblk = data;

        while (!kthread_should_stop()) {
                if (!pblk_submit_write(pblk))
                        continue;
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

        return 0;
}