/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"
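/* Complete a user write: end the original bios attached to each written
 * ring buffer entry, advance the buffer's sync pointer and release the
 * request's DMA metadata and rqd.
 */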
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
				    struct pblk_c_ctx *c_ctx)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct bio *original_bio;
	unsigned long ret;
	int i;

	for (i = 0; i < c_ctx->nr_valid; i++) {
		struct pblk_w_ctx *w_ctx;

		w_ctx = pblk_rb_w_ctx(&pblk->rwb, c_ctx->sentry + i);
		while ((original_bio = bio_list_pop(&w_ctx->bios)))
			bio_endio(original_bio);
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(c_ctx->nr_valid, &pblk->sync_writes);
#endif

	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, WRITE);

	return ret;
}
static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
					   struct nvm_rq *rqd,
					   struct pblk_c_ctx *c_ctx)
{
	list_del(&c_ctx->list);
	return pblk_end_w_bio(pblk, rqd, c_ctx);
}
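/* Writes must be completed in ring buffer order. If this completion is not
 * the next expected one, park it on pblk->compl_list and drain the list once
 * the in-order completion arrives.
 */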
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
				struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *c, *r;
	unsigned long flags;
	unsigned long pos;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif

	pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);

	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
	if (pos == c_ctx->sentry) {
		pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
			rqd = nvm_rq_from_c_ctx(c);
			if (c->sentry == pos) {
				pos = pblk_end_queued_w_bio(pblk, rqd, c);
				goto retry;
			}
		}
	} else {
		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
		list_add_tail(&c_ctx->list, &pblk->compl_list);
	}
	pblk_rb_sync_end(&pblk->rwb, &flags);
}
/* When a write fails, we are not sure whether the block has grown bad or a page
 * range is more susceptible to write errors. If a high number of pages fail, we
 * assume that the block is bad and we mark it accordingly. In all cases, we
 * remap and resubmit the failed entries as fast as possible; if a flush is
 * waiting on a completion, the whole stack would stall otherwise.
 */
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
	void *comp_bits = &rqd->ppa_status;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_rec_ctx *recovery;
	struct ppa_addr *ppa_list = rqd->ppa_list;
	int nr_ppas = rqd->nr_ppas;
	unsigned int c_entries;
	int bit, ret;

	if (unlikely(nr_ppas == 1))
		ppa_list = &rqd->ppa_addr;

	recovery = mempool_alloc(pblk->rec_pool, GFP_ATOMIC);
	if (!recovery) {
		pr_err("pblk: could not allocate recovery context\n");
		return;
	}
	INIT_LIST_HEAD(&recovery->failed);

	bit = -1;
	while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
		struct pblk_rb_entry *entry;
		struct ppa_addr ppa;

		if (bit > c_ctx->nr_valid) {
			WARN_ONCE(1, "pblk: corrupted write request\n");
			mempool_free(recovery, pblk->rec_pool);
			goto out;
		}

		ppa = ppa_list[bit];
		entry = pblk_rb_sync_scan_entry(&pblk->rwb, &ppa);
		if (!entry) {
			pr_err("pblk: could not scan entry on write failure\n");
			mempool_free(recovery, pblk->rec_pool);
			goto out;
		}

		/* The list is filled first and emptied afterwards. No need for
		 * protecting it with a lock
		 */
		list_add_tail(&entry->index, &recovery->failed);
	}

	c_entries = find_first_bit(comp_bits, nr_ppas);
	ret = pblk_recov_setup_rq(pblk, c_ctx, recovery, comp_bits, c_entries);
	if (ret) {
		pr_err("pblk: could not recover from write failure\n");
		mempool_free(recovery, pblk->rec_pool);
		goto out;
	}

	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
	queue_work(pblk->close_wq, &recovery->ws_rec);

out:
	pblk_complete_write(pblk, rqd, c_ctx);
}
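/* End I/O callback for user data writes. Errored requests are handed to the
 * write-failure recovery path; successful ones complete in order.
 */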
static void pblk_end_io_write(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		return pblk_end_w_fail(pblk, rqd);
	}
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	pblk_complete_write(pblk, rqd, c_ctx);
	atomic_dec(&pblk->inflight_io);
}
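/* End I/O callback for emeta (line metadata) writes. Once all emeta sectors
 * for the line have synced, schedule the line close work.
 */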
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_line *line = m_ctx->private;
	struct pblk_emeta *emeta = line->emeta;
	int sync;

	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		pr_err("pblk: metadata I/O failed. Line %d\n", line->id);
	}
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
	if (sync == emeta->nr_entries)
		pblk_line_run_ws(pblk, line, NULL, pblk_line_close_ws,
								pblk->close_wq);

	bio_put(rqd->bio);
	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
	pblk_free_rqd(pblk, rqd, READ);

	atomic_dec(&pblk->inflight_io);
}
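/* Common write request setup: program opcode and flags, completion callback,
 * and the DMA-able metadata/PPA lists shared by data and emeta writes.
 */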
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   unsigned int nr_secs,
			   nvm_end_io_fn(*end_io))
{
	struct nvm_tgt_dev *dev = pblk->dev;

	/* Setup write request */
	rqd->opcode = NVM_OP_PWRITE;
	rqd->nr_ppas = nr_secs;
	rqd->flags = pblk_set_progr_mode(pblk, WRITE);
	rqd->private = pblk;
	rqd->end_io = end_io;

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list)
		return -ENOMEM;

	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

	return 0;
}
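/* Map the sectors taken from the write buffer to physical addresses. If the
 * next data line still has blocks pending erase, use the mapping variant that
 * also picks a block to erase.
 */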
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   struct pblk_c_ctx *c_ctx, struct ppa_addr *erase_ppa)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *e_line = pblk_line_get_erase(pblk);
	unsigned int valid = c_ctx->nr_valid;
	unsigned int padded = c_ctx->nr_padded;
	unsigned int nr_secs = valid + padded;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;
	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
		pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
	else
		pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
							valid, erase_ppa);

	return 0;
}
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
			struct pblk_c_ctx *c_ctx)
{
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;

	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, rqd->nr_ppas, pblk_end_io_write);
	if (ret)
		return ret;

	pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, c_ctx->nr_valid, 0);

	rqd->ppa_status = (u64)0;
	rqd->flags = pblk_set_progr_mode(pblk, WRITE);

	return ret;
}
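/* Decide how many sectors to write in this iteration, based on the sectors
 * available in the write buffer and any outstanding flush points.
 */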
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
				  unsigned int secs_to_flush)
{
	int secs_to_sync;

	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);

#ifdef CONFIG_NVM_DEBUG
	if ((!secs_to_sync && secs_to_flush)
			|| (secs_to_sync < 0)
			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
		pr_err("pblk: bad sector calculation (a:%d,s:%d,f:%d)\n",
				secs_avail, secs_to_sync, secs_to_flush);
	}
#endif

	return secs_to_sync;
}
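/* Check whether an emeta write to this metadata line would contend with the
 * LUNs addressed by the data I/O about to be submitted.
 */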
static inline int pblk_valid_meta_ppa(struct pblk *pblk,
				      struct pblk_line *meta_line,
				      struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line *data_line;
	struct ppa_addr ppa, ppa_opt;
	u64 paddr;
	int i;

	data_line = &pblk->lines[pblk_dev_ppa_to_line(ppa_list[0])];
	paddr = pblk_lookup_page(pblk, meta_line);
	ppa = addr_to_gen_ppa(pblk, paddr, 0);

	if (test_bit(pblk_ppa_to_pos(geo, ppa), data_line->blk_bitmap))
		return 1;

	/* Schedule a metadata I/O that is half the distance from the data I/O
	 * with regards to the number of LUNs forming the pblk instance. This
	 * balances LUN conflicts across every I/O.
	 *
	 * When the LUN configuration changes (e.g., due to GC), this distance
	 * can align, which would result in a LUN deadlock. In this case, modify
	 * the distance to not be optimal, but allow metadata I/Os to succeed.
	 */
	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
	if (unlikely(ppa_opt.ppa == ppa.ppa)) {
		data_line->meta_distance--;
		return 0;
	}

	for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
		if (ppa_list[i].g.ch == ppa_opt.g.ch &&
					ppa_list[i].g.lun == ppa_opt.g.lun)
			return 1;

	if (test_bit(pblk_ppa_to_pos(geo, ppa_opt), data_line->blk_bitmap)) {
		for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
			if (ppa_list[i].g.ch == ppa.g.ch &&
						ppa_list[i].g.lun == ppa.g.lun)
				return 0;

		return 1;
	}

	return 0;
}
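/* Build and submit one emeta write for the given line, allocating the pages
 * to write from the line's metadata region.
 */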
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = meta_line->emeta;
	struct pblk_g_ctx *m_ctx;
	struct bio *bio;
	struct nvm_rq *rqd;
	void *data;
	u64 paddr;
	int rq_ppas = pblk->min_write_pgs;
	int id = meta_line->id;
	int rq_len;
	int i, j;
	int ret;

	rqd = pblk_alloc_rqd(pblk, READ);
	if (IS_ERR(rqd)) {
		pr_err("pblk: cannot allocate write req.\n");
		return PTR_ERR(rqd);
	}
	m_ctx = nvm_rq_to_pdu(rqd);
	m_ctx->private = meta_line;

	rq_len = rq_ppas * geo->sec_size;
	data = ((void *)emeta->buf) + emeta->mem;

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
	if (ret)
		goto fail_free_bio;

	for (i = 0; i < rqd->nr_ppas; ) {
		spin_lock(&meta_line->lock);
		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
		spin_unlock(&meta_line->lock);
		for (j = 0; j < rq_ppas; j++, i++, paddr++)
			rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
	}

	emeta->mem += rq_len;
	if (emeta->mem >= lm->emeta_len[0]) {
		spin_lock(&l_mg->close_lock);
		list_del(&meta_line->list);
		WARN(!bitmap_full(meta_line->map_bitmap, lm->sec_per_line),
				"pblk: corrupt meta line %d\n", meta_line->id);
		spin_unlock(&l_mg->close_lock);
	}

	pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
		goto fail_rollback;
	}

	return NVM_IO_OK;

fail_rollback:
	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
	spin_lock(&l_mg->close_lock);
	pblk_dealloc_page(pblk, meta_line, rq_ppas);
	list_add(&meta_line->list, &meta_line->list);
	spin_unlock(&l_mg->close_lock);

	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
fail_free_bio:
	if (likely(l_mg->emeta_alloc_type == PBLK_VMALLOC_META))
		bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, READ);
	return ret;
}
static int pblk_sched_meta_io(struct pblk *pblk, struct ppa_addr *prev_list,
			      int prev_n)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *meta_line;

	spin_lock(&l_mg->close_lock);
retry:
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return 0;
	}
	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
	if (bitmap_full(meta_line->map_bitmap, lm->sec_per_line))
		goto retry;
	spin_unlock(&l_mg->close_lock);

	if (!pblk_valid_meta_ppa(pblk, meta_line, prev_list, prev_n))
		return 0;

	return pblk_submit_meta_io(pblk, meta_line);
}
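/* Submit a data write, scheduling a pending emeta write for the previous line
 * or an erase for the next line around it as needed.
 */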
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct ppa_addr erase_ppa;
	int err;

	ppa_set_empty(&erase_ppa);

	/* Assign lbas to ppas and populate request structure */
	err = pblk_setup_w_rq(pblk, rqd, c_ctx, &erase_ppa);
	if (err) {
		pr_err("pblk: could not setup write request: %d\n", err);
		return NVM_IO_ERR;
	}

	if (likely(ppa_empty(erase_ppa))) {
		/* Submit metadata write for previous data line */
		err = pblk_sched_meta_io(pblk, rqd->ppa_list, rqd->nr_ppas);
		if (err) {
			pr_err("pblk: metadata I/O submission failed: %d", err);
			return NVM_IO_ERR;
		}

		/* Submit data write for current data line */
		err = pblk_submit_io(pblk, rqd);
		if (err) {
			pr_err("pblk: data I/O submission failed: %d\n", err);
			return NVM_IO_ERR;
		}
	} else {
		/* Submit data write for current data line */
		err = pblk_submit_io(pblk, rqd);
		if (err) {
			pr_err("pblk: data I/O submission failed: %d\n", err);
			return NVM_IO_ERR;
		}

		/* Submit available erase for next data line */
		if (pblk_blk_erase_async(pblk, erase_ppa)) {
			struct pblk_line *e_line = pblk_line_get_erase(pblk);
			struct nvm_tgt_dev *dev = pblk->dev;
			struct nvm_geo *geo = &dev->geo;
			int bit;

			atomic_inc(&e_line->left_eblks);
			bit = pblk_ppa_to_pos(geo, erase_ppa);
			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
		}
	}

	return NVM_IO_OK;
}
static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, bio, rqd->nr_ppas, c_ctx->nr_padded);
}
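/* One iteration of the write thread: commit sectors from the write buffer,
 * pack them into a write request and submit it to the media.
 */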
static int pblk_submit_write(struct pblk *pblk)
{
	struct bio *bio;
	struct nvm_rq *rqd;
	unsigned int secs_avail, secs_to_sync, secs_to_com;
	unsigned int secs_to_flush;
	unsigned long pos;

	/* If there are no sectors in the cache, flushes (bios without data)
	 * will be cleared on the cache threads
	 */
	secs_avail = pblk_rb_read_count(&pblk->rwb);
	if (!secs_avail)
		return 1;

	secs_to_flush = pblk_rb_sync_point_count(&pblk->rwb);
	if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
		return 1;

	rqd = pblk_alloc_rqd(pblk, WRITE);
	if (IS_ERR(rqd)) {
		pr_err("pblk: cannot allocate write req.\n");
		return 1;
	}

	bio = bio_alloc(GFP_KERNEL, pblk->max_write_pgs);
	if (!bio) {
		pr_err("pblk: cannot allocate write bio\n");
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail, secs_to_flush);
	if (secs_to_sync > pblk->max_write_pgs) {
		pr_err("pblk: bad buffer sync calculation\n");
		goto fail_put_bio;
	}

	secs_to_com = (secs_to_sync > secs_avail) ? secs_avail : secs_to_sync;
	pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);

	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, bio, pos, secs_to_sync,
								secs_avail)) {
		pr_err("pblk: corrupted write bio\n");
		goto fail_put_bio;
	}

	if (pblk_submit_io_set(pblk, rqd))
		goto fail_free_bio;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

	return 0;

fail_free_bio:
	pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
	bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, WRITE);

	return 1;
}
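/* Write thread: keep submitting writes while there is work; otherwise sleep
 * until woken up by new buffer entries or a flush.
 */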
int pblk_write_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		if (!pblk_submit_write(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}