/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */

#include "pblk.h"
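
/*
 * Erase failures are detected in the erase completion path, where the
 * device bad-block table cannot be updated directly. pblk_mark_bb() below
 * therefore copies the failing ppa (GFP_ATOMIC) and defers the
 * nvm_set_tgt_bb_tbl() call to the bb_wq workqueue, where
 * pblk_line_mark_bb() runs in process context.
 */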
static void pblk_line_mark_bb(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct ppa_addr *ppa = line_ws->priv;
	int ret;

	ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
	if (ret) {
		struct pblk_line *line;
		int pos;

		line = &pblk->lines[pblk_ppa_to_line(*ppa)];
		pos = pblk_ppa_to_pos(&dev->geo, *ppa);

		pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
				line->id, pos);
	}

	kfree(ppa);
	mempool_free(line_ws, &pblk->gen_ws_pool);
}

static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
			 struct ppa_addr ppa_addr)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa;
	int pos = pblk_ppa_to_pos(geo, ppa_addr);

	pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
	atomic_long_inc(&pblk->erase_failed);

	atomic_dec(&line->blk_in_line);
	if (test_and_set_bit(pos, line->blk_bitmap))
		pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
							line->id, pos);

	/* Not necessary to mark bad blocks on the 2.0 spec. */
	if (geo->version == NVM_OCSSD_SPEC_20)
		return;

	ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
	if (!ppa)
		return;

	*ppa = ppa_addr;
	pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
						GFP_ATOMIC, pblk->bb_wq);
}

static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_chk_meta *chunk;
	struct pblk_line *line;
	int pos;

	line = &pblk->lines[pblk_ppa_to_line(rqd->ppa_addr)];
	pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
	chunk = &line->chks[pos];

	atomic_dec(&line->left_seblks);

	if (rqd->error) {
		chunk->state = NVM_CHK_ST_OFFLINE;
		pblk_mark_bb(pblk, line, rqd->ppa_addr);
	} else {
		chunk->state = NVM_CHK_ST_FREE;
	}

	atomic_dec(&pblk->inflight_io);
}

/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;

	__pblk_end_io_erase(pblk, rqd);
	mempool_free(rqd, &pblk->e_rq_pool);
}

/*
 * Get information for all chunks from the device.
 *
 * The caller is responsible for freeing the returned structure.
 */
struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_chk_meta *meta;
	struct ppa_addr ppa;
	unsigned long len;
	int ret;

	ppa.ppa = 0;

	len = geo->all_chunks * sizeof(*meta);
	meta = kzalloc(len, GFP_KERNEL);
	if (!meta)
		return ERR_PTR(-ENOMEM);

	ret = nvm_get_chunk_meta(dev, meta, ppa, geo->all_chunks);
	if (ret) {
		kfree(meta);
		return ERR_PTR(-EIO);
	}

	return meta;
}

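/*
 * The chunk metadata returned by pblk_chunk_get_info() is a flat array laid
 * out group-major: all chunks of all LUNs in group 0 first, then group 1,
 * and so on. For example, with num_lun = 4 and num_chk = 1000, the chunk at
 * (grp 1, pu 2, chk 7) lives at index 1 * 4000 + 2 * 1000 + 7 = 6007.
 */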
struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
					struct nvm_chk_meta *meta,
					struct ppa_addr ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
	int lun_off = ppa.m.pu * geo->num_chk;
	int chk_off = ppa.m.chk;

	return meta + ch_off + lun_off + chk_off;
}

void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
			   u64 paddr)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;

	/* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
	 * table is modified with reclaimed sectors, a check is done to ensure
	 * that newer updates are not overwritten.
	 */
	spin_lock(&line->lock);
	WARN_ON(line->state == PBLK_LINESTATE_FREE);

	if (test_and_set_bit(paddr, line->invalid_bitmap)) {
		WARN_ONCE(1, "pblk: double invalidate\n");
		spin_unlock(&line->lock);
		return;
	}
	le32_add_cpu(line->vsc, -1);

	if (line->state == PBLK_LINESTATE_CLOSED)
		move_list = pblk_line_gc_list(pblk, line);
	spin_unlock(&line->lock);

	if (move_list) {
		spin_lock(&l_mg->gc_lock);
		spin_lock(&line->lock);
		/* Prevent moving a line that has just been chosen for GC */
		if (line->state == PBLK_LINESTATE_GC) {
			spin_unlock(&line->lock);
			spin_unlock(&l_mg->gc_lock);
			return;
		}
		spin_unlock(&line->lock);

		list_move_tail(&line->list, move_list);
		spin_unlock(&l_mg->gc_lock);
	}
}

void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
	struct pblk_line *line;
	u64 paddr;
	int line_id;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa));
	BUG_ON(pblk_ppa_empty(ppa));
#endif

	line_id = pblk_ppa_to_line(ppa);
	line = &pblk->lines[line_id];
	paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

	__pblk_map_invalidate(pblk, line, paddr);
}

static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
				  unsigned int nr_secs)
{
	sector_t lba;

	spin_lock(&pblk->trans_lock);
	for (lba = slba; lba < slba + nr_secs; lba++) {
		struct ppa_addr ppa;

		ppa = pblk_trans_map_get(pblk, lba);

		if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
			pblk_map_invalidate(pblk, ppa);

		pblk_ppa_set_empty(&ppa);
		pblk_trans_map_set(pblk, lba, ppa);
	}
	spin_unlock(&pblk->trans_lock);
}

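/*
 * Request allocation draws from three mempools: w_rq_pool for user and GC
 * writes (PBLK_WRITE and PBLK_WRITE_INT, using the larger pblk_w_rq_size),
 * r_rq_pool for reads, and e_rq_pool for erases; reads and erases share the
 * generic pblk_g_rq_size. Mempools guarantee forward progress on the I/O
 * path even under memory pressure.
 */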
/* Caller must guarantee that the request is a valid type */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
{
	mempool_t *pool;
	struct nvm_rq *rqd;
	int rq_size;

	switch (type) {
	case PBLK_WRITE:
	case PBLK_WRITE_INT:
		pool = &pblk->w_rq_pool;
		rq_size = pblk_w_rq_size;
		break;
	case PBLK_READ:
		pool = &pblk->r_rq_pool;
		rq_size = pblk_g_rq_size;
		break;
	default:
		pool = &pblk->e_rq_pool;
		rq_size = pblk_g_rq_size;
	}

	rqd = mempool_alloc(pool, GFP_KERNEL);
	memset(rqd, 0, rq_size);

	return rqd;
}

/* Typically used on completion path. Cannot guarantee request consistency */
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	mempool_t *pool;

	switch (type) {
	case PBLK_WRITE:
		kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
		/* fall through: user and GC writes share the same pool */
	case PBLK_WRITE_INT:
		pool = &pblk->w_rq_pool;
		break;
	case PBLK_READ:
		pool = &pblk->r_rq_pool;
		break;
	case PBLK_ERASE:
		pool = &pblk->e_rq_pool;
		break;
	default:
		pr_err("pblk: trying to free unknown rqd type\n");
		return;
	}

	if (rqd->meta_list)
		nvm_dev_dma_free(dev->parent, rqd->meta_list,
				rqd->dma_meta_list);
	mempool_free(rqd, pool);
}

void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
			 int nr_pages)
{
	struct bio_vec bv;
	int i;

	WARN_ON(off + nr_pages != bio->bi_vcnt);

	for (i = off; i < nr_pages + off; i++) {
		bv = bio->bi_io_vec[i];
		mempool_free(bv.bv_page, &pblk->page_bio_pool);
	}
}

int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
		       int nr_pages)
{
	struct request_queue *q = pblk->dev->q;
	struct page *page;
	int i, ret;

	for (i = 0; i < nr_pages; i++) {
		page = mempool_alloc(&pblk->page_bio_pool, flags);

		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
		if (ret != PBLK_EXPOSED_PAGE_SIZE) {
			pr_err("pblk: could not add page to bio\n");
			mempool_free(page, &pblk->page_bio_pool);
			goto err;
		}
	}

	return 0;
err:
	pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
	return -1;
}

void pblk_write_kick(struct pblk *pblk)
{
	wake_up_process(pblk->writer_ts);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}

void pblk_write_timer_fn(struct timer_list *t)
{
	struct pblk *pblk = from_timer(pblk, t, wtimer);

	/* kick the write thread every tick to flush outstanding data */
	pblk_write_kick(pblk);
}

void pblk_write_should_kick(struct pblk *pblk)
{
	unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

	if (secs_avail >= pblk->min_write_pgs)
		pblk_write_kick(pblk);
}

static void pblk_wait_for_meta(struct pblk *pblk)
{
	do {
		if (!atomic_read(&pblk->inflight_io))
			break;

		schedule();
	} while (1);
}

static void pblk_flush_writer(struct pblk *pblk)
{
	pblk_rb_flush(&pblk->rwb);
	do {
		if (!pblk_rb_sync_count(&pblk->rwb))
			break;

		pblk_write_kick(pblk);
		schedule();
	} while (1);
}

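/*
 * Bucket a closed line into a GC list based on its valid sector count
 * (vsc): lines with write errors are queued for reclaim first; a line with
 * no valid sectors left (vsc == 0) goes on the "full" list as the cheapest
 * GC victim; lines under the high/mid thresholds go on the corresponding
 * lists; and a line that is still entirely valid (vsc == sec_in_line) goes
 * on the "empty" list. Any other vsc value is inconsistent and the line is
 * marked corrupt.
 */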
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;
	int vsc = le32_to_cpu(*line->vsc);

	lockdep_assert_held(&line->lock);

	if (line->w_err_gc->has_write_err) {
		if (line->gc_group != PBLK_LINEGC_WERR) {
			line->gc_group = PBLK_LINEGC_WERR;
			move_list = &l_mg->gc_werr_list;
			pblk_rl_werr_line_in(&pblk->rl);
		}
	} else if (!vsc) {
		if (line->gc_group != PBLK_LINEGC_FULL) {
			line->gc_group = PBLK_LINEGC_FULL;
			move_list = &l_mg->gc_full_list;
		}
	} else if (vsc < lm->high_thrs) {
		if (line->gc_group != PBLK_LINEGC_HIGH) {
			line->gc_group = PBLK_LINEGC_HIGH;
			move_list = &l_mg->gc_high_list;
		}
	} else if (vsc < lm->mid_thrs) {
		if (line->gc_group != PBLK_LINEGC_MID) {
			line->gc_group = PBLK_LINEGC_MID;
			move_list = &l_mg->gc_mid_list;
		}
	} else if (vsc < line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_LOW) {
			line->gc_group = PBLK_LINEGC_LOW;
			move_list = &l_mg->gc_low_list;
		}
	} else if (vsc == line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_EMPTY) {
			line->gc_group = PBLK_LINEGC_EMPTY;
			move_list = &l_mg->gc_empty_list;
		}
	} else {
		line->state = PBLK_LINESTATE_CORRUPT;
		line->gc_group = PBLK_LINEGC_NONE;
		move_list = &l_mg->corrupt_list;
		pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
						line->id, vsc,
						line->sec_in_line,
						lm->high_thrs, lm->mid_thrs);
	}

	return move_list;
}

void pblk_discard(struct pblk *pblk, struct bio *bio)
{
	sector_t slba = pblk_get_lba(bio);
	sector_t nr_secs = pblk_get_secs(bio);

	pblk_invalidate_range(pblk, slba, nr_secs);
}

void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	/* Empty page read is not necessarily an error (e.g., L2P recovery) */
	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
		atomic_long_inc(&pblk->read_empty);
		return;
	}

	switch (rqd->error) {
	case NVM_RSP_WARN_HIGHECC:
		atomic_long_inc(&pblk->read_high_ecc);
		break;
	case NVM_RSP_ERR_FAILECC:
	case NVM_RSP_ERR_FAILCRC:
		atomic_long_inc(&pblk->read_failed);
		break;
	default:
		pr_err("pblk: unknown read error:%d\n", rqd->error);
	}
#ifdef CONFIG_NVM_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
{
	pblk->sec_per_write = sec_per_write;
}

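/*
 * Both submission wrappers account the request in pblk->inflight_io, which
 * pblk_wait_for_meta() polls on teardown. With CONFIG_NVM_DEBUG enabled,
 * pblk_check_io() sanity-checks the request before it reaches the device.
 */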
int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

	atomic_inc(&pblk->inflight_io);

#ifdef CONFIG_NVM_DEBUG
	if (pblk_check_io(pblk, rqd))
		return NVM_IO_ERR;
#endif

	return nvm_submit_io(dev, rqd);
}

int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

	atomic_inc(&pblk->inflight_io);

#ifdef CONFIG_NVM_DEBUG
	if (pblk_check_io(pblk, rqd))
		return NVM_IO_ERR;
#endif

	return nvm_submit_io_sync(dev, rqd);
}

static void pblk_bio_map_addr_endio(struct bio *bio)
{
	bio_put(bio);
}

struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
			      unsigned int nr_secs, unsigned int len,
			      int alloc_type, gfp_t gfp_mask)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	void *kaddr = data;
	struct page *page;
	struct bio *bio;
	int i, ret;

	if (alloc_type == PBLK_KMALLOC_META)
		return bio_map_kern(dev->q, kaddr, len, gfp_mask);

	bio = bio_kmalloc(gfp_mask, nr_secs);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_secs; i++) {
		page = vmalloc_to_page(kaddr);
		if (!page) {
			pr_err("pblk: could not map vmalloc bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
		if (ret != PAGE_SIZE) {
			pr_err("pblk: could not add page to bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		kaddr += PAGE_SIZE;
	}

	bio->bi_end_io = pblk_bio_map_addr_endio;
out:
	return bio;
}

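/*
 * pblk_calc_secs() decides how many sectors to submit in one write: as many
 * as possible, always in multiples of the minimal write size. For example,
 * with min_write_pgs = 8 and sec_per_write = 64, 21 available sectors yield
 * 16; 5 available sectors yield 0 unless a flush is pending, in which case
 * the minimum (8) is returned and the write path is expected to pad the
 * difference.
 */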
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
		   unsigned long secs_to_flush)
{
	int max = pblk->sec_per_write;
	int min = pblk->min_write_pgs;
	int secs_to_sync = 0;

	if (secs_avail >= max)
		secs_to_sync = max;
	else if (secs_avail >= min)
		secs_to_sync = min * (secs_avail / min);
	else if (secs_to_flush)
		secs_to_sync = min;

	return secs_to_sync;
}

void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	spin_lock(&line->lock);
	addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	line->cur_sec = addr - nr_secs;

	for (i = 0; i < nr_secs; i++, line->cur_sec--)
		WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
	spin_unlock(&line->lock);
}

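/*
 * Sectors within a line are handed out sequentially: the allocator scans
 * map_bitmap from the current cursor (cur_sec) and sets one bit per mapped
 * sector. __pblk_alloc_page() requires the caller to hold line->lock;
 * pblk_alloc_page() takes it and also charges the line's left_msecs budget.
 */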
u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	lockdep_assert_held(&line->lock);

	/* logic error: ppa out-of-bounds. Prevent generating bad address */
	if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
		WARN(1, "pblk: page allocation out of bounds\n");
		nr_secs = pblk->lm.sec_per_line - line->cur_sec;
	}

	line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	for (i = 0; i < nr_secs; i++, line->cur_sec++)
		WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

	return addr;
}

u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;

	/* Lock needed in case a write fails and a recovery needs to remap
	 * failed write buffer entries
	 */
	spin_lock(&line->lock);
	addr = __pblk_alloc_page(pblk, line, nr_secs);
	line->left_msecs -= nr_secs;
	WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
	spin_unlock(&line->lock);

	return addr;
}

u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
{
	u64 paddr;

	spin_lock(&line->lock);
	paddr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	spin_unlock(&line->lock);

	return paddr;
}

/*
 * Submit emeta to one LUN in the line at a time to avoid a deadlock when
 * taking the per-LUN semaphore.
 */
static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
				     void *emeta_buf, u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	void *ppa_list, *meta_list;
	struct bio *bio;
	struct nvm_rq rqd;
	dma_addr_t dma_ppa_list, dma_meta_list;
	int min = pblk->min_write_pgs;
	int left_ppas = lm->emeta_sec[0];
	int id = line->id;
	int rq_ppas, rq_len;
	int cmd_op, bio_op;
	int i, j;
	int ret;

	if (dir == PBLK_WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
	} else if (dir == PBLK_READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
	} else
		return -EINVAL;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&dma_meta_list);
	if (!meta_list)
		return -ENOMEM;

	ppa_list = meta_list + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

next_rq:
	memset(&rqd, 0, sizeof(struct nvm_rq));

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	rq_len = rq_ppas * geo->csecs;

	bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_rqd_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.meta_list = meta_list;
	rqd.ppa_list = ppa_list;
	rqd.dma_meta_list = dma_meta_list;
	rqd.dma_ppa_list = dma_ppa_list;
	rqd.opcode = cmd_op;
	rqd.nr_ppas = rq_ppas;

	if (dir == PBLK_WRITE) {
		struct pblk_sec_meta *meta_list = rqd.meta_list;

		rqd.flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
		for (i = 0; i < rqd.nr_ppas; ) {
			spin_lock(&line->lock);
			paddr = __pblk_alloc_page(pblk, line, min);
			spin_unlock(&line->lock);
			for (j = 0; j < min; j++, i++, paddr++) {
				meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, id);
			}
		}
	} else {
		for (i = 0; i < rqd.nr_ppas; ) {
			struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
			int pos = pblk_ppa_to_pos(geo, ppa);
			int read_type = PBLK_READ_RANDOM;

			if (pblk_io_aligned(pblk, rq_ppas))
				read_type = PBLK_READ_SEQUENTIAL;
			rqd.flags = pblk_set_read_mode(pblk, read_type);

			while (test_bit(pos, line->blk_bitmap)) {
				paddr += min;
				if (pblk_boundary_paddr_checks(pblk, paddr)) {
					pr_err("pblk: corrupt emeta line:%d\n",
								line->id);
					bio_put(bio);
					ret = -EINTR;
					goto free_rqd_dma;
				}

				ppa = addr_to_gen_ppa(pblk, paddr, id);
				pos = pblk_ppa_to_pos(geo, ppa);
			}

			if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
				pr_err("pblk: corrupt emeta line:%d\n",
								line->id);
				bio_put(bio);
				ret = -EINTR;
				goto free_rqd_dma;
			}

			for (j = 0; j < min; j++, i++, paddr++)
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, line->id);
		}
	}

	ret = pblk_submit_io_sync(pblk, &rqd);
	if (ret) {
		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_rqd_dma;
	}

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		if (dir == PBLK_WRITE)
			pblk_log_write_err(pblk, &rqd);
		else
			pblk_log_read_err(pblk, &rqd);
	}

	emeta_buf += rq_len;
	left_ppas -= rq_ppas;
	if (left_ppas)
		goto next_rq;
free_rqd_dma:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;
}

u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int bit;

	/* This usually only happens on bad lines */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (bit >= lm->blk_per_line)
		return -1;

	return bit * geo->ws_opt;
}

static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
				     u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_meta *lm = &pblk->lm;
	struct bio *bio;
	struct nvm_rq rqd;
	__le64 *lba_list = NULL;
	int i, ret;
	int cmd_op, bio_op;
	int flags;

	if (dir == PBLK_WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
		flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
		lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	} else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
		flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	} else
		return -EINVAL;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_meta_list);
	if (!rqd.meta_list)
		return -ENOMEM;

	rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
	rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_ppa_list;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.opcode = cmd_op;
	rqd.flags = flags;
	rqd.nr_ppas = lm->smeta_sec;

	for (i = 0; i < lm->smeta_sec; i++, paddr++) {
		struct pblk_sec_meta *meta_list = rqd.meta_list;

		rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

		if (dir == PBLK_WRITE) {
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			meta_list[i].lba = lba_list[paddr] = addr_empty;
		}
	}

	/*
	 * This I/O is sent by the write thread when a line is replaced. Since
	 * the write thread is the only one sending write and erase commands,
	 * there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io_sync(pblk, &rqd);
	if (ret) {
		pr_err("pblk: smeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_ppa_list;
	}

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		if (dir == PBLK_WRITE) {
			pblk_log_write_err(pblk, &rqd);
			ret = 1;
		} else if (dir == PBLK_READ)
			pblk_log_read_err(pblk, &rqd);
	}

free_ppa_list:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);

	return ret;
}

int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
	u64 bpaddr = pblk_line_smeta_start(pblk, line);

	return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ_RECOV);
}

int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
			 void *emeta_buf)
{
	return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
						line->emeta_ssec, PBLK_READ);
}

static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
			    struct ppa_addr ppa)
{
	rqd->opcode = NVM_OP_ERASE;
	rqd->ppa_addr = ppa;
	rqd->nr_ppas = 1;
	rqd->flags = pblk_set_progr_mode(pblk, PBLK_ERASE);
	rqd->bio = NULL;
}

static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq rqd;
	int ret = 0;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	pblk_setup_e_rq(pblk, &rqd, ppa);

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io_sync(pblk, &rqd);
	if (ret) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pr_err("pblk: could not sync erase line:%d,blk:%d\n",
					pblk_ppa_to_line(ppa),
					pblk_ppa_to_pos(geo, ppa));

		rqd.error = ret;
		goto out;
	}

out:
	rqd.private = pblk;
	__pblk_end_io_erase(pblk, &rqd);

	return ret;
}

int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct ppa_addr ppa;
	int ret, bit = -1;

	/* Erase only good blocks, one at a time */
	do {
		spin_lock(&line->lock);
		bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
								bit + 1);
		if (bit >= lm->blk_per_line) {
			spin_unlock(&line->lock);
			break;
		}

		ppa = pblk->luns[bit].bppa; /* set ch and lun */
		ppa.a.blk = line->id;

		atomic_dec(&line->left_eblks);
		WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
		spin_unlock(&line->lock);

		ret = pblk_blk_erase_sync(pblk, ppa);
		if (ret) {
			pr_err("pblk: failed to erase line %d\n", line->id);
			return ret;
		}
	} while (1);

	return 0;
}

static void pblk_line_setup_metadata(struct pblk_line *line,
				     struct pblk_line_mgmt *l_mg,
				     struct pblk_line_meta *lm)
{
	int meta_line;

	lockdep_assert_held(&l_mg->free_lock);

retry_meta:
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	if (meta_line == PBLK_DATA_LINES) {
		spin_unlock(&l_mg->free_lock);
		io_schedule();
		spin_lock(&l_mg->free_lock);
		goto retry_meta;
	}

	set_bit(meta_line, &l_mg->meta_bitmap);
	line->meta_line = meta_line;

	line->smeta = l_mg->sline_meta[meta_line];
	line->emeta = l_mg->eline_meta[meta_line];

	memset(line->smeta, 0, lm->smeta_len);
	memset(line->emeta->buf, 0, lm->emeta_len[0]);

	line->emeta->mem = 0;
	atomic_set(&line->emeta->sync, 0);
}

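/*
 * Line metadata layout: smeta is written when a line is opened and records
 * the line id, type, sequence number and the link to the previous line;
 * emeta is filled in as the line is written and finalized on close. The
 * header, smeta and emeta CRCs are computed last, once all other fields
 * have been set, and lines are chained through prev_id/next_id so recovery
 * can replay them in sequence order.
 */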
/* For now, lines are always assumed to be full lines. Thus, smeta former and
 * current lun bitmaps are omitted.
 */
static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
				   struct pblk_line *cur)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
	int nr_blk_line;

	/* After erasing the line, new bad blocks might appear and we risk
	 * having an invalid line
	 */
	nr_blk_line = lm->blk_per_line -
			bitmap_weight(line->blk_bitmap, lm->blk_per_line);
	if (nr_blk_line < lm->min_blk_line) {
		spin_lock(&l_mg->free_lock);
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		spin_unlock(&l_mg->free_lock);

		pr_debug("pblk: line %d is bad\n", line->id);

		return 0;
	}

	/* Run-time metadata */
	line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);

	/* Mark LUNs allocated in this line (all for now) */
	bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

	smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
	memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
	smeta_buf->header.id = cpu_to_le32(line->id);
	smeta_buf->header.type = cpu_to_le16(line->type);
	smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
	smeta_buf->header.version_minor = SMETA_VERSION_MINOR;

	/* Start metadata */
	smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);

	/* Fill metadata among lines */
	if (cur) {
		memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
		smeta_buf->prev_id = cpu_to_le32(cur->id);
		cur->emeta->buf->next_id = cpu_to_le32(line->id);
	} else {
		smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
	}

	/* All smeta must be set at this point */
	smeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
	smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));

	/* End metadata */
	memcpy(&emeta_buf->header, &smeta_buf->header,
						sizeof(struct line_header));

	emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
	emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
	emeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &emeta_buf->header));

	emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
	emeta_buf->nr_valid_lbas = cpu_to_le64(0);
	emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
	emeta_buf->crc = cpu_to_le32(0);
	emeta_buf->prev_id = smeta_buf->prev_id;

	return 1;
}

static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;

	line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!line->map_bitmap)
		return -ENOMEM;

	/* will be initialized using bb info from map_bitmap */
	line->invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!line->invalid_bitmap) {
		kfree(line->map_bitmap);
		line->map_bitmap = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* For now, lines are always assumed to be full lines. Thus, smeta former and
 * current lun bitmaps are omitted.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
			     int init)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	u64 off;
	int bit = -1;
	int emeta_secs;

	line->sec_in_line = lm->sec_per_line;

	/* Capture bad block information on line mapping bitmaps */
	while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
					bit + 1)) < lm->blk_per_line) {
		off = bit * geo->ws_opt;
		bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
							lm->sec_per_line);
		bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
							lm->sec_per_line);
		line->sec_in_line -= geo->clba;
	}

	/* Mark smeta metadata sectors as bad sectors */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	off = bit * geo->ws_opt;
	bitmap_set(line->map_bitmap, off, lm->smeta_sec);
	line->sec_in_line -= lm->smeta_sec;
	line->smeta_ssec = off;
	line->cur_sec = off + lm->smeta_sec;

	if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
		pr_debug("pblk: line smeta I/O failed. Retry\n");
		return 0;
	}

	bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

	/* Mark emeta metadata sectors as bad sectors. We need to consider bad
	 * blocks to make sure that there are enough sectors to store emeta
	 */
	emeta_secs = lm->emeta_sec[0];
	off = lm->sec_per_line;
	while (emeta_secs) {
		off -= geo->ws_opt;
		if (!test_bit(off, line->invalid_bitmap)) {
			bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
			emeta_secs -= geo->ws_opt;
		}
	}

	line->emeta_ssec = off;
	line->sec_in_line -= lm->emeta_sec[0];
	line->nr_valid_lbas = 0;
	line->left_msecs = line->sec_in_line;
	*line->vsc = cpu_to_le32(line->sec_in_line);

	if (lm->sec_per_line - line->sec_in_line !=
		bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		pr_err("pblk: unexpected line %d is bad\n", line->id);

		return 0;
	}

	return 1;
}

static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int blk_to_erase = atomic_read(&line->blk_in_line);
	int i;

	for (i = 0; i < lm->blk_per_line; i++) {
		struct pblk_lun *rlun = &pblk->luns[i];
		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
		int state = line->chks[pos].state;

		/* Free chunks should not be erased */
		if (state & NVM_CHK_ST_FREE) {
			set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
							line->erase_bitmap);
			blk_to_erase--;
		}
	}

	return blk_to_erase;
}

static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int blk_in_line = atomic_read(&line->blk_in_line);
	int blk_to_erase;

	/* Bad blocks do not need to be erased */
	bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

	spin_lock(&line->lock);

	/* If we have not written to this line, we need to mark up free chunks
	 * as already erased
	 */
	if (line->state == PBLK_LINESTATE_NEW) {
		blk_to_erase = pblk_prepare_new_line(pblk, line);
		line->state = PBLK_LINESTATE_FREE;
	} else {
		blk_to_erase = blk_in_line;
	}

	if (blk_in_line < lm->min_blk_line) {
		spin_unlock(&line->lock);
		return -EAGAIN;
	}

	if (line->state != PBLK_LINESTATE_FREE) {
		WARN(1, "pblk: corrupted line %d, state %d\n",
							line->id, line->state);
		spin_unlock(&line->lock);
		return -EINTR;
	}

	line->state = PBLK_LINESTATE_OPEN;

	atomic_set(&line->left_eblks, blk_to_erase);
	atomic_set(&line->left_seblks, blk_to_erase);

	line->meta_distance = lm->meta_distance;
	spin_unlock(&line->lock);

	kref_init(&line->ref);

	return 0;
}

int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	l_mg->data_line = line;
	list_del(&line->list);

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		list_add(&line->list, &l_mg->free_list);
		spin_unlock(&l_mg->free_lock);
		return ret;
	}
	spin_unlock(&l_mg->free_lock);

	ret = pblk_line_alloc_bitmaps(pblk, line);
	if (ret)
		return ret;

	if (!pblk_line_init_bb(pblk, line, 0)) {
		list_add(&line->list, &l_mg->free_list);
		return -EINTR;
	}

	pblk_rl_free_lines_dec(&pblk->rl, line, true);
	return 0;
}

void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
	kfree(line->map_bitmap);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}

static void pblk_line_reinit(struct pblk_line *line)
{
	*line->vsc = cpu_to_le32(EMPTY_ENTRY);

	line->map_bitmap = NULL;
	line->invalid_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}

void pblk_line_free(struct pblk_line *line)
{
	kfree(line->map_bitmap);
	kfree(line->invalid_bitmap);

	pblk_line_reinit(line);
}

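/*
 * pblk_line_get() pops the next line off the free list (the caller holds
 * l_mg->free_lock) and prepares it. Lines that turn out to have too many
 * bad blocks are parked on the bad list (-EAGAIN) or corrupt list (-EINTR)
 * and the next free line is tried instead.
 */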
struct pblk_line *pblk_line_get(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;
	int ret, bit;

	lockdep_assert_held(&l_mg->free_lock);

retry:
	if (list_empty(&l_mg->free_list)) {
		pr_err("pblk: no free lines\n");
		return NULL;
	}

	line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
	list_del(&line->list);
	l_mg->nr_free_lines--;

	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (unlikely(bit >= lm->blk_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);

		pr_debug("pblk: line %d is bad\n", line->id);
		goto retry;
	}

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		switch (ret) {
		case -EAGAIN:
			list_add(&line->list, &l_mg->bad_list);
			goto retry;
		case -EINTR:
			list_add(&line->list, &l_mg->corrupt_list);
			goto retry;
		default:
			pr_err("pblk: failed to prepare line %d\n", line->id);
			list_add(&line->list, &l_mg->free_list);
			l_mg->nr_free_lines++;
			return NULL;
		}
	}

	return line;
}

static struct pblk_line *pblk_line_retry(struct pblk *pblk,
					 struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *retry_line;

retry:
	spin_lock(&l_mg->free_lock);
	retry_line = pblk_line_get(pblk);
	if (!retry_line) {
		l_mg->data_line = NULL;
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	retry_line->map_bitmap = line->map_bitmap;
	retry_line->invalid_bitmap = line->invalid_bitmap;
	retry_line->smeta = line->smeta;
	retry_line->emeta = line->emeta;
	retry_line->meta_line = line->meta_line;

	pblk_line_reinit(line);

	l_mg->data_line = retry_line;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, line, false);

	if (pblk_line_erase(pblk, retry_line))
		goto retry;

	return retry_line;
}

static void pblk_set_space_limit(struct pblk *pblk)
{
	struct pblk_rl *rl = &pblk->rl;

	atomic_set(&rl->rb_space, 0);
}

struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;

	spin_lock(&l_mg->free_lock);
	line = pblk_line_get(pblk);
	if (!line) {
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	line->seq_nr = l_mg->d_seq_nr++;
	line->type = PBLK_LINETYPE_DATA;
	l_mg->data_line = line;

	pblk_line_setup_metadata(line, l_mg, &pblk->lm);

	/* Allocate next line for preparation */
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_set_space_limit(pblk);

		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
	}
	spin_unlock(&l_mg->free_lock);

	if (pblk_line_alloc_bitmaps(pblk, line))
		return NULL;

	if (pblk_line_erase(pblk, line)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;
	}

retry_setup:
	if (!pblk_line_init_metadata(pblk, line, NULL)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, line, 1)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	pblk_rl_free_lines_dec(&pblk->rl, line, true);

	return line;
}

static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
{
	lockdep_assert_held(&pblk->l_mg.free_lock);

	pblk_set_space_limit(pblk);
	pblk->state = PBLK_STATE_STOPPING;
}

static void pblk_line_close_meta_sync(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line, *tline;
	LIST_HEAD(list);

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return;
	}

	list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
	spin_unlock(&l_mg->close_lock);

	list_for_each_entry_safe(line, tline, &list, list) {
		struct pblk_emeta *emeta = line->emeta;

		while (emeta->mem < lm->emeta_len[0]) {
			int ret;

			ret = pblk_submit_meta_io(pblk, line);
			if (ret) {
				pr_err("pblk: sync meta line %d failed (%d)\n",
							line->id, ret);
				return;
			}
		}
	}

	pblk_wait_for_meta(pblk);
	flush_workqueue(pblk->close_wq);
}

void __pblk_pipeline_flush(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	if (pblk->state == PBLK_STATE_RECOVERING ||
					pblk->state == PBLK_STATE_STOPPED) {
		spin_unlock(&l_mg->free_lock);
		return;
	}
	pblk->state = PBLK_STATE_RECOVERING;
	spin_unlock(&l_mg->free_lock);

	pblk_flush_writer(pblk);
	pblk_wait_for_meta(pblk);

	ret = pblk_recov_pad(pblk);
	if (ret) {
		pr_err("pblk: could not close data on teardown(%d)\n", ret);
		return;
	}

	flush_workqueue(pblk->bb_wq);
	pblk_line_close_meta_sync(pblk);
}

void __pblk_pipeline_stop(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	spin_lock(&l_mg->free_lock);
	pblk->state = PBLK_STATE_STOPPED;
	l_mg->data_line = NULL;
	l_mg->data_next = NULL;
	spin_unlock(&l_mg->free_lock);
}

void pblk_pipeline_stop(struct pblk *pblk)
{
	__pblk_pipeline_flush(pblk);
	__pblk_pipeline_stop(pblk);
}

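/*
 * Swap the active data line: promote data_next (erasing any blocks that
 * still need it), initialize its metadata and bad-block bitmaps, and
 * pre-allocate a new data_next. If no free line is available, writes are
 * stopped gracefully via pblk_stop_writes().
 */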
struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *cur, *new = NULL;
	unsigned int left_seblks;

	cur = l_mg->data_line;
	new = l_mg->data_next;
	if (!new)
		goto out;
	l_mg->data_line = new;

	spin_lock(&l_mg->free_lock);
	pblk_line_setup_metadata(new, l_mg, &pblk->lm);
	spin_unlock(&l_mg->free_lock);

retry_erase:
	left_seblks = atomic_read(&new->left_seblks);
	if (left_seblks) {
		/* If line is not fully erased, erase it */
		if (atomic_read(&new->left_eblks)) {
			if (pblk_line_erase(pblk, new))
				goto out;
		} else {
			io_schedule();
		}
		goto retry_erase;
	}

	if (pblk_line_alloc_bitmaps(pblk, new))
		return NULL;

retry_setup:
	if (!pblk_line_init_metadata(pblk, new, cur)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			goto out;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, new, 1)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			goto out;

		goto retry_setup;
	}

	pblk_rl_free_lines_dec(&pblk->rl, new, true);

	/* Allocate next line for preparation */
	spin_lock(&l_mg->free_lock);
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_stop_writes(pblk, new);
		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
	}
	spin_unlock(&l_mg->free_lock);

out:
	return new;
}

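/*
 * Line references are kref-counted. pblk_line_put() drops a reference
 * synchronously; pblk_line_put_wq() queues the drop on the r_end_wq
 * workqueue so the final put, which takes locks and frees the line's
 * bitmaps, can be deferred out of the calling (e.g. I/O completion)
 * context.
 */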
static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_gc *gc = &pblk->gc;

	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_GC);
	line->state = PBLK_LINESTATE_FREE;
	line->gc_group = PBLK_LINEGC_NONE;
	pblk_line_free(line);

	if (line->w_err_gc->has_write_err) {
		pblk_rl_werr_line_out(&pblk->rl);
		line->w_err_gc->has_write_err = 0;
	}

	spin_unlock(&line->lock);
	atomic_dec(&gc->pipeline_gc);

	spin_lock(&l_mg->free_lock);
	list_add_tail(&line->list, &l_mg->free_list);
	l_mg->nr_free_lines++;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_inc(&pblk->rl, line);
}

static void pblk_line_put_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_put_ws = container_of(work,
						struct pblk_line_ws, ws);
	struct pblk *pblk = line_put_ws->pblk;
	struct pblk_line *line = line_put_ws->line;

	__pblk_line_put(pblk, line);
	mempool_free(line_put_ws, &pblk->gen_ws_pool);
}

void pblk_line_put(struct kref *ref)
{
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;

	__pblk_line_put(pblk, line);
}

void pblk_line_put_wq(struct kref *ref)
{
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;
	struct pblk_line_ws *line_put_ws;

	line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
	if (!line_put_ws)
		return;

	line_put_ws->pblk = pblk;
	line_put_ws->line = line;
	line_put_ws->priv = NULL;

	INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
	queue_work(pblk->r_end_wq, &line_put_ws->ws);
}

int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq *rqd;
	int err;

	rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);

	pblk_setup_e_rq(pblk, rqd, ppa);

	rqd->end_io = pblk_end_io_erase;
	rqd->private = pblk;

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pr_err("pblk: could not async erase line:%d,blk:%d\n",
					pblk_ppa_to_line(ppa),
					pblk_ppa_to_pos(geo, ppa));
	}

	return err;
}

struct pblk_line *pblk_line_get_data(struct pblk *pblk)
{
	return pblk->l_mg.data_line;
}

/* For now, always erase next line */
struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
{
	return pblk->l_mg.data_next;
}

int pblk_line_is_full(struct pblk_line *line)
{
	return (line->left_msecs == 0);
}

static void pblk_line_should_sync_meta(struct pblk *pblk)
{
	if (pblk_rl_is_limit(&pblk->rl))
		pblk_line_close_meta_sync(pblk);
}

void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list;
	int i;

#ifdef CONFIG_NVM_DEBUG
	WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
				"pblk: corrupt closed line %d\n", line->id);
#endif

	spin_lock(&l_mg->free_lock);
	WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
	spin_unlock(&l_mg->free_lock);

	spin_lock(&l_mg->gc_lock);
	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_OPEN);
	line->state = PBLK_LINESTATE_CLOSED;
	move_list = pblk_line_gc_list(pblk, line);

	list_add_tail(&line->list, move_list);

	kfree(line->map_bitmap);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;

	for (i = 0; i < lm->blk_per_line; i++) {
		struct pblk_lun *rlun = &pblk->luns[i];
		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
		int state = line->chks[pos].state;

		/* mark each remaining chunk as closed */
		if (!(state & NVM_CHK_ST_OFFLINE))
			line->chks[pos].state = NVM_CHK_ST_CLOSED;
	}

	spin_unlock(&line->lock);
	spin_unlock(&l_mg->gc_lock);
}

void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);

	/* No need for the exact vsc value; avoid a big line lock and take an
	 * approximate one.
	 */
	memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
	memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);

	wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
	wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
	wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));

	emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
	emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));

	spin_lock(&l_mg->close_lock);
	spin_lock(&line->lock);

	/* Update the in-memory start address for emeta, in case it has
	 * shifted due to write errors
	 */
	if (line->emeta_ssec != line->cur_sec)
		line->emeta_ssec = line->cur_sec;

	list_add_tail(&line->list, &l_mg->emeta_list);
	spin_unlock(&line->lock);
	spin_unlock(&l_mg->close_lock);

	pblk_line_should_sync_meta(pblk);
}

static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	unsigned int lba_list_size = lm->emeta_len[2];
	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
	struct pblk_emeta *emeta = line->emeta;

	w_err_gc->lba_list = pblk_malloc(lba_list_size,
				l_mg->emeta_alloc_type, GFP_KERNEL);
	memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
				lba_list_size);
}

void pblk_line_close_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct pblk_line *line = line_ws->line;
	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;

	/* Write errors make the emeta start address stored in smeta invalid,
	 * so keep a copy of the lba list until we've gc'd the line
	 */
	if (w_err_gc->has_write_err)
		pblk_save_lba_list(pblk, line);

	pblk_line_close(pblk, line);
	mempool_free(line_ws, &pblk->gen_ws_pool);
}

void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		     void (*work)(struct work_struct *), gfp_t gfp_mask,
		     struct workqueue_struct *wq)
{
	struct pblk_line_ws *line_ws;

	line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);

	line_ws->pblk = pblk;
	line_ws->line = line;
	line_ws->priv = priv;

	INIT_WORK(&line_ws->ws, work);
	queue_work(wq, &line_ws->ws);
}

static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
			     int nr_ppas, int pos)
{
	struct pblk_lun *rlun = &pblk->luns[pos];
	int ret;

	/*
	 * Only send one inflight I/O per LUN. Since we map at a page
	 * granularity, all ppas in the I/O will map to the same LUN
	 */
#ifdef CONFIG_NVM_DEBUG
	int i;

	for (i = 1; i < nr_ppas; i++)
		WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
				ppa_list[0].a.ch != ppa_list[i].a.ch);
#endif

	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
	if (ret == -ETIME || ret == -EINTR)
		pr_err("pblk: taking lun semaphore timed out: err %d\n", -ret);
}

void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
}

void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		  unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

	/* If the LUN has been locked for this same request, do not attempt to
	 * lock it again.
	 */
	if (test_and_set_bit(pos, lun_bitmap))
		return;

	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
}

void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

#ifdef CONFIG_NVM_DEBUG
	int i;

	for (i = 1; i < nr_ppas; i++)
		WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
				ppa_list[0].a.ch != ppa_list[i].a.ch);
#endif

	rlun = &pblk->luns[pos];
	up(&rlun->wr_sem);
}

void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int num_lun = geo->all_luns;
	int bit = -1;

	while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
		rlun = &pblk->luns[bit];
		up(&rlun->wr_sem);
	}
}

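/*
 * Illustrative sketch (not part of pblk): how the per-request LUN
 * semaphores pair up. pblk_down_rq() takes each LUN semaphore at most
 * once per request, recording it in lun_bitmap; pblk_up_rq() then
 * releases every LUN recorded in the bitmap once the request completes.
 * The caller below and its bitmap allocation are hypothetical.
 */
static void pblk_example_vec_write(struct pblk *pblk,
				   struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	unsigned long *lun_bitmap;

	lun_bitmap = kcalloc(BITS_TO_LONGS(geo->all_luns),
			     sizeof(unsigned long), GFP_KERNEL);
	if (!lun_bitmap)
		return;

	pblk_down_rq(pblk, ppa_list, nr_ppas, lun_bitmap);

	/* ... submit the vectored write and wait for completion ... */

	pblk_up_rq(pblk, ppa_list, nr_ppas, lun_bitmap);
	kfree(lun_bitmap);
}
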
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
	struct ppa_addr ppa_l2p;

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);

	if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
		pblk_map_invalidate(pblk, ppa_l2p);

	pblk_trans_map_set(pblk, lba, ppa);
	spin_unlock(&pblk->trans_lock);
}

void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

	pblk_update_map(pblk, lba, ppa);
}

int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
		       struct pblk_line *gc_line, u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int ret = 1;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa_new));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
#endif

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return 0;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);

	if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
		spin_lock(&gc_line->lock);
		WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
						"pblk: corrupted GC update");
		spin_unlock(&gc_line->lock);

		ret = 0;
		goto out;
	}

	pblk_trans_map_set(pblk, lba, ppa_new);
out:
	spin_unlock(&pblk->trans_lock);
	return ret;
}

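/*
 * Illustrative sketch (not part of pblk): how a GC writer would consume
 * the return value above. pblk_update_map_gc() returns 0 when the host
 * rewrote the lba while GC was moving it; the cached GC copy is then
 * stale and must not be made visible. The function and error code below
 * are hypothetical.
 */
static int pblk_example_gc_remap(struct pblk *pblk, sector_t lba,
				 struct ppa_addr ppa_cache,
				 struct pblk_line *gc_line, u64 paddr_gc)
{
	if (!pblk_update_map_gc(pblk, lba, ppa_cache, gc_line, paddr_gc))
		return -EEXIST;	/* superseded by a newer host write */

	return 0;
}
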
void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
			 struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
{
	struct ppa_addr ppa_l2p;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa_mapped));
#endif
	/* Invalidate and discard padded entries */
	if (lba == ADDR_EMPTY) {
		atomic64_inc(&pblk->pad_wa);
#ifdef CONFIG_NVM_DEBUG
		atomic_long_inc(&pblk->padded_wb);
#endif
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);
		return;
	}

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);

	/* Do not update L2P if the cacheline has been updated. In this case,
	 * the mapped ppa must be invalidated.
	 */
	if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);
		goto out;
	}

#ifdef CONFIG_NVM_DEBUG
	WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
#endif

	pblk_trans_map_set(pblk, lba, ppa_mapped);
out:
	spin_unlock(&pblk->trans_lock);
}

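/*
 * Illustrative sketch (not part of pblk): life of one L2P entry on the
 * write path, tying the helpers above together. A user write first maps
 * the lba to a ring-buffer cacheline; once the writer flushes that
 * entry to the media, the mapping is swung to the device address,
 * unless a newer host write claimed the lba in between. The helper name
 * is hypothetical; ppa_cache must be a cache address and ppa_dev a
 * device address, per the BUG_ON checks above.
 */
static void pblk_example_l2p_flow(struct pblk *pblk, sector_t lba,
				  struct ppa_addr ppa_cache,
				  struct ppa_addr ppa_dev)
{
	/* the user write lands in the write buffer */
	pblk_update_map_cache(pblk, lba, ppa_cache);

	/* ... writer thread persists the cacheline to ppa_dev ... */

	/* point the lba at the media copy, if still current */
	pblk_update_map_dev(pblk, lba, ppa_dev, ppa_cache);
}
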
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
			 sector_t blba, int nr_secs)
{
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr ppa;

		ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);

		/* If the L2P entry maps to a line, the reference is valid */
		if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
			int line_id = pblk_ppa_to_line(ppa);
			struct pblk_line *line = &pblk->lines[line_id];

			kref_get(&line->ref);
		}
	}
	spin_unlock(&pblk->trans_lock);
}

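/*
 * Illustrative sketch (not part of pblk): pblk_lookup_l2p_seq() takes a
 * kref on every line it resolves a media address from, so the reader
 * must drop those references when the I/O completes. This assumes
 * pblk_line_put() is the kref release callback for line->ref; the
 * helper name below is hypothetical.
 */
static void pblk_example_read_done(struct pblk *pblk, struct ppa_addr *ppas,
				   int nr_secs)
{
	int i;

	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr ppa = ppas[i];

		if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
			struct pblk_line *line;

			line = &pblk->lines[pblk_ppa_to_line(ppa)];
			kref_put(&line->ref, pblk_line_put);
		}
	}
}
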
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
			  u64 *lba_list, int nr_secs)
{
	u64 lba;
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		lba = lba_list[i];
		if (lba != ADDR_EMPTY) {
			/* logic error: lba out-of-bounds. Ignore update */
			if (!(lba < pblk->rl.nr_secs)) {
				WARN(1, "pblk: corrupted L2P map request\n");
				continue;
			}
			ppas[i] = pblk_trans_map_get(pblk, lba);
		}
	}
	spin_unlock(&pblk->trans_lock);
}