a4bd217b
JG
1/*
2 * Copyright (C) 2016 CNEX Labs
3 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
4 * Matias Bjorling <matias@cnexlabs.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * pblk-core.c - pblk's core functionality
16 *
17 */
18
19#include "pblk.h"
a4bd217b 20
8bd40020
JG
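/* Worker: mark an erase-failed block as grown bad in the device bad block table */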
21static void pblk_line_mark_bb(struct work_struct *work)
22{
23 struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
24 ws);
25 struct pblk *pblk = line_ws->pblk;
26 struct nvm_tgt_dev *dev = pblk->dev;
27 struct ppa_addr *ppa = line_ws->priv;
28 int ret;
29
30 ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
31 if (ret) {
32 struct pblk_line *line;
33 int pos;
34
35 line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
36 pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);
37
38 pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
39 line->id, pos);
40 }
41
42 kfree(ppa);
43 mempool_free(line_ws, pblk->gen_ws_pool);
44}
45
a4bd217b
JG
46static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
47 struct ppa_addr *ppa)
48{
49 struct nvm_tgt_dev *dev = pblk->dev;
50 struct nvm_geo *geo = &dev->geo;
51 int pos = pblk_dev_ppa_to_pos(geo, *ppa);
52
53 pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
54 atomic_long_inc(&pblk->erase_failed);
55
a44f53fa 56 atomic_dec(&line->blk_in_line);
a4bd217b
JG
57 if (test_and_set_bit(pos, line->blk_bitmap))
58 pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
59 line->id, pos);
60
b84ae4a8
JG
61 pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
62 GFP_ATOMIC, pblk->bb_wq);
a4bd217b
JG
63}
64
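/* Common erase completion: account for the erased block and mark it bad if the erase failed */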
65static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
66{
67 struct pblk_line *line;
68
69 line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
70 atomic_dec(&line->left_seblks);
71
72 if (rqd->error) {
73 struct ppa_addr *ppa;
74
75 ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
76 if (!ppa)
77 return;
78
79 *ppa = rqd->ppa_addr;
80 pblk_mark_bb(pblk, line, ppa);
81 }
588726d3
JG
82
83 atomic_dec(&pblk->inflight_io);
a4bd217b
JG
84}
85
 86/* Erase completion assumes that only one block is erased at a time */
87static void pblk_end_io_erase(struct nvm_rq *rqd)
88{
89 struct pblk *pblk = rqd->private;
90
a4bd217b 91 __pblk_end_io_erase(pblk, rqd);
0d880398 92 mempool_free(rqd, pblk->e_rq_pool);
a4bd217b
JG
93}
94
0880a9aa
JG
95void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
96 u64 paddr)
a4bd217b
JG
97{
98 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
99 struct list_head *move_list = NULL;
100
101 /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
 102 * table is modified with reclaimed sectors, a check is done to ensure
103 * that newer updates are not overwritten.
104 */
105 spin_lock(&line->lock);
d340121e 106 WARN_ON(line->state == PBLK_LINESTATE_FREE);
a4bd217b
JG
107
108 if (test_and_set_bit(paddr, line->invalid_bitmap)) {
109 WARN_ONCE(1, "pblk: double invalidate\n");
110 spin_unlock(&line->lock);
111 return;
112 }
dd2a4343 113 le32_add_cpu(line->vsc, -1);
a4bd217b
JG
114
115 if (line->state == PBLK_LINESTATE_CLOSED)
116 move_list = pblk_line_gc_list(pblk, line);
117 spin_unlock(&line->lock);
118
119 if (move_list) {
120 spin_lock(&l_mg->gc_lock);
121 spin_lock(&line->lock);
122 /* Prevent moving a line that has just been chosen for GC */
d340121e 123 if (line->state == PBLK_LINESTATE_GC) {
a4bd217b
JG
124 spin_unlock(&line->lock);
125 spin_unlock(&l_mg->gc_lock);
126 return;
127 }
128 spin_unlock(&line->lock);
129
130 list_move_tail(&line->list, move_list);
131 spin_unlock(&l_mg->gc_lock);
132 }
133}
134
135void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
136{
137 struct pblk_line *line;
138 u64 paddr;
139 int line_id;
140
141#ifdef CONFIG_NVM_DEBUG
142 /* Callers must ensure that the ppa points to a device address */
143 BUG_ON(pblk_addr_in_cache(ppa));
144 BUG_ON(pblk_ppa_empty(ppa));
145#endif
146
147 line_id = pblk_tgt_ppa_to_line(ppa);
148 line = &pblk->lines[line_id];
149 paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
150
151 __pblk_map_invalidate(pblk, line, paddr);
152}
153
a4bd217b
JG
154static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
155 unsigned int nr_secs)
156{
157 sector_t lba;
158
159 spin_lock(&pblk->trans_lock);
160 for (lba = slba; lba < slba + nr_secs; lba++) {
161 struct ppa_addr ppa;
162
163 ppa = pblk_trans_map_get(pblk, lba);
164
165 if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
166 pblk_map_invalidate(pblk, ppa);
167
168 pblk_ppa_set_empty(&ppa);
169 pblk_trans_map_set(pblk, lba, ppa);
170 }
171 spin_unlock(&pblk->trans_lock);
172}
173
67bf26a3
JG
174/* Caller must guarantee that the request is a valid type */
175struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
a4bd217b
JG
176{
177 mempool_t *pool;
178 struct nvm_rq *rqd;
179 int rq_size;
180
67bf26a3
JG
181 switch (type) {
182 case PBLK_WRITE:
183 case PBLK_WRITE_INT:
a4bd217b
JG
184 pool = pblk->w_rq_pool;
185 rq_size = pblk_w_rq_size;
67bf26a3
JG
186 break;
187 case PBLK_READ:
0d880398 188 pool = pblk->r_rq_pool;
084ec9ba 189 rq_size = pblk_g_rq_size;
67bf26a3
JG
190 break;
191 default:
192 pool = pblk->e_rq_pool;
193 rq_size = pblk_g_rq_size;
a4bd217b
JG
194 }
195
196 rqd = mempool_alloc(pool, GFP_KERNEL);
197 memset(rqd, 0, rq_size);
198
199 return rqd;
200}
201
67bf26a3
JG
 202/* Typically used on the completion path. Cannot guarantee request consistency */
203void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
a4bd217b 204{
67bf26a3 205 struct nvm_tgt_dev *dev = pblk->dev;
a4bd217b
JG
206 mempool_t *pool;
207
67bf26a3
JG
208 switch (type) {
209 case PBLK_WRITE:
210 kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
211 case PBLK_WRITE_INT:
a4bd217b 212 pool = pblk->w_rq_pool;
67bf26a3
JG
213 break;
214 case PBLK_READ:
0d880398 215 pool = pblk->r_rq_pool;
67bf26a3
JG
216 break;
217 case PBLK_ERASE:
218 pool = pblk->e_rq_pool;
219 break;
220 default:
221 pr_err("pblk: trying to free unknown rqd type\n");
222 return;
223 }
a4bd217b 224
84a9a8fb
IK
225 if (rqd->meta_list)
226 nvm_dev_dma_free(dev->parent, rqd->meta_list,
227 rqd->dma_meta_list);
a4bd217b
JG
228 mempool_free(rqd, pool);
229}
230
231void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
232 int nr_pages)
233{
234 struct bio_vec bv;
235 int i;
236
237 WARN_ON(off + nr_pages != bio->bi_vcnt);
238
a4bd217b
JG
239 for (i = off; i < nr_pages + off; i++) {
240 bv = bio->bi_io_vec[i];
bd432417 241 mempool_free(bv.bv_page, pblk->page_bio_pool);
a4bd217b
JG
242 }
243}
244
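/* Add nr_pages pages from the page mempool to the bio; pages already added are freed on failure */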
245int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
246 int nr_pages)
247{
248 struct request_queue *q = pblk->dev->q;
249 struct page *page;
250 int i, ret;
251
252 for (i = 0; i < nr_pages; i++) {
bd432417 253 page = mempool_alloc(pblk->page_bio_pool, flags);
a4bd217b
JG
254
255 ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
256 if (ret != PBLK_EXPOSED_PAGE_SIZE) {
257 pr_err("pblk: could not add page to bio\n");
bd432417 258 mempool_free(page, pblk->page_bio_pool);
a4bd217b
JG
259 goto err;
260 }
261 }
262
263 return 0;
264err:
84a9a8fb 265 pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
a4bd217b
JG
266 return -1;
267}
268
269static void pblk_write_kick(struct pblk *pblk)
270{
271 wake_up_process(pblk->writer_ts);
272 mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
273}
274
87c1d2d3 275void pblk_write_timer_fn(struct timer_list *t)
a4bd217b 276{
87c1d2d3 277 struct pblk *pblk = from_timer(pblk, t, wtimer);
a4bd217b
JG
278
279 /* kick the write thread every tick to flush outstanding data */
280 pblk_write_kick(pblk);
281}
282
283void pblk_write_should_kick(struct pblk *pblk)
284{
285 unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);
286
287 if (secs_avail >= pblk->min_write_pgs)
288 pblk_write_kick(pblk);
289}
290
a4bd217b
JG
291void pblk_end_io_sync(struct nvm_rq *rqd)
292{
293 struct completion *waiting = rqd->private;
294
295 complete(waiting);
296}
297
8bd40020 298static void pblk_wait_for_meta(struct pblk *pblk)
a4bd217b 299{
588726d3
JG
300 do {
301 if (!atomic_read(&pblk->inflight_io))
302 break;
a4bd217b 303
588726d3
JG
304 schedule();
305 } while (1);
306}
a4bd217b 307
588726d3
JG
308static void pblk_flush_writer(struct pblk *pblk)
309{
310 pblk_rb_flush(&pblk->rwb);
311 do {
ee8d5c1a 312 if (!pblk_rb_sync_count(&pblk->rwb))
588726d3 313 break;
a4bd217b 314
ee8d5c1a 315 pblk_write_kick(pblk);
588726d3
JG
316 schedule();
317 } while (1);
a4bd217b
JG
318}
319
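/* Pick the GC list matching the line's valid sector count (vsc): full, high, mid, low, empty or corrupt */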
320struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
321{
322 struct pblk_line_meta *lm = &pblk->lm;
323 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
324 struct list_head *move_list = NULL;
dd2a4343 325 int vsc = le32_to_cpu(*line->vsc);
a4bd217b 326
476118c9
JG
327 lockdep_assert_held(&line->lock);
328
dd2a4343 329 if (!vsc) {
a4bd217b
JG
330 if (line->gc_group != PBLK_LINEGC_FULL) {
331 line->gc_group = PBLK_LINEGC_FULL;
332 move_list = &l_mg->gc_full_list;
333 }
b20ba1bc 334 } else if (vsc < lm->high_thrs) {
a4bd217b
JG
335 if (line->gc_group != PBLK_LINEGC_HIGH) {
336 line->gc_group = PBLK_LINEGC_HIGH;
337 move_list = &l_mg->gc_high_list;
338 }
b20ba1bc 339 } else if (vsc < lm->mid_thrs) {
a4bd217b
JG
340 if (line->gc_group != PBLK_LINEGC_MID) {
341 line->gc_group = PBLK_LINEGC_MID;
342 move_list = &l_mg->gc_mid_list;
343 }
dd2a4343 344 } else if (vsc < line->sec_in_line) {
a4bd217b
JG
345 if (line->gc_group != PBLK_LINEGC_LOW) {
346 line->gc_group = PBLK_LINEGC_LOW;
347 move_list = &l_mg->gc_low_list;
348 }
dd2a4343 349 } else if (vsc == line->sec_in_line) {
a4bd217b
JG
350 if (line->gc_group != PBLK_LINEGC_EMPTY) {
351 line->gc_group = PBLK_LINEGC_EMPTY;
352 move_list = &l_mg->gc_empty_list;
353 }
354 } else {
355 line->state = PBLK_LINESTATE_CORRUPT;
356 line->gc_group = PBLK_LINEGC_NONE;
357 move_list = &l_mg->corrupt_list;
358 pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
dd2a4343 359 line->id, vsc,
a4bd217b
JG
360 line->sec_in_line,
361 lm->high_thrs, lm->mid_thrs);
362 }
363
364 return move_list;
365}
366
367void pblk_discard(struct pblk *pblk, struct bio *bio)
368{
369 sector_t slba = pblk_get_lba(bio);
370 sector_t nr_secs = pblk_get_secs(bio);
371
372 pblk_invalidate_range(pblk, slba, nr_secs);
373}
374
a4bd217b
JG
375void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
376{
377 atomic_long_inc(&pblk->write_failed);
378#ifdef CONFIG_NVM_DEBUG
379 pblk_print_failed_rqd(pblk, rqd, rqd->error);
380#endif
381}
382
383void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
384{
385 /* Empty page read is not necessarily an error (e.g., L2P recovery) */
386 if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
387 atomic_long_inc(&pblk->read_empty);
388 return;
389 }
390
391 switch (rqd->error) {
392 case NVM_RSP_WARN_HIGHECC:
393 atomic_long_inc(&pblk->read_high_ecc);
394 break;
395 case NVM_RSP_ERR_FAILECC:
396 case NVM_RSP_ERR_FAILCRC:
397 atomic_long_inc(&pblk->read_failed);
398 break;
399 default:
400 pr_err("pblk: unknown read error:%d\n", rqd->error);
401 }
402#ifdef CONFIG_NVM_DEBUG
403 pblk_print_failed_rqd(pblk, rqd, rqd->error);
404#endif
405}
406
c2e9f5d4
JG
407void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
408{
409 pblk->sec_per_write = sec_per_write;
410}
411
a4bd217b
JG
412int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
413{
414 struct nvm_tgt_dev *dev = pblk->dev;
415
416#ifdef CONFIG_NVM_DEBUG
1a94b2d4 417 int ret;
a4bd217b 418
1a94b2d4
JG
419 ret = pblk_check_io(pblk, rqd);
420 if (ret)
421 return ret;
422#endif
a4bd217b 423
1a94b2d4 424 atomic_inc(&pblk->inflight_io);
a4bd217b 425
1a94b2d4
JG
426 return nvm_submit_io(dev, rqd);
427}
a4bd217b 428
1a94b2d4
JG
429int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
430{
431 struct nvm_tgt_dev *dev = pblk->dev;
432
433#ifdef CONFIG_NVM_DEBUG
434 int ret;
435
436 ret = pblk_check_io(pblk, rqd);
437 if (ret)
438 return ret;
a4bd217b 439#endif
588726d3
JG
440
441 atomic_inc(&pblk->inflight_io);
442
1a94b2d4 443 return nvm_submit_io_sync(dev, rqd);
a4bd217b
JG
444}
445
55e836d4
JG
446static void pblk_bio_map_addr_endio(struct bio *bio)
447{
448 bio_put(bio);
449}
450
a4bd217b
JG
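/* Map a metadata buffer into a bio: kmalloc'ed buffers are mapped directly, vmalloc'ed buffers page by page */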
451struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
452 unsigned int nr_secs, unsigned int len,
de54e703 453 int alloc_type, gfp_t gfp_mask)
a4bd217b
JG
454{
455 struct nvm_tgt_dev *dev = pblk->dev;
a4bd217b
JG
456 void *kaddr = data;
457 struct page *page;
458 struct bio *bio;
459 int i, ret;
460
de54e703 461 if (alloc_type == PBLK_KMALLOC_META)
a4bd217b
JG
462 return bio_map_kern(dev->q, kaddr, len, gfp_mask);
463
464 bio = bio_kmalloc(gfp_mask, nr_secs);
465 if (!bio)
466 return ERR_PTR(-ENOMEM);
467
468 for (i = 0; i < nr_secs; i++) {
469 page = vmalloc_to_page(kaddr);
470 if (!page) {
471 pr_err("pblk: could not map vmalloc bio\n");
472 bio_put(bio);
473 bio = ERR_PTR(-ENOMEM);
474 goto out;
475 }
476
477 ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
478 if (ret != PAGE_SIZE) {
479 pr_err("pblk: could not add page to bio\n");
480 bio_put(bio);
481 bio = ERR_PTR(-ENOMEM);
482 goto out;
483 }
484
485 kaddr += PAGE_SIZE;
486 }
55e836d4
JG
487
488 bio->bi_end_io = pblk_bio_map_addr_endio;
a4bd217b
JG
489out:
490 return bio;
491}
492
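/* Number of sectors to submit: capped at sec_per_write and rounded down to a multiple of min_write_pgs; a flush forces at least a minimum write */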
493int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
494 unsigned long secs_to_flush)
495{
c2e9f5d4 496 int max = pblk->sec_per_write;
a4bd217b
JG
497 int min = pblk->min_write_pgs;
498 int secs_to_sync = 0;
499
500 if (secs_avail >= max)
501 secs_to_sync = max;
502 else if (secs_avail >= min)
503 secs_to_sync = min * (secs_avail / min);
504 else if (secs_to_flush)
505 secs_to_sync = min;
506
507 return secs_to_sync;
508}
509
dd2a4343
JG
510void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
511{
512 u64 addr;
513 int i;
514
e57903fd 515 spin_lock(&line->lock);
dd2a4343
JG
516 addr = find_next_zero_bit(line->map_bitmap,
517 pblk->lm.sec_per_line, line->cur_sec);
518 line->cur_sec = addr - nr_secs;
519
520 for (i = 0; i < nr_secs; i++, line->cur_sec--)
521 WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
e57903fd 522 spin_unlock(&line->lock);
dd2a4343
JG
523}
524
525u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
a4bd217b
JG
526{
527 u64 addr;
528 int i;
529
476118c9
JG
530 lockdep_assert_held(&line->lock);
531
a4bd217b
JG
 532 /* logic error: ppa out-of-bounds. Prevent generating a bad address */
533 if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
534 WARN(1, "pblk: page allocation out of bounds\n");
535 nr_secs = pblk->lm.sec_per_line - line->cur_sec;
536 }
537
538 line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
539 pblk->lm.sec_per_line, line->cur_sec);
540 for (i = 0; i < nr_secs; i++, line->cur_sec++)
541 WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));
542
543 return addr;
544}
545
546u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
547{
548 u64 addr;
549
550 /* Lock needed in case a write fails and a recovery needs to remap
551 * failed write buffer entries
552 */
553 spin_lock(&line->lock);
554 addr = __pblk_alloc_page(pblk, line, nr_secs);
555 line->left_msecs -= nr_secs;
556 WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
557 spin_unlock(&line->lock);
558
559 return addr;
560}
561
dd2a4343
JG
562u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
563{
564 u64 paddr;
565
566 spin_lock(&line->lock);
567 paddr = find_next_zero_bit(line->map_bitmap,
568 pblk->lm.sec_per_line, line->cur_sec);
569 spin_unlock(&line->lock);
570
571 return paddr;
572}
573
a4bd217b
JG
574/*
 575 * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
576 * taking the per LUN semaphore.
577 */
578static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
dd2a4343 579 void *emeta_buf, u64 paddr, int dir)
a4bd217b
JG
580{
581 struct nvm_tgt_dev *dev = pblk->dev;
582 struct nvm_geo *geo = &dev->geo;
de54e703 583 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
a4bd217b 584 struct pblk_line_meta *lm = &pblk->lm;
63e3809c 585 void *ppa_list, *meta_list;
a4bd217b
JG
586 struct bio *bio;
587 struct nvm_rq rqd;
63e3809c 588 dma_addr_t dma_ppa_list, dma_meta_list;
a4bd217b 589 int min = pblk->min_write_pgs;
dd2a4343 590 int left_ppas = lm->emeta_sec[0];
a4bd217b
JG
591 int id = line->id;
592 int rq_ppas, rq_len;
593 int cmd_op, bio_op;
a4bd217b
JG
594 int i, j;
595 int ret;
a4bd217b 596
e2cddf20 597 if (dir == PBLK_WRITE) {
a4bd217b
JG
598 bio_op = REQ_OP_WRITE;
599 cmd_op = NVM_OP_PWRITE;
e2cddf20 600 } else if (dir == PBLK_READ) {
a4bd217b
JG
601 bio_op = REQ_OP_READ;
602 cmd_op = NVM_OP_PREAD;
a4bd217b
JG
603 } else
604 return -EINVAL;
605
63e3809c
JG
606 meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
607 &dma_meta_list);
608 if (!meta_list)
a4bd217b
JG
609 return -ENOMEM;
610
63e3809c
JG
611 ppa_list = meta_list + pblk_dma_meta_size;
612 dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
613
a4bd217b
JG
614next_rq:
615 memset(&rqd, 0, sizeof(struct nvm_rq));
616
617 rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
618 rq_len = rq_ppas * geo->sec_size;
619
de54e703
JG
620 bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
621 l_mg->emeta_alloc_type, GFP_KERNEL);
a4bd217b
JG
622 if (IS_ERR(bio)) {
623 ret = PTR_ERR(bio);
624 goto free_rqd_dma;
625 }
626
627 bio->bi_iter.bi_sector = 0; /* internal bio */
628 bio_set_op_attrs(bio, bio_op, 0);
629
630 rqd.bio = bio;
63e3809c 631 rqd.meta_list = meta_list;
a4bd217b 632 rqd.ppa_list = ppa_list;
63e3809c 633 rqd.dma_meta_list = dma_meta_list;
a4bd217b 634 rqd.dma_ppa_list = dma_ppa_list;
63e3809c
JG
635 rqd.opcode = cmd_op;
636 rqd.nr_ppas = rq_ppas;
a4bd217b 637
e2cddf20 638 if (dir == PBLK_WRITE) {
63e3809c
JG
639 struct pblk_sec_meta *meta_list = rqd.meta_list;
640
e2cddf20 641 rqd.flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
a4bd217b
JG
642 for (i = 0; i < rqd.nr_ppas; ) {
643 spin_lock(&line->lock);
644 paddr = __pblk_alloc_page(pblk, line, min);
645 spin_unlock(&line->lock);
63e3809c
JG
646 for (j = 0; j < min; j++, i++, paddr++) {
647 meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
a4bd217b
JG
648 rqd.ppa_list[i] =
649 addr_to_gen_ppa(pblk, paddr, id);
63e3809c 650 }
a4bd217b
JG
651 }
652 } else {
653 for (i = 0; i < rqd.nr_ppas; ) {
654 struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
655 int pos = pblk_dev_ppa_to_pos(geo, ppa);
f9c10152
JG
656 int read_type = PBLK_READ_RANDOM;
657
658 if (pblk_io_aligned(pblk, rq_ppas))
659 read_type = PBLK_READ_SEQUENTIAL;
660 rqd.flags = pblk_set_read_mode(pblk, read_type);
a4bd217b
JG
661
662 while (test_bit(pos, line->blk_bitmap)) {
663 paddr += min;
664 if (pblk_boundary_paddr_checks(pblk, paddr)) {
665 pr_err("pblk: corrupt emeta line:%d\n",
666 line->id);
667 bio_put(bio);
668 ret = -EINTR;
669 goto free_rqd_dma;
670 }
671
672 ppa = addr_to_gen_ppa(pblk, paddr, id);
673 pos = pblk_dev_ppa_to_pos(geo, ppa);
674 }
675
676 if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
677 pr_err("pblk: corrupt emeta line:%d\n",
678 line->id);
679 bio_put(bio);
680 ret = -EINTR;
681 goto free_rqd_dma;
682 }
683
684 for (j = 0; j < min; j++, i++, paddr++)
685 rqd.ppa_list[i] =
686 addr_to_gen_ppa(pblk, paddr, line->id);
687 }
688 }
689
1a94b2d4 690 ret = pblk_submit_io_sync(pblk, &rqd);
a4bd217b
JG
691 if (ret) {
692 pr_err("pblk: emeta I/O submission failed: %d\n", ret);
693 bio_put(bio);
694 goto free_rqd_dma;
695 }
696
588726d3 697 atomic_dec(&pblk->inflight_io);
a4bd217b 698
a4bd217b 699 if (rqd.error) {
e2cddf20 700 if (dir == PBLK_WRITE)
a4bd217b
JG
701 pblk_log_write_err(pblk, &rqd);
702 else
703 pblk_log_read_err(pblk, &rqd);
704 }
705
dd2a4343 706 emeta_buf += rq_len;
a4bd217b
JG
707 left_ppas -= rq_ppas;
708 if (left_ppas)
709 goto next_rq;
710free_rqd_dma:
63e3809c 711 nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
a4bd217b
JG
712 return ret;
713}
714
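/* smeta starts at the first sector of the first good block in the line */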
715u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
716{
717 struct nvm_tgt_dev *dev = pblk->dev;
718 struct nvm_geo *geo = &dev->geo;
719 struct pblk_line_meta *lm = &pblk->lm;
720 int bit;
721
722 /* This usually only happens on bad lines */
723 bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
724 if (bit >= lm->blk_per_line)
725 return -1;
726
727 return bit * geo->sec_per_pl;
728}
729
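/* Read or write the line's start metadata (smeta) at the given paddr */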
730static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
731 u64 paddr, int dir)
732{
733 struct nvm_tgt_dev *dev = pblk->dev;
734 struct pblk_line_meta *lm = &pblk->lm;
735 struct bio *bio;
736 struct nvm_rq rqd;
737 __le64 *lba_list = NULL;
738 int i, ret;
739 int cmd_op, bio_op;
740 int flags;
a4bd217b 741
e2cddf20 742 if (dir == PBLK_WRITE) {
a4bd217b
JG
743 bio_op = REQ_OP_WRITE;
744 cmd_op = NVM_OP_PWRITE;
e2cddf20 745 flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
dd2a4343 746 lba_list = emeta_to_lbas(pblk, line->emeta->buf);
e2cddf20 747 } else if (dir == PBLK_READ) {
a4bd217b
JG
748 bio_op = REQ_OP_READ;
749 cmd_op = NVM_OP_PREAD;
f9c10152 750 flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
a4bd217b
JG
751 } else
752 return -EINVAL;
753
754 memset(&rqd, 0, sizeof(struct nvm_rq));
755
63e3809c
JG
756 rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
757 &rqd.dma_meta_list);
758 if (!rqd.meta_list)
a4bd217b
JG
759 return -ENOMEM;
760
63e3809c
JG
761 rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
762 rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
763
a4bd217b
JG
764 bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
765 if (IS_ERR(bio)) {
766 ret = PTR_ERR(bio);
767 goto free_ppa_list;
768 }
769
770 bio->bi_iter.bi_sector = 0; /* internal bio */
771 bio_set_op_attrs(bio, bio_op, 0);
772
773 rqd.bio = bio;
774 rqd.opcode = cmd_op;
775 rqd.flags = flags;
776 rqd.nr_ppas = lm->smeta_sec;
a4bd217b
JG
777
778 for (i = 0; i < lm->smeta_sec; i++, paddr++) {
63e3809c
JG
779 struct pblk_sec_meta *meta_list = rqd.meta_list;
780
a4bd217b 781 rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
63e3809c 782
e2cddf20 783 if (dir == PBLK_WRITE) {
f417aa0b 784 __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
63e3809c
JG
785
786 meta_list[i].lba = lba_list[paddr] = addr_empty;
787 }
a4bd217b
JG
788 }
789
790 /*
 791 * This I/O is sent by the write thread when a line is replaced. Since
792 * the write thread is the only one sending write and erase commands,
793 * there is no need to take the LUN semaphore.
794 */
1a94b2d4 795 ret = pblk_submit_io_sync(pblk, &rqd);
a4bd217b
JG
796 if (ret) {
797 pr_err("pblk: smeta I/O submission failed: %d\n", ret);
798 bio_put(bio);
799 goto free_ppa_list;
800 }
801
588726d3 802 atomic_dec(&pblk->inflight_io);
a4bd217b
JG
803
804 if (rqd.error) {
e2cddf20 805 if (dir == PBLK_WRITE)
a4bd217b
JG
806 pblk_log_write_err(pblk, &rqd);
807 else
808 pblk_log_read_err(pblk, &rqd);
809 }
810
811free_ppa_list:
63e3809c 812 nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
a4bd217b
JG
813
814 return ret;
815}
816
817int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
818{
819 u64 bpaddr = pblk_line_smeta_start(pblk, line);
820
e2cddf20 821 return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ);
a4bd217b
JG
822}
823
dd2a4343
JG
824int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
825 void *emeta_buf)
a4bd217b 826{
dd2a4343 827 return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
e2cddf20 828 line->emeta_ssec, PBLK_READ);
a4bd217b
JG
829}
830
831static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
832 struct ppa_addr ppa)
833{
834 rqd->opcode = NVM_OP_ERASE;
835 rqd->ppa_addr = ppa;
836 rqd->nr_ppas = 1;
e2cddf20 837 rqd->flags = pblk_set_progr_mode(pblk, PBLK_ERASE);
a4bd217b
JG
838 rqd->bio = NULL;
839}
840
841static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
842{
b805bf80
MB
843 struct nvm_rq rqd = {NULL};
844 int ret;
a4bd217b
JG
845
846 pblk_setup_e_rq(pblk, &rqd, ppa);
847
a4bd217b
JG
848 /* The write thread schedules erases so that it minimizes disturbances
849 * with writes. Thus, there is no need to take the LUN semaphore.
850 */
1a94b2d4 851 ret = pblk_submit_io_sync(pblk, &rqd);
a4bd217b
JG
852 rqd.private = pblk;
853 __pblk_end_io_erase(pblk, &rqd);
854
588726d3 855 return ret;
a4bd217b
JG
856}
857
858int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
859{
860 struct pblk_line_meta *lm = &pblk->lm;
861 struct ppa_addr ppa;
588726d3 862 int ret, bit = -1;
a4bd217b 863
a44f53fa
JG
864 /* Erase only good blocks, one at a time */
865 do {
866 spin_lock(&line->lock);
867 bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
868 bit + 1);
869 if (bit >= lm->blk_per_line) {
870 spin_unlock(&line->lock);
871 break;
872 }
873
a4bd217b
JG
874 ppa = pblk->luns[bit].bppa; /* set ch and lun */
875 ppa.g.blk = line->id;
876
a44f53fa 877 atomic_dec(&line->left_eblks);
a4bd217b 878 WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
a44f53fa 879 spin_unlock(&line->lock);
a4bd217b 880
588726d3
JG
881 ret = pblk_blk_erase_sync(pblk, ppa);
882 if (ret) {
a4bd217b 883 pr_err("pblk: failed to erase line %d\n", line->id);
588726d3 884 return ret;
a4bd217b 885 }
a44f53fa 886 } while (1);
a4bd217b
JG
887
888 return 0;
889}
890
dd2a4343
JG
891static void pblk_line_setup_metadata(struct pblk_line *line,
892 struct pblk_line_mgmt *l_mg,
893 struct pblk_line_meta *lm)
894{
895 int meta_line;
896
588726d3
JG
897 lockdep_assert_held(&l_mg->free_lock);
898
dd2a4343
JG
899retry_meta:
900 meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
901 if (meta_line == PBLK_DATA_LINES) {
902 spin_unlock(&l_mg->free_lock);
903 io_schedule();
904 spin_lock(&l_mg->free_lock);
905 goto retry_meta;
906 }
907
908 set_bit(meta_line, &l_mg->meta_bitmap);
909 line->meta_line = meta_line;
910
911 line->smeta = l_mg->sline_meta[meta_line];
912 line->emeta = l_mg->eline_meta[meta_line];
913
914 memset(line->smeta, 0, lm->smeta_len);
915 memset(line->emeta->buf, 0, lm->emeta_len[0]);
916
917 line->emeta->mem = 0;
918 atomic_set(&line->emeta->sync, 0);
919}
920
a4bd217b
JG
 921/* For now, lines are always assumed to be full lines. Thus, smeta former and current
922 * lun bitmaps are omitted.
923 */
dd2a4343 924static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
a4bd217b
JG
925 struct pblk_line *cur)
926{
927 struct nvm_tgt_dev *dev = pblk->dev;
928 struct nvm_geo *geo = &dev->geo;
929 struct pblk_line_meta *lm = &pblk->lm;
930 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
dd2a4343
JG
931 struct pblk_emeta *emeta = line->emeta;
932 struct line_emeta *emeta_buf = emeta->buf;
933 struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
a4bd217b
JG
934 int nr_blk_line;
935
936 /* After erasing the line, new bad blocks might appear and we risk
937 * having an invalid line
938 */
939 nr_blk_line = lm->blk_per_line -
940 bitmap_weight(line->blk_bitmap, lm->blk_per_line);
941 if (nr_blk_line < lm->min_blk_line) {
942 spin_lock(&l_mg->free_lock);
943 spin_lock(&line->lock);
944 line->state = PBLK_LINESTATE_BAD;
945 spin_unlock(&line->lock);
946
947 list_add_tail(&line->list, &l_mg->bad_list);
948 spin_unlock(&l_mg->free_lock);
949
950 pr_debug("pblk: line %d is bad\n", line->id);
951
952 return 0;
953 }
954
955 /* Run-time metadata */
dd2a4343 956 line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);
a4bd217b
JG
957
958 /* Mark LUNs allocated in this line (all for now) */
959 bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);
960
dd2a4343
JG
961 smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
962 memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
963 smeta_buf->header.id = cpu_to_le32(line->id);
964 smeta_buf->header.type = cpu_to_le16(line->type);
c79819bc 965 smeta_buf->header.version = SMETA_VERSION;
a4bd217b
JG
966
967 /* Start metadata */
dd2a4343
JG
968 smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
969 smeta_buf->window_wr_lun = cpu_to_le32(geo->nr_luns);
a4bd217b
JG
970
971 /* Fill metadata among lines */
972 if (cur) {
973 memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
dd2a4343
JG
974 smeta_buf->prev_id = cpu_to_le32(cur->id);
975 cur->emeta->buf->next_id = cpu_to_le32(line->id);
a4bd217b 976 } else {
dd2a4343 977 smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
a4bd217b
JG
978 }
979
980 /* All smeta must be set at this point */
dd2a4343
JG
981 smeta_buf->header.crc = cpu_to_le32(
982 pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
983 smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));
a4bd217b
JG
984
985 /* End metadata */
dd2a4343
JG
986 memcpy(&emeta_buf->header, &smeta_buf->header,
987 sizeof(struct line_header));
988 emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
989 emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
990 emeta_buf->nr_valid_lbas = cpu_to_le64(0);
991 emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
992 emeta_buf->crc = cpu_to_le32(0);
993 emeta_buf->prev_id = smeta_buf->prev_id;
a4bd217b
JG
994
995 return 1;
996}
997
 998/* For now, lines are always assumed to be full lines. Thus, smeta former and current
999 * lun bitmaps are omitted.
1000 */
1001static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
1002 int init)
1003{
1004 struct nvm_tgt_dev *dev = pblk->dev;
1005 struct nvm_geo *geo = &dev->geo;
1006 struct pblk_line_meta *lm = &pblk->lm;
1007 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1008 int nr_bb = 0;
1009 u64 off;
1010 int bit = -1;
1011
1012 line->sec_in_line = lm->sec_per_line;
1013
1014 /* Capture bad block information on line mapping bitmaps */
1015 while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
1016 bit + 1)) < lm->blk_per_line) {
1017 off = bit * geo->sec_per_pl;
1018 bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
1019 lm->sec_per_line);
1020 bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
1021 lm->sec_per_line);
1022 line->sec_in_line -= geo->sec_per_blk;
1023 if (bit >= lm->emeta_bb)
1024 nr_bb++;
1025 }
1026
1027 /* Mark smeta metadata sectors as bad sectors */
1028 bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1029 off = bit * geo->sec_per_pl;
a4bd217b
JG
1030 bitmap_set(line->map_bitmap, off, lm->smeta_sec);
1031 line->sec_in_line -= lm->smeta_sec;
1032 line->smeta_ssec = off;
1033 line->cur_sec = off + lm->smeta_sec;
1034
e2cddf20 1035 if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
a4bd217b 1036 pr_debug("pblk: line smeta I/O failed. Retry\n");
588726d3 1037 return 1;
a4bd217b
JG
1038 }
1039
1040 bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);
1041
1042 /* Mark emeta metadata sectors as bad sectors. We need to consider bad
1043 * blocks to make sure that there are enough sectors to store emeta
1044 */
dd2a4343
JG
1045 off = lm->sec_per_line - lm->emeta_sec[0];
1046 bitmap_set(line->invalid_bitmap, off, lm->emeta_sec[0]);
a4bd217b
JG
1047 while (nr_bb) {
1048 off -= geo->sec_per_pl;
1049 if (!test_bit(off, line->invalid_bitmap)) {
1050 bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl);
1051 nr_bb--;
1052 }
1053 }
1054
dd2a4343 1055 line->sec_in_line -= lm->emeta_sec[0];
a4bd217b 1056 line->emeta_ssec = off;
dd2a4343 1057 line->nr_valid_lbas = 0;
0880a9aa 1058 line->left_msecs = line->sec_in_line;
dd2a4343 1059 *line->vsc = cpu_to_le32(line->sec_in_line);
a4bd217b
JG
1060
1061 if (lm->sec_per_line - line->sec_in_line !=
1062 bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
1063 spin_lock(&line->lock);
1064 line->state = PBLK_LINESTATE_BAD;
1065 spin_unlock(&line->lock);
1066
1067 list_add_tail(&line->list, &l_mg->bad_list);
1068 pr_err("pblk: unexpected line %d is bad\n", line->id);
1069
1070 return 0;
1071 }
1072
1073 return 1;
1074}
1075
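/* Allocate the line's map and invalid bitmaps and move it from FREE to OPEN */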
1076static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
1077{
1078 struct pblk_line_meta *lm = &pblk->lm;
a44f53fa 1079 int blk_in_line = atomic_read(&line->blk_in_line);
a4bd217b 1080
e72ec1d3 1081 line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_ATOMIC);
a4bd217b
JG
1082 if (!line->map_bitmap)
1083 return -ENOMEM;
a4bd217b 1084
e72ec1d3
JG
1085 /* will be initialized using bb info from map_bitmap */
1086 line->invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_ATOMIC);
a4bd217b 1087 if (!line->invalid_bitmap) {
e72ec1d3 1088 kfree(line->map_bitmap);
a4bd217b
JG
1089 return -ENOMEM;
1090 }
1091
1092 spin_lock(&line->lock);
1093 if (line->state != PBLK_LINESTATE_FREE) {
e72ec1d3
JG
1094 kfree(line->map_bitmap);
1095 kfree(line->invalid_bitmap);
a4bd217b 1096 spin_unlock(&line->lock);
588726d3
JG
1097 WARN(1, "pblk: corrupted line %d, state %d\n",
1098 line->id, line->state);
1099 return -EAGAIN;
a4bd217b 1100 }
588726d3 1101
a4bd217b 1102 line->state = PBLK_LINESTATE_OPEN;
a44f53fa
JG
1103
1104 atomic_set(&line->left_eblks, blk_in_line);
1105 atomic_set(&line->left_seblks, blk_in_line);
dd2a4343
JG
1106
1107 line->meta_distance = lm->meta_distance;
a4bd217b
JG
1108 spin_unlock(&line->lock);
1109
1110 /* Bad blocks do not need to be erased */
1111 bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
a4bd217b
JG
1112
1113 kref_init(&line->ref);
1114
1115 return 0;
1116}
1117
1118int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
1119{
1120 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1121 int ret;
1122
1123 spin_lock(&l_mg->free_lock);
1124 l_mg->data_line = line;
1125 list_del(&line->list);
a4bd217b
JG
1126
1127 ret = pblk_line_prepare(pblk, line);
1128 if (ret) {
1129 list_add(&line->list, &l_mg->free_list);
3dc001f3 1130 spin_unlock(&l_mg->free_lock);
a4bd217b
JG
1131 return ret;
1132 }
3dc001f3 1133 spin_unlock(&l_mg->free_lock);
a4bd217b
JG
1134
1135 pblk_rl_free_lines_dec(&pblk->rl, line);
1136
1137 if (!pblk_line_init_bb(pblk, line, 0)) {
1138 list_add(&line->list, &l_mg->free_list);
1139 return -EINTR;
1140 }
1141
1142 return 0;
1143}
1144
1145void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
1146{
e72ec1d3 1147 kfree(line->map_bitmap);
a4bd217b
JG
1148 line->map_bitmap = NULL;
1149 line->smeta = NULL;
1150 line->emeta = NULL;
1151}
1152
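/* Get the first usable line from the free list; candidates found to be bad or
 * corrupt are re-queued and the next one is tried. Called with free_lock held.
 */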
1153struct pblk_line *pblk_line_get(struct pblk *pblk)
1154{
1155 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1156 struct pblk_line_meta *lm = &pblk->lm;
588726d3
JG
1157 struct pblk_line *line;
1158 int ret, bit;
a4bd217b
JG
1159
1160 lockdep_assert_held(&l_mg->free_lock);
1161
588726d3 1162retry:
a4bd217b
JG
1163 if (list_empty(&l_mg->free_list)) {
1164 pr_err("pblk: no free lines\n");
588726d3 1165 return NULL;
a4bd217b
JG
1166 }
1167
1168 line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
1169 list_del(&line->list);
1170 l_mg->nr_free_lines--;
1171
1172 bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1173 if (unlikely(bit >= lm->blk_per_line)) {
1174 spin_lock(&line->lock);
1175 line->state = PBLK_LINESTATE_BAD;
1176 spin_unlock(&line->lock);
1177
1178 list_add_tail(&line->list, &l_mg->bad_list);
1179
1180 pr_debug("pblk: line %d is bad\n", line->id);
588726d3 1181 goto retry;
a4bd217b
JG
1182 }
1183
588726d3
JG
1184 ret = pblk_line_prepare(pblk, line);
1185 if (ret) {
1186 if (ret == -EAGAIN) {
1187 list_add(&line->list, &l_mg->corrupt_list);
1188 goto retry;
1189 } else {
1190 pr_err("pblk: failed to prepare line %d\n", line->id);
1191 list_add(&line->list, &l_mg->free_list);
1192 l_mg->nr_free_lines++;
1193 return NULL;
1194 }
a4bd217b
JG
1195 }
1196
a4bd217b
JG
1197 return line;
1198}
1199
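/* Replace a line that failed setup: take a fresh line, inherit the metadata buffers from the old one and free it */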
1200static struct pblk_line *pblk_line_retry(struct pblk *pblk,
1201 struct pblk_line *line)
1202{
1203 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1204 struct pblk_line *retry_line;
1205
588726d3 1206retry:
a4bd217b
JG
1207 spin_lock(&l_mg->free_lock);
1208 retry_line = pblk_line_get(pblk);
1209 if (!retry_line) {
be388d9f 1210 l_mg->data_line = NULL;
a4bd217b
JG
1211 spin_unlock(&l_mg->free_lock);
1212 return NULL;
1213 }
1214
1215 retry_line->smeta = line->smeta;
1216 retry_line->emeta = line->emeta;
1217 retry_line->meta_line = line->meta_line;
a4bd217b 1218
be388d9f 1219 pblk_line_free(pblk, line);
3dc001f3 1220 l_mg->data_line = retry_line;
a4bd217b
JG
1221 spin_unlock(&l_mg->free_lock);
1222
a4bd217b
JG
1223 pblk_rl_free_lines_dec(&pblk->rl, retry_line);
1224
588726d3
JG
1225 if (pblk_line_erase(pblk, retry_line))
1226 goto retry;
1227
a4bd217b
JG
1228 return retry_line;
1229}
1230
588726d3
JG
1231static void pblk_set_space_limit(struct pblk *pblk)
1232{
1233 struct pblk_rl *rl = &pblk->rl;
1234
1235 atomic_set(&rl->rb_space, 0);
1236}
1237
a4bd217b
JG
1238struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
1239{
1240 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1241 struct pblk_line *line;
a4bd217b
JG
1242 int is_next = 0;
1243
1244 spin_lock(&l_mg->free_lock);
1245 line = pblk_line_get(pblk);
1246 if (!line) {
1247 spin_unlock(&l_mg->free_lock);
1248 return NULL;
1249 }
1250
1251 line->seq_nr = l_mg->d_seq_nr++;
1252 line->type = PBLK_LINETYPE_DATA;
1253 l_mg->data_line = line;
1254
dd2a4343 1255 pblk_line_setup_metadata(line, l_mg, &pblk->lm);
a4bd217b
JG
1256
1257 /* Allocate next line for preparation */
1258 l_mg->data_next = pblk_line_get(pblk);
588726d3
JG
1259 if (!l_mg->data_next) {
1260 /* If we cannot get a new line, we need to stop the pipeline.
1261 * Only allow as many writes in as we can store safely and then
1262 * fail gracefully
1263 */
1264 pblk_set_space_limit(pblk);
1265
1266 l_mg->data_next = NULL;
1267 } else {
a4bd217b
JG
1268 l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1269 l_mg->data_next->type = PBLK_LINETYPE_DATA;
1270 is_next = 1;
1271 }
1272 spin_unlock(&l_mg->free_lock);
1273
588726d3
JG
1274 if (pblk_line_erase(pblk, line)) {
1275 line = pblk_line_retry(pblk, line);
1276 if (!line)
1277 return NULL;
1278 }
1279
a4bd217b
JG
1280 pblk_rl_free_lines_dec(&pblk->rl, line);
1281 if (is_next)
1282 pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
1283
a4bd217b 1284retry_setup:
dd2a4343 1285 if (!pblk_line_init_metadata(pblk, line, NULL)) {
a4bd217b
JG
1286 line = pblk_line_retry(pblk, line);
1287 if (!line)
1288 return NULL;
1289
1290 goto retry_setup;
1291 }
1292
1293 if (!pblk_line_init_bb(pblk, line, 1)) {
1294 line = pblk_line_retry(pblk, line);
1295 if (!line)
1296 return NULL;
1297
1298 goto retry_setup;
1299 }
1300
1301 return line;
1302}
1303
588726d3
JG
1304static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
1305{
1306 lockdep_assert_held(&pblk->l_mg.free_lock);
1307
1308 pblk_set_space_limit(pblk);
1309 pblk->state = PBLK_STATE_STOPPING;
1310}
1311
8bd40020
JG
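/* Flush outstanding emeta writes for all lines waiting on the emeta list */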
1312static void pblk_line_close_meta_sync(struct pblk *pblk)
1313{
1314 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1315 struct pblk_line_meta *lm = &pblk->lm;
1316 struct pblk_line *line, *tline;
1317 LIST_HEAD(list);
1318
1319 spin_lock(&l_mg->close_lock);
1320 if (list_empty(&l_mg->emeta_list)) {
1321 spin_unlock(&l_mg->close_lock);
1322 return;
1323 }
1324
1325 list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
1326 spin_unlock(&l_mg->close_lock);
1327
1328 list_for_each_entry_safe(line, tline, &list, list) {
1329 struct pblk_emeta *emeta = line->emeta;
1330
1331 while (emeta->mem < lm->emeta_len[0]) {
1332 int ret;
1333
1334 ret = pblk_submit_meta_io(pblk, line);
1335 if (ret) {
1336 pr_err("pblk: sync meta line %d failed (%d)\n",
1337 line->id, ret);
1338 return;
1339 }
1340 }
1341 }
1342
1343 pblk_wait_for_meta(pblk);
1344 flush_workqueue(pblk->close_wq);
1345}
1346
588726d3
JG
1347void pblk_pipeline_stop(struct pblk *pblk)
1348{
1349 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1350 int ret;
1351
1352 spin_lock(&l_mg->free_lock);
1353 if (pblk->state == PBLK_STATE_RECOVERING ||
1354 pblk->state == PBLK_STATE_STOPPED) {
1355 spin_unlock(&l_mg->free_lock);
1356 return;
1357 }
1358 pblk->state = PBLK_STATE_RECOVERING;
1359 spin_unlock(&l_mg->free_lock);
1360
1361 pblk_flush_writer(pblk);
1362 pblk_wait_for_meta(pblk);
1363
1364 ret = pblk_recov_pad(pblk);
1365 if (ret) {
1366 pr_err("pblk: could not close data on teardown(%d)\n", ret);
1367 return;
1368 }
1369
ee8d5c1a 1370 flush_workqueue(pblk->bb_wq);
588726d3
JG
1371 pblk_line_close_meta_sync(pblk);
1372
1373 spin_lock(&l_mg->free_lock);
1374 pblk->state = PBLK_STATE_STOPPED;
1375 l_mg->data_line = NULL;
1376 l_mg->data_next = NULL;
1377 spin_unlock(&l_mg->free_lock);
1378}
1379
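/* Make the prepared next line the active data line and prepare a new next line */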
21d22871 1380struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
a4bd217b 1381{
a4bd217b 1382 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
21d22871 1383 struct pblk_line *cur, *new = NULL;
a4bd217b 1384 unsigned int left_seblks;
a4bd217b
JG
1385 int is_next = 0;
1386
a4bd217b
JG
1387 new = l_mg->data_next;
1388 if (!new)
21d22871 1389 goto out;
a4bd217b 1390
588726d3
JG
1391 spin_lock(&l_mg->free_lock);
1392 if (pblk->state != PBLK_STATE_RUNNING) {
1393 l_mg->data_line = NULL;
1394 l_mg->data_next = NULL;
1395 spin_unlock(&l_mg->free_lock);
21d22871 1396 goto out;
588726d3 1397 }
c667919d
JG
1398 cur = l_mg->data_line;
1399 l_mg->data_line = new;
588726d3
JG
1400
1401 pblk_line_setup_metadata(new, l_mg, &pblk->lm);
1402 spin_unlock(&l_mg->free_lock);
1403
1404retry_erase:
a4bd217b
JG
1405 left_seblks = atomic_read(&new->left_seblks);
1406 if (left_seblks) {
1407 /* If line is not fully erased, erase it */
a44f53fa 1408 if (atomic_read(&new->left_eblks)) {
a4bd217b 1409 if (pblk_line_erase(pblk, new))
21d22871 1410 goto out;
a4bd217b
JG
1411 } else {
1412 io_schedule();
1413 }
588726d3 1414 goto retry_erase;
a4bd217b
JG
1415 }
1416
a4bd217b 1417retry_setup:
dd2a4343 1418 if (!pblk_line_init_metadata(pblk, new, cur)) {
a4bd217b 1419 new = pblk_line_retry(pblk, new);
f3236cef 1420 if (!new)
21d22871 1421 goto out;
a4bd217b
JG
1422
1423 goto retry_setup;
1424 }
1425
1426 if (!pblk_line_init_bb(pblk, new, 1)) {
1427 new = pblk_line_retry(pblk, new);
1428 if (!new)
21d22871 1429 goto out;
a4bd217b
JG
1430
1431 goto retry_setup;
1432 }
1433
588726d3
JG
1434 /* Allocate next line for preparation */
1435 spin_lock(&l_mg->free_lock);
1436 l_mg->data_next = pblk_line_get(pblk);
1437 if (!l_mg->data_next) {
1438 /* If we cannot get a new line, we need to stop the pipeline.
1439 * Only allow as many writes in as we can store safely and then
1440 * fail gracefully
1441 */
1442 pblk_stop_writes(pblk, new);
1443 l_mg->data_next = NULL;
1444 } else {
1445 l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1446 l_mg->data_next->type = PBLK_LINETYPE_DATA;
1447 is_next = 1;
1448 }
1449 spin_unlock(&l_mg->free_lock);
1450
1451 if (is_next)
1452 pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
21d22871
JG
1453
1454out:
1455 return new;
a4bd217b
JG
1456}
1457
1458void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
1459{
e72ec1d3
JG
1460 kfree(line->map_bitmap);
1461 kfree(line->invalid_bitmap);
a4bd217b 1462
dd2a4343
JG
1463 *line->vsc = cpu_to_le32(EMPTY_ENTRY);
1464
a4bd217b
JG
1465 line->map_bitmap = NULL;
1466 line->invalid_bitmap = NULL;
be388d9f
JG
1467 line->smeta = NULL;
1468 line->emeta = NULL;
a4bd217b
JG
1469}
1470
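/* Last reference to a GC'ed line dropped: free its bitmaps and return it to the free list */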
7bd4d370 1471static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
a4bd217b 1472{
a4bd217b 1473 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
d6b992f7 1474 struct pblk_gc *gc = &pblk->gc;
a4bd217b
JG
1475
1476 spin_lock(&line->lock);
1477 WARN_ON(line->state != PBLK_LINESTATE_GC);
1478 line->state = PBLK_LINESTATE_FREE;
1479 line->gc_group = PBLK_LINEGC_NONE;
1480 pblk_line_free(pblk, line);
1481 spin_unlock(&line->lock);
1482
d6b992f7
HH
1483 atomic_dec(&gc->pipeline_gc);
1484
a4bd217b
JG
1485 spin_lock(&l_mg->free_lock);
1486 list_add_tail(&line->list, &l_mg->free_list);
1487 l_mg->nr_free_lines++;
1488 spin_unlock(&l_mg->free_lock);
1489
1490 pblk_rl_free_lines_inc(&pblk->rl, line);
1491}
1492
7bd4d370
JG
1493static void pblk_line_put_ws(struct work_struct *work)
1494{
1495 struct pblk_line_ws *line_put_ws = container_of(work,
1496 struct pblk_line_ws, ws);
1497 struct pblk *pblk = line_put_ws->pblk;
1498 struct pblk_line *line = line_put_ws->line;
1499
1500 __pblk_line_put(pblk, line);
1501 mempool_free(line_put_ws, pblk->gen_ws_pool);
1502}
1503
1504void pblk_line_put(struct kref *ref)
1505{
1506 struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1507 struct pblk *pblk = line->pblk;
1508
1509 __pblk_line_put(pblk, line);
1510}
1511
1512void pblk_line_put_wq(struct kref *ref)
1513{
1514 struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1515 struct pblk *pblk = line->pblk;
1516 struct pblk_line_ws *line_put_ws;
1517
1518 line_put_ws = mempool_alloc(pblk->gen_ws_pool, GFP_ATOMIC);
1519 if (!line_put_ws)
1520 return;
1521
1522 line_put_ws->pblk = pblk;
1523 line_put_ws->line = line;
1524 line_put_ws->priv = NULL;
1525
1526 INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
1527 queue_work(pblk->r_end_wq, &line_put_ws->ws);
1528}
1529
a4bd217b
JG
1530int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
1531{
1532 struct nvm_rq *rqd;
1533 int err;
1534
67bf26a3 1535 rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
a4bd217b
JG
1536
1537 pblk_setup_e_rq(pblk, rqd, ppa);
1538
1539 rqd->end_io = pblk_end_io_erase;
1540 rqd->private = pblk;
1541
1542 /* The write thread schedules erases so that it minimizes disturbances
1543 * with writes. Thus, there is no need to take the LUN semaphore.
1544 */
1545 err = pblk_submit_io(pblk, rqd);
1546 if (err) {
1547 struct nvm_tgt_dev *dev = pblk->dev;
1548 struct nvm_geo *geo = &dev->geo;
1549
1550 pr_err("pblk: could not async erase line:%d,blk:%d\n",
1551 pblk_dev_ppa_to_line(ppa),
1552 pblk_dev_ppa_to_pos(geo, ppa));
1553 }
1554
1555 return err;
1556}
1557
1558struct pblk_line *pblk_line_get_data(struct pblk *pblk)
1559{
1560 return pblk->l_mg.data_line;
1561}
1562
d624f371
JG
 1563/* For now, always erase the next line */
1564struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
a4bd217b
JG
1565{
1566 return pblk->l_mg.data_next;
1567}
1568
1569int pblk_line_is_full(struct pblk_line *line)
1570{
1571 return (line->left_msecs == 0);
1572}
1573
588726d3
JG
1574static void pblk_line_should_sync_meta(struct pblk *pblk)
1575{
1576 if (pblk_rl_is_limit(&pblk->rl))
1577 pblk_line_close_meta_sync(pblk);
1578}
1579
a4bd217b
JG
1580void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
1581{
1582 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1583 struct list_head *move_list;
1584
a84ebb83
JG
1585#ifdef CONFIG_NVM_DEBUG
1586 struct pblk_line_meta *lm = &pblk->lm;
1587
dd2a4343 1588 WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
a4bd217b 1589 "pblk: corrupt closed line %d\n", line->id);
a84ebb83 1590#endif
a4bd217b
JG
1591
1592 spin_lock(&l_mg->free_lock);
1593 WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
1594 spin_unlock(&l_mg->free_lock);
1595
1596 spin_lock(&l_mg->gc_lock);
1597 spin_lock(&line->lock);
1598 WARN_ON(line->state != PBLK_LINESTATE_OPEN);
1599 line->state = PBLK_LINESTATE_CLOSED;
1600 move_list = pblk_line_gc_list(pblk, line);
1601
1602 list_add_tail(&line->list, move_list);
1603
e72ec1d3 1604 kfree(line->map_bitmap);
a4bd217b
JG
1605 line->map_bitmap = NULL;
1606 line->smeta = NULL;
1607 line->emeta = NULL;
1608
1609 spin_unlock(&line->lock);
1610 spin_unlock(&l_mg->gc_lock);
1611}
1612
dd2a4343
JG
1613void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
1614{
1615 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1616 struct pblk_line_meta *lm = &pblk->lm;
1617 struct pblk_emeta *emeta = line->emeta;
1618 struct line_emeta *emeta_buf = emeta->buf;
1619
 588726d3 1620 /* No need for an exact vsc value; avoid a big line lock and take an approximation. */
dd2a4343
JG
1621 memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
1622 memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);
1623
1624 emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
1625 emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
1626
1627 spin_lock(&l_mg->close_lock);
1628 spin_lock(&line->lock);
1629 list_add_tail(&line->list, &l_mg->emeta_list);
1630 spin_unlock(&line->lock);
1631 spin_unlock(&l_mg->close_lock);
588726d3
JG
1632
1633 pblk_line_should_sync_meta(pblk);
dd2a4343
JG
1634}
1635
a4bd217b
JG
1636void pblk_line_close_ws(struct work_struct *work)
1637{
1638 struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1639 ws);
1640 struct pblk *pblk = line_ws->pblk;
1641 struct pblk_line *line = line_ws->line;
1642
1643 pblk_line_close(pblk, line);
b84ae4a8 1644 mempool_free(line_ws, pblk->gen_ws_pool);
a4bd217b
JG
1645}
1646
b84ae4a8
JG
1647void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
1648 void (*work)(struct work_struct *), gfp_t gfp_mask,
ef576494 1649 struct workqueue_struct *wq)
a4bd217b
JG
1650{
1651 struct pblk_line_ws *line_ws;
1652
b84ae4a8 1653 line_ws = mempool_alloc(pblk->gen_ws_pool, gfp_mask);
a4bd217b
JG
1654
1655 line_ws->pblk = pblk;
1656 line_ws->line = line;
1657 line_ws->priv = priv;
1658
1659 INIT_WORK(&line_ws->ws, work);
ef576494 1660 queue_work(wq, &line_ws->ws);
a4bd217b
JG
1661}
1662
3eaa11e2
JG
1663static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
1664 int nr_ppas, int pos)
a4bd217b 1665{
3eaa11e2 1666 struct pblk_lun *rlun = &pblk->luns[pos];
a4bd217b
JG
1667 int ret;
1668
1669 /*
1670 * Only send one inflight I/O per LUN. Since we map at a page
 1671 * granularity, all ppas in the I/O will map to the same LUN
1672 */
1673#ifdef CONFIG_NVM_DEBUG
1674 int i;
1675
1676 for (i = 1; i < nr_ppas; i++)
1677 WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
1678 ppa_list[0].g.ch != ppa_list[i].g.ch);
1679#endif
a4bd217b 1680
3eaa11e2 1681 ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
c5493845
RP
1682 if (ret == -ETIME || ret == -EINTR)
1683 pr_err("pblk: taking lun semaphore timed out: err %d\n", -ret);
a4bd217b
JG
1684}
1685
3eaa11e2
JG
1686void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
1687{
1688 struct nvm_tgt_dev *dev = pblk->dev;
1689 struct nvm_geo *geo = &dev->geo;
1690 int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1691
1692 __pblk_down_page(pblk, ppa_list, nr_ppas, pos);
1693}
1694
1695void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1696 unsigned long *lun_bitmap)
1697{
1698 struct nvm_tgt_dev *dev = pblk->dev;
1699 struct nvm_geo *geo = &dev->geo;
1700 int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1701
 1702 /* If the LUN has been locked for this same request, do not attempt to
1703 * lock it again
1704 */
1705 if (test_and_set_bit(pos, lun_bitmap))
1706 return;
1707
1708 __pblk_down_page(pblk, ppa_list, nr_ppas, pos);
1709}
1710
1711void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
1712{
1713 struct nvm_tgt_dev *dev = pblk->dev;
1714 struct nvm_geo *geo = &dev->geo;
1715 struct pblk_lun *rlun;
1716 int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1717
1718#ifdef CONFIG_NVM_DEBUG
1719 int i;
1720
1721 for (i = 1; i < nr_ppas; i++)
1722 WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
1723 ppa_list[0].g.ch != ppa_list[i].g.ch);
1724#endif
1725
1726 rlun = &pblk->luns[pos];
1727 up(&rlun->wr_sem);
1728}
1729
a4bd217b
JG
1730void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1731 unsigned long *lun_bitmap)
1732{
1733 struct nvm_tgt_dev *dev = pblk->dev;
1734 struct nvm_geo *geo = &dev->geo;
1735 struct pblk_lun *rlun;
1736 int nr_luns = geo->nr_luns;
1737 int bit = -1;
1738
1739 while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
1740 rlun = &pblk->luns[bit];
1741 up(&rlun->wr_sem);
1742 }
a4bd217b
JG
1743}
1744
1745void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1746{
9f6cb13b 1747 struct ppa_addr ppa_l2p;
a4bd217b
JG
1748
1749 /* logic error: lba out-of-bounds. Ignore update */
1750 if (!(lba < pblk->rl.nr_secs)) {
1751 WARN(1, "pblk: corrupted L2P map request\n");
1752 return;
1753 }
1754
1755 spin_lock(&pblk->trans_lock);
9f6cb13b 1756 ppa_l2p = pblk_trans_map_get(pblk, lba);
a4bd217b 1757
9f6cb13b
JG
1758 if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
1759 pblk_map_invalidate(pblk, ppa_l2p);
a4bd217b
JG
1760
1761 pblk_trans_map_set(pblk, lba, ppa);
1762 spin_unlock(&pblk->trans_lock);
1763}
1764
1765void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1766{
d340121e 1767
a4bd217b
JG
1768#ifdef CONFIG_NVM_DEBUG
1769 /* Callers must ensure that the ppa points to a cache address */
1770 BUG_ON(!pblk_addr_in_cache(ppa));
1771 BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
1772#endif
1773
1774 pblk_update_map(pblk, lba, ppa);
1775}
1776
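/* Update the L2P entry for a GC'ed sector; returns 0 if the entry no longer
 * points to the GC'ed line (i.e. the sector was updated by a newer write).
 */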
9f6cb13b 1777int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
d340121e 1778 struct pblk_line *gc_line, u64 paddr_gc)
a4bd217b 1779{
d340121e 1780 struct ppa_addr ppa_l2p, ppa_gc;
a4bd217b
JG
1781 int ret = 1;
1782
1783#ifdef CONFIG_NVM_DEBUG
1784 /* Callers must ensure that the ppa points to a cache address */
9f6cb13b
JG
1785 BUG_ON(!pblk_addr_in_cache(ppa_new));
1786 BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
a4bd217b
JG
1787#endif
1788
1789 /* logic error: lba out-of-bounds. Ignore update */
1790 if (!(lba < pblk->rl.nr_secs)) {
1791 WARN(1, "pblk: corrupted L2P map request\n");
1792 return 0;
1793 }
1794
1795 spin_lock(&pblk->trans_lock);
9f6cb13b 1796 ppa_l2p = pblk_trans_map_get(pblk, lba);
d340121e 1797 ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);
a4bd217b 1798
d340121e
JG
1799 if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
1800 spin_lock(&gc_line->lock);
1801 WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
1802 "pblk: corrupted GC update");
1803 spin_unlock(&gc_line->lock);
9f6cb13b 1804
a4bd217b
JG
1805 ret = 0;
1806 goto out;
1807 }
1808
9f6cb13b 1809 pblk_trans_map_set(pblk, lba, ppa_new);
a4bd217b
JG
1810out:
1811 spin_unlock(&pblk->trans_lock);
1812 return ret;
1813}
1814
9f6cb13b
JG
1815void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
1816 struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
a4bd217b 1817{
9f6cb13b 1818 struct ppa_addr ppa_l2p;
a4bd217b
JG
1819
1820#ifdef CONFIG_NVM_DEBUG
1821 /* Callers must ensure that the ppa points to a device address */
9f6cb13b 1822 BUG_ON(pblk_addr_in_cache(ppa_mapped));
a4bd217b
JG
1823#endif
1824 /* Invalidate and discard padded entries */
1825 if (lba == ADDR_EMPTY) {
1826#ifdef CONFIG_NVM_DEBUG
1827 atomic_long_inc(&pblk->padded_wb);
1828#endif
9f6cb13b
JG
1829 if (!pblk_ppa_empty(ppa_mapped))
1830 pblk_map_invalidate(pblk, ppa_mapped);
a4bd217b
JG
1831 return;
1832 }
1833
1834 /* logic error: lba out-of-bounds. Ignore update */
1835 if (!(lba < pblk->rl.nr_secs)) {
1836 WARN(1, "pblk: corrupted L2P map request\n");
1837 return;
1838 }
1839
1840 spin_lock(&pblk->trans_lock);
9f6cb13b 1841 ppa_l2p = pblk_trans_map_get(pblk, lba);
a4bd217b
JG
1842
1843 /* Do not update L2P if the cacheline has been updated. In this case,
1844 * the mapped ppa must be invalidated
1845 */
9f6cb13b
JG
1846 if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
1847 if (!pblk_ppa_empty(ppa_mapped))
1848 pblk_map_invalidate(pblk, ppa_mapped);
a4bd217b
JG
1849 goto out;
1850 }
1851
1852#ifdef CONFIG_NVM_DEBUG
9f6cb13b 1853 WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
a4bd217b
JG
1854#endif
1855
9f6cb13b 1856 pblk_trans_map_set(pblk, lba, ppa_mapped);
a4bd217b
JG
1857out:
1858 spin_unlock(&pblk->trans_lock);
1859}
1860
1861void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
1862 sector_t blba, int nr_secs)
1863{
1864 int i;
1865
1866 spin_lock(&pblk->trans_lock);
7bd4d370
JG
1867 for (i = 0; i < nr_secs; i++) {
1868 struct ppa_addr ppa;
1869
1870 ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);
1871
1872 /* If the L2P entry maps to a line, the reference is valid */
1873 if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
1874 int line_id = pblk_dev_ppa_to_line(ppa);
1875 struct pblk_line *line = &pblk->lines[line_id];
1876
1877 kref_get(&line->ref);
1878 }
1879 }
a4bd217b
JG
1880 spin_unlock(&pblk->trans_lock);
1881}
1882
1883void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
1884 u64 *lba_list, int nr_secs)
1885{
d340121e 1886 u64 lba;
a4bd217b
JG
1887 int i;
1888
1889 spin_lock(&pblk->trans_lock);
1890 for (i = 0; i < nr_secs; i++) {
1891 lba = lba_list[i];
d340121e 1892 if (lba != ADDR_EMPTY) {
a4bd217b
JG
1893 /* logic error: lba out-of-bounds. Ignore update */
1894 if (!(lba < pblk->rl.nr_secs)) {
1895 WARN(1, "pblk: corrupted L2P map request\n");
1896 continue;
1897 }
1898 ppas[i] = pblk_trans_map_get(pblk, lba);
1899 }
1900 }
1901 spin_unlock(&pblk->trans_lock);
1902}