/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#include "rrpc.h"

static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
static DECLARE_RWSEM(rrpc_lock);

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
                                struct nvm_rq *rqd, unsigned long flags);

#define rrpc_for_each_lun(rrpc, rlun, i) \
                for ((i) = 0, rlun = &(rrpc)->luns[0]; \
                        (i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])

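/* Mark the sector backing @a invalid in its block and clear the
 * reverse-map entry. The caller must hold rrpc->rev_lock.
 */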
static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
{
        struct nvm_tgt_dev *dev = rrpc->dev;
        struct rrpc_block *rblk = a->rblk;
        unsigned int pg_offset;

        lockdep_assert_held(&rrpc->rev_lock);

        if (a->addr == ADDR_EMPTY || !rblk)
                return;

        spin_lock(&rblk->lock);

        div_u64_rem(a->addr, dev->geo.sec_per_blk, &pg_offset);
        WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
        rblk->nr_invalid_pages++;

        spin_unlock(&rblk->lock);

        rrpc->rev_trans_map[a->addr].addr = ADDR_EMPTY;
}

static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
                                        unsigned int len)
{
        sector_t i;

        spin_lock(&rrpc->rev_lock);
        for (i = slba; i < slba + len; i++) {
                struct rrpc_addr *gp = &rrpc->trans_map[i];

                rrpc_page_invalidate(rrpc, gp);
                gp->rblk = NULL;
        }
        spin_unlock(&rrpc->rev_lock);
}

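/* Allocate a request and take the inflight lock for [laddr, laddr + pages).
 * Returns the request on success, NULL if the range is already locked by
 * another request, or an ERR_PTR on allocation failure.
 */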
static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
                                        sector_t laddr, unsigned int pages)
{
        struct nvm_rq *rqd;
        struct rrpc_inflight_rq *inf;

        rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
        if (!rqd)
                return ERR_PTR(-ENOMEM);

        inf = rrpc_get_inflight_rq(rqd);
        if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
                mempool_free(rqd, rrpc->rq_pool);
                return NULL;
        }

        return rqd;
}

static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
{
        struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);

        rrpc_unlock_laddr(rrpc, inf);

        mempool_free(rqd, rrpc->rq_pool);
}

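/* Handle a discard bio: busy-wait until the logical range can be locked,
 * then invalidate every sector in it.
 */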
static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
{
        sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
        sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
        struct nvm_rq *rqd;

        while (1) {
                rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
                if (rqd)
                        break;

                schedule();
        }

        if (IS_ERR(rqd)) {
                pr_err("rrpc: unable to acquire inflight IO\n");
                bio_io_error(bio);
                return;
        }

        rrpc_invalidate_range(rrpc, slba, len);
        rrpc_inflight_laddr_release(rrpc, rqd);
}

static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
{
        struct nvm_tgt_dev *dev = rrpc->dev;

        return (rblk->next_page == dev->geo.sec_per_blk);
}

/* Calculate relative addr for the given block, considering instantiated LUNs */
static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
        struct nvm_tgt_dev *dev = rrpc->dev;
        struct rrpc_lun *rlun = rblk->rlun;

        return rlun->id * dev->geo.sec_per_blk;
}

static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_tgt_dev *dev,
                                         struct rrpc_addr *gp)
{
        struct rrpc_block *rblk = gp->rblk;
        struct rrpc_lun *rlun = rblk->rlun;
        u64 addr = gp->addr;
        struct ppa_addr paddr;

        paddr.ppa = addr;
        paddr = rrpc_linear_to_generic_addr(&dev->geo, paddr);
        paddr.g.ch = rlun->bppa.g.ch;
        paddr.g.lun = rlun->bppa.g.lun;
        paddr.g.blk = rblk->id;

        return paddr;
}

/* requires lun->lock taken */
static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
                                                struct rrpc_block **cur_rblk)
{
        struct rrpc *rrpc = rlun->rrpc;

        if (*cur_rblk) {
                spin_lock(&(*cur_rblk)->lock);
                WARN_ON(!block_is_full(rrpc, *cur_rblk));
                spin_unlock(&(*cur_rblk)->lock);
        }
        *cur_rblk = new_rblk;
}

static struct rrpc_block *__rrpc_get_blk(struct rrpc *rrpc,
                                                struct rrpc_lun *rlun)
{
        struct rrpc_block *rblk = NULL;

        if (list_empty(&rlun->free_list))
                goto out;

        rblk = list_first_entry(&rlun->free_list, struct rrpc_block, list);

        list_move_tail(&rblk->list, &rlun->used_list);
        rblk->state = NVM_BLK_ST_TGT;
        rlun->nr_free_blocks--;

out:
        return rblk;
}

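/* Take a free block from the LUN and prepare it for writes. Non-GC
 * allocations are refused once the LUN's free blocks drop below its
 * reserved count, so garbage collection always has blocks to write into.
 */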
static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
                                                        unsigned long flags)
{
        struct nvm_tgt_dev *dev = rrpc->dev;
        struct rrpc_block *rblk;
        int is_gc = flags & NVM_IOTYPE_GC;

        spin_lock(&rlun->lock);
        if (!is_gc && rlun->nr_free_blocks < rlun->reserved_blocks) {
                pr_err("nvm: rrpc: cannot give block to non GC request\n");
                spin_unlock(&rlun->lock);
                return NULL;
        }

        rblk = __rrpc_get_blk(rrpc, rlun);
        if (!rblk) {
                pr_err("nvm: rrpc: cannot get new block\n");
                spin_unlock(&rlun->lock);
                return NULL;
        }
        spin_unlock(&rlun->lock);

        bitmap_zero(rblk->invalid_pages, dev->geo.sec_per_blk);
        rblk->next_page = 0;
        rblk->nr_invalid_pages = 0;
        atomic_set(&rblk->data_cmnt_size, 0);

        return rblk;
}

static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
        struct rrpc_lun *rlun = rblk->rlun;

        spin_lock(&rlun->lock);
        if (rblk->state & NVM_BLK_ST_TGT) {
                list_move_tail(&rblk->list, &rlun->free_list);
                rlun->nr_free_blocks++;
                rblk->state = NVM_BLK_ST_FREE;
        } else if (rblk->state & NVM_BLK_ST_BAD) {
                list_move_tail(&rblk->list, &rlun->bb_list);
                rblk->state = NVM_BLK_ST_BAD;
        } else {
                WARN_ON_ONCE(1);
                pr_err("rrpc: erroneous state (ch:%d,lun:%d,blk:%d -> %u)\n",
                        rlun->bppa.g.ch, rlun->bppa.g.lun,
                        rblk->id, rblk->state);
                list_move_tail(&rblk->list, &rlun->bb_list);
        }
        spin_unlock(&rlun->lock);
}

static void rrpc_put_blks(struct rrpc *rrpc)
{
        struct rrpc_lun *rlun;
        int i;

        for (i = 0; i < rrpc->nr_luns; i++) {
                rlun = &rrpc->luns[i];
                if (rlun->cur)
                        rrpc_put_blk(rrpc, rlun->cur);
                if (rlun->gc_cur)
                        rrpc_put_blk(rrpc, rlun->gc_cur);
        }
}

static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
{
        int next = atomic_inc_return(&rrpc->next_lun);

        return &rrpc->luns[next % rrpc->nr_luns];
}

static void rrpc_gc_kick(struct rrpc *rrpc)
{
        struct rrpc_lun *rlun;
        unsigned int i;

        for (i = 0; i < rrpc->nr_luns; i++) {
                rlun = &rrpc->luns[i];
                queue_work(rrpc->krqd_wq, &rlun->ws_gc);
        }
}

/*
 * Timed GC: kick GC on all LUNs at a fixed interval.
 */
static void rrpc_gc_timer(unsigned long data)
{
        struct rrpc *rrpc = (struct rrpc *)data;

        rrpc_gc_kick(rrpc);
        mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
}

static void rrpc_end_sync_bio(struct bio *bio)
{
        struct completion *waiting = bio->bi_private;

        if (bio->bi_error)
                pr_err("nvm: gc request failed (%u).\n", bio->bi_error);

        complete(waiting);
}

/*
 * rrpc_move_valid_pages -- migrate live data off the block
 * @rrpc: the 'rrpc' structure
 * @rblk: the block from which to migrate live pages
 *
 * Description:
 *   GC algorithms may call this function to migrate remaining live
 *   pages off the block prior to erasing it. This function blocks
 *   further execution until the operation is complete.
 */
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
        struct nvm_tgt_dev *dev = rrpc->dev;
        struct request_queue *q = dev->q;
        struct rrpc_rev_addr *rev;
        struct nvm_rq *rqd;
        struct bio *bio;
        struct page *page;
        int slot;
        int nr_sec_per_blk = dev->geo.sec_per_blk;
        u64 phys_addr;
        DECLARE_COMPLETION_ONSTACK(wait);

        if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
                return 0;

        bio = bio_alloc(GFP_NOIO, 1);
        if (!bio) {
                pr_err("nvm: could not alloc bio to gc\n");
                return -ENOMEM;
        }

        page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
        if (!page) {
                bio_put(bio);
                return -ENOMEM;
        }

        while ((slot = find_first_zero_bit(rblk->invalid_pages,
                                           nr_sec_per_blk)) < nr_sec_per_blk) {

                /* Lock laddr */
                phys_addr = rrpc_blk_to_ppa(rrpc, rblk) + slot;

try:
                spin_lock(&rrpc->rev_lock);
                /* Get logical address from physical to logical table */
                rev = &rrpc->rev_trans_map[phys_addr];
                /* already updated by previous regular write */
                if (rev->addr == ADDR_EMPTY) {
                        spin_unlock(&rrpc->rev_lock);
                        continue;
                }

                rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
                if (IS_ERR_OR_NULL(rqd)) {
                        spin_unlock(&rrpc->rev_lock);
                        schedule();
                        goto try;
                }

                spin_unlock(&rrpc->rev_lock);

                /* Perform read to do GC */
                bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
                bio_set_op_attrs(bio, REQ_OP_READ, 0);
                bio->bi_private = &wait;
                bio->bi_end_io = rrpc_end_sync_bio;

                /* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
                bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

                if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
                        pr_err("rrpc: gc read failed.\n");
                        rrpc_inflight_laddr_release(rrpc, rqd);
                        goto finished;
                }
                wait_for_completion_io(&wait);
                if (bio->bi_error) {
                        rrpc_inflight_laddr_release(rrpc, rqd);
                        goto finished;
                }

                bio_reset(bio);
                reinit_completion(&wait);

                bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
                bio->bi_private = &wait;
                bio->bi_end_io = rrpc_end_sync_bio;

                bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

                /* turn the command around and write the data back to a new
                 * address
                 */
                if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
                        pr_err("rrpc: gc write failed.\n");
                        rrpc_inflight_laddr_release(rrpc, rqd);
                        goto finished;
                }
                wait_for_completion_io(&wait);

                rrpc_inflight_laddr_release(rrpc, rqd);
                if (bio->bi_error)
                        goto finished;

                bio_reset(bio);
        }

finished:
        mempool_free(page, rrpc->page_pool);
        bio_put(bio);

        if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
                pr_err("nvm: failed to garbage collect block\n");
                return -EIO;
        }

        return 0;
}

static void rrpc_block_gc(struct work_struct *work)
{
        struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
                                                                        ws_gc);
        struct rrpc *rrpc = gcb->rrpc;
        struct rrpc_block *rblk = gcb->rblk;
        struct rrpc_lun *rlun = rblk->rlun;
        struct nvm_tgt_dev *dev = rrpc->dev;
        struct ppa_addr ppa;

        mempool_free(gcb, rrpc->gcb_pool);
        pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' being reclaimed\n",
                        rlun->bppa.g.ch, rlun->bppa.g.lun,
                        rblk->id);

        if (rrpc_move_valid_pages(rrpc, rblk))
                goto put_back;

        ppa.ppa = 0;
        ppa.g.ch = rlun->bppa.g.ch;
        ppa.g.lun = rlun->bppa.g.lun;
        ppa.g.blk = rblk->id;

        if (nvm_erase_blk(dev, &ppa, 0))
                goto put_back;

        rrpc_put_blk(rrpc, rblk);

        return;

put_back:
        spin_lock(&rlun->lock);
        list_add_tail(&rblk->prio, &rlun->prio_list);
        spin_unlock(&rlun->lock);
}

/* the block with the highest number of invalid pages will be at the
 * beginning of the list
 */
static struct rrpc_block *rblk_max_invalid(struct rrpc_block *ra,
                                           struct rrpc_block *rb)
{
        if (ra->nr_invalid_pages == rb->nr_invalid_pages)
                return ra;

        return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
}

/* linearly find the block with the highest number of invalid pages;
 * requires lun->lock
 */
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
        struct list_head *prio_list = &rlun->prio_list;
        struct rrpc_block *rblk, *max;

        BUG_ON(list_empty(prio_list));

        max = list_first_entry(prio_list, struct rrpc_block, prio);
        list_for_each_entry(rblk, prio_list, prio)
                max = rblk_max_invalid(max, rblk);

        return max;
}

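/* Per-LUN GC worker: while the LUN is below its free-block target, pick the
 * most invalidated full block off the prio list and queue it for reclaim
 * on the GC workqueue.
 */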
static void rrpc_lun_gc(struct work_struct *work)
{
        struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
        struct rrpc *rrpc = rlun->rrpc;
        struct nvm_tgt_dev *dev = rrpc->dev;
        struct rrpc_block_gc *gcb;
        unsigned int nr_blocks_need;

        nr_blocks_need = dev->geo.blks_per_lun / GC_LIMIT_INVERSE;

        if (nr_blocks_need < rrpc->nr_luns)
                nr_blocks_need = rrpc->nr_luns;

        spin_lock(&rlun->lock);
        while (nr_blocks_need > rlun->nr_free_blocks &&
                                        !list_empty(&rlun->prio_list)) {
                struct rrpc_block *rblk = block_prio_find_max(rlun);

                if (!rblk->nr_invalid_pages)
                        break;

                gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
                if (!gcb)
                        break;

                list_del_init(&rblk->prio);

                WARN_ON(!block_is_full(rrpc, rblk));

                pr_debug("rrpc: selected block 'ch:%d,lun:%d,blk:%d' for GC\n",
                                rlun->bppa.g.ch, rlun->bppa.g.lun,
                                rblk->id);

                gcb->rrpc = rrpc;
                gcb->rblk = rblk;
                INIT_WORK(&gcb->ws_gc, rrpc_block_gc);

                queue_work(rrpc->kgc_wq, &gcb->ws_gc);

                nr_blocks_need--;
        }
        spin_unlock(&rlun->lock);

        /* TODO: Hint that request queue can be started again */
}

static void rrpc_gc_queue(struct work_struct *work)
{
        struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
                                                                        ws_gc);
        struct rrpc *rrpc = gcb->rrpc;
        struct rrpc_block *rblk = gcb->rblk;
        struct rrpc_lun *rlun = rblk->rlun;

        spin_lock(&rlun->lock);
        list_add_tail(&rblk->prio, &rlun->prio_list);
        spin_unlock(&rlun->lock);

        mempool_free(gcb, rrpc->gcb_pool);
        pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' full, allow GC (sched)\n",
                        rlun->bppa.g.ch, rlun->bppa.g.lun,
                        rblk->id);
}

static const struct block_device_operations rrpc_fops = {
        .owner          = THIS_MODULE,
};

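/* Pick the LUN to write to: plain round-robin for normal I/O; for GC, the
 * LUN with the most free blocks.
 */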
static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
{
        unsigned int i;
        struct rrpc_lun *rlun, *max_free;

        if (!is_gc)
                return get_next_lun(rrpc);

        /* during GC, we don't care about the round-robin order; instead we
         * want to maintain evenness across the LUNs.
         */
        max_free = &rrpc->luns[0];
        /* prevent a GC-ing LUN from devouring pages of a LUN with few free
         * blocks. We don't take the lock as we only need an estimate.
         */
        rrpc_for_each_lun(rrpc, rlun, i) {
                if (rlun->nr_free_blocks > max_free->nr_free_blocks)
                        max_free = rlun;
        }

        return max_free;
}

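/* Install the laddr -> (block, paddr) translation and its reverse entry,
 * invalidating any previous mapping for @laddr.
 */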
static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
                                         struct rrpc_block *rblk, u64 paddr)
{
        struct rrpc_addr *gp;
        struct rrpc_rev_addr *rev;

        BUG_ON(laddr >= rrpc->nr_sects);

        gp = &rrpc->trans_map[laddr];
        spin_lock(&rrpc->rev_lock);
        if (gp->rblk)
                rrpc_page_invalidate(rrpc, gp);

        gp->addr = paddr;
        gp->rblk = rblk;

        rev = &rrpc->rev_trans_map[gp->addr];
        rev->addr = laddr;
        spin_unlock(&rrpc->rev_lock);

        return gp;
}

static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
        u64 addr = ADDR_EMPTY;

        spin_lock(&rblk->lock);
        if (block_is_full(rrpc, rblk))
                goto out;

        addr = rblk->next_page;

        rblk->next_page++;
out:
        spin_unlock(&rblk->lock);
        return addr;
}

/* Map a logical address to a physical page. The mapping implements a round
 * robin approach and allocates a page from the next available LUN.
 *
 * Returns the global ppa_addr of the mapped page. If no block is available,
 * the returned address has ppa == ADDR_EMPTY.
 */
static struct ppa_addr rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
                                                                int is_gc)
{
        struct nvm_tgt_dev *tgt_dev = rrpc->dev;
        struct rrpc_lun *rlun;
        struct rrpc_block *rblk, **cur_rblk;
        struct rrpc_addr *p;
        struct ppa_addr ppa;
        u64 paddr;
        int gc_force = 0;

        ppa.ppa = ADDR_EMPTY;
        rlun = rrpc_get_lun_rr(rrpc, is_gc);

        if (!is_gc && rlun->nr_free_blocks < rrpc->nr_luns * 4)
                return ppa;

        /*
         * page allocation steps:
         * 1. Try to allocate a new page from the current rblk.
         * 2a. If that succeeds, proceed to map it in and return.
         * 2b. If it fails, first try to allocate a new block from the media
         *     manager, and then retry step 1. Retry until the normal block
         *     pool is exhausted.
         * 3. If exhausted, and the garbage collector is requesting the
         *    block, go to the reserved block and retry step 1.
         *    If this fails as well, or it is not GC requesting, report that
         *    no block could be retrieved and let the caller handle further
         *    processing.
         */

        spin_lock(&rlun->lock);
        cur_rblk = &rlun->cur;
        rblk = rlun->cur;
retry:
        paddr = rrpc_alloc_addr(rrpc, rblk);

        if (paddr != ADDR_EMPTY)
                goto done;

        if (!list_empty(&rlun->wblk_list)) {
new_blk:
                rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
                                                                        prio);
                rrpc_set_lun_cur(rlun, rblk, cur_rblk);
                list_del(&rblk->prio);
                goto retry;
        }
        spin_unlock(&rlun->lock);

        rblk = rrpc_get_blk(rrpc, rlun, gc_force);
        if (rblk) {
                spin_lock(&rlun->lock);
                list_add_tail(&rblk->prio, &rlun->wblk_list);
                /*
                 * Another thread might already have added a new block;
                 * make sure that one is used instead of the one just added.
                 */
                goto new_blk;
        }

        if (unlikely(is_gc) && !gc_force) {
                /* retry from emergency gc block */
                cur_rblk = &rlun->gc_cur;
                rblk = rlun->gc_cur;
                gc_force = 1;
                spin_lock(&rlun->lock);
                goto retry;
        }

        pr_err("rrpc: failed to allocate new block\n");
        return ppa;
done:
        spin_unlock(&rlun->lock);
        p = rrpc_update_map(rrpc, laddr, rblk, paddr);
        if (!p)
                return ppa;

        /* return global address */
        return rrpc_ppa_to_gaddr(tgt_dev, p);
}

static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
{
        struct rrpc_block_gc *gcb;

        gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
        if (!gcb) {
                pr_err("rrpc: unable to queue block for gc\n");
                return;
        }

        gcb->rrpc = rrpc;
        gcb->rblk = rblk;

        INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
        queue_work(rrpc->kgc_wq, &gcb->ws_gc);
}

static struct rrpc_lun *rrpc_ppa_to_lun(struct rrpc *rrpc, struct ppa_addr p)
{
        struct rrpc_lun *rlun = NULL;
        int i;

        for (i = 0; i < rrpc->nr_luns; i++) {
                if (rrpc->luns[i].bppa.g.ch == p.g.ch &&
                                rrpc->luns[i].bppa.g.lun == p.g.lun) {
                        rlun = &rrpc->luns[i];
                        break;
                }
        }

        return rlun;
}

static void __rrpc_mark_bad_block(struct rrpc *rrpc, struct ppa_addr ppa)
{
        struct nvm_tgt_dev *dev = rrpc->dev;
        struct rrpc_lun *rlun;
        struct rrpc_block *rblk;

        rlun = rrpc_ppa_to_lun(rrpc, ppa);
        rblk = &rlun->blocks[ppa.g.blk];
        rblk->state = NVM_BLK_ST_BAD;

        nvm_set_tgt_bb_tbl(dev, &ppa, 1, NVM_BLK_T_GRWN_BAD);
}

static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
{
        void *comp_bits = &rqd->ppa_status;
        struct ppa_addr ppa, prev_ppa;
        int nr_ppas = rqd->nr_ppas;
        int bit;

        if (rqd->nr_ppas == 1) {
                /* single-sector requests carry no ppa_list; done here */
                __rrpc_mark_bad_block(rrpc, rqd->ppa_addr);
                return;
        }

        ppa_set_empty(&prev_ppa);
        bit = -1;
        while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
                ppa = rqd->ppa_list[bit];
                if (ppa_cmp_blk(ppa, prev_ppa))
                        continue;

                __rrpc_mark_bad_block(rrpc, ppa);
                /* remember the block just marked so duplicates are skipped */
                prev_ppa = ppa;
        }
}

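/* Write completion: count committed sectors per block and schedule the
 * block for GC consideration once it has been completely written.
 */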
static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
                                                sector_t laddr, uint8_t npages)
{
        struct nvm_tgt_dev *dev = rrpc->dev;
        struct rrpc_addr *p;
        struct rrpc_block *rblk;
        int cmnt_size, i;

        for (i = 0; i < npages; i++) {
                p = &rrpc->trans_map[laddr + i];
                rblk = p->rblk;

                cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
                if (unlikely(cmnt_size == dev->geo.sec_per_blk))
                        rrpc_run_gc(rrpc, rblk);
        }
}

static void rrpc_end_io(struct nvm_rq *rqd)
{
        struct rrpc *rrpc = rqd->private;
        struct nvm_tgt_dev *dev = rrpc->dev;
        struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
        uint8_t npages = rqd->nr_ppas;
        sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;

        if (bio_data_dir(rqd->bio) == WRITE) {
                if (rqd->error == NVM_RSP_ERR_FAILWRITE)
                        rrpc_mark_bad_block(rrpc, rqd);

                rrpc_end_io_write(rrpc, rrqd, laddr, npages);
        }

        bio_put(rqd->bio);

        if (rrqd->flags & NVM_IOTYPE_GC)
                return;

        rrpc_unlock_rq(rrpc, rqd);

        if (npages > 1)
                nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);

        mempool_free(rqd, rrpc->rq_pool);
}

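/* Set up a multi-sector read: translate each logical sector through the
 * L2P table into the ppa list. An unmapped sector ends the request early
 * with NVM_IO_DONE, since never-written data has nothing to read.
 */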
static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
                        struct nvm_rq *rqd, unsigned long flags, int npages)
{
        struct nvm_tgt_dev *dev = rrpc->dev;
        struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
        struct rrpc_addr *gp;
        sector_t laddr = rrpc_get_laddr(bio);
        int is_gc = flags & NVM_IOTYPE_GC;
        int i;

        if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
                nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
                return NVM_IO_REQUEUE;
        }

        for (i = 0; i < npages; i++) {
                /* We assume that mapping occurs at 4KB granularity */
                BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
                gp = &rrpc->trans_map[laddr + i];

                if (gp->rblk) {
                        rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, gp);
                } else {
                        BUG_ON(is_gc);
                        rrpc_unlock_laddr(rrpc, r);
                        nvm_dev_dma_free(dev->parent, rqd->ppa_list,
                                                        rqd->dma_ppa_list);
                        return NVM_IO_DONE;
                }
        }

        rqd->opcode = NVM_OP_HBREAD;

        return NVM_IO_OK;
}

static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
                                                        unsigned long flags)
{
        int is_gc = flags & NVM_IOTYPE_GC;
        sector_t laddr = rrpc_get_laddr(bio);
        struct rrpc_addr *gp;

        if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
                return NVM_IO_REQUEUE;

        BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
        gp = &rrpc->trans_map[laddr];

        if (gp->rblk) {
                rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp);
        } else {
                BUG_ON(is_gc);
                rrpc_unlock_rq(rrpc, rqd);
                return NVM_IO_DONE;
        }

        rqd->opcode = NVM_OP_HBREAD;

        return NVM_IO_OK;
}

static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
                        struct nvm_rq *rqd, unsigned long flags, int npages)
{
        struct nvm_tgt_dev *dev = rrpc->dev;
        struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
        struct ppa_addr p;
        sector_t laddr = rrpc_get_laddr(bio);
        int is_gc = flags & NVM_IOTYPE_GC;
        int i;

        if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
                nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
                return NVM_IO_REQUEUE;
        }

        for (i = 0; i < npages; i++) {
                /* We assume that mapping occurs at 4KB granularity */
                p = rrpc_map_page(rrpc, laddr + i, is_gc);
                if (p.ppa == ADDR_EMPTY) {
                        BUG_ON(is_gc);
                        rrpc_unlock_laddr(rrpc, r);
                        nvm_dev_dma_free(dev->parent, rqd->ppa_list,
                                                        rqd->dma_ppa_list);
                        rrpc_gc_kick(rrpc);
                        return NVM_IO_REQUEUE;
                }

                rqd->ppa_list[i] = p;
        }

        rqd->opcode = NVM_OP_HBWRITE;

        return NVM_IO_OK;
}

static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
                                struct nvm_rq *rqd, unsigned long flags)
{
        struct ppa_addr p;
        int is_gc = flags & NVM_IOTYPE_GC;
        sector_t laddr = rrpc_get_laddr(bio);

        if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
                return NVM_IO_REQUEUE;

        p = rrpc_map_page(rrpc, laddr, is_gc);
        if (p.ppa == ADDR_EMPTY) {
                BUG_ON(is_gc);
                rrpc_unlock_rq(rrpc, rqd);
                rrpc_gc_kick(rrpc);
                return NVM_IO_REQUEUE;
        }

        rqd->ppa_addr = p;
        rqd->opcode = NVM_OP_HBWRITE;

        return NVM_IO_OK;
}

static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
                        struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
{
        struct nvm_tgt_dev *dev = rrpc->dev;

        if (npages > 1) {
                rqd->ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd->dma_ppa_list);
                if (!rqd->ppa_list) {
                        pr_err("rrpc: not able to allocate ppa list\n");
                        return NVM_IO_ERR;
                }

                if (bio_op(bio) == REQ_OP_WRITE)
                        return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
                                                                        npages);

                return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
        }

        if (bio_op(bio) == REQ_OP_WRITE)
                return rrpc_write_rq(rrpc, bio, rqd, flags);

        return rrpc_read_rq(rrpc, bio, rqd, flags);
}

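/* Validate the bio size, map it onto physical addresses and hand the
 * request to the device. On submission failure the request is unwound
 * here unless it belongs to GC, which cleans up via its own path.
 */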
static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
                                struct nvm_rq *rqd, unsigned long flags)
{
        struct nvm_tgt_dev *dev = rrpc->dev;
        struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
        uint8_t nr_pages = rrpc_get_pages(bio);
        int bio_size = bio_sectors(bio) << 9;
        int err;

        if (bio_size < dev->geo.sec_size)
                return NVM_IO_ERR;
        else if (bio_size > dev->geo.max_rq_size)
                return NVM_IO_ERR;

        err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
        if (err)
                return err;

        bio_get(bio);
        rqd->bio = bio;
        rqd->private = rrpc;
        rqd->nr_ppas = nr_pages;
        rqd->end_io = rrpc_end_io;
        rrq->flags = flags;

        err = nvm_submit_io(dev, rqd);
        if (err) {
                pr_err("rrpc: I/O submission failed: %d\n", err);
                bio_put(bio);
                if (!(flags & NVM_IOTYPE_GC)) {
                        rrpc_unlock_rq(rrpc, rqd);
                        if (rqd->nr_ppas > 1)
                                nvm_dev_dma_free(dev->parent, rqd->ppa_list,
                                                        rqd->dma_ppa_list);
                }
                return NVM_IO_ERR;
        }

        return NVM_IO_OK;
}

static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
{
        struct rrpc *rrpc = q->queuedata;
        struct nvm_rq *rqd;
        int err;

        blk_queue_split(q, &bio, q->bio_split);

        if (bio_op(bio) == REQ_OP_DISCARD) {
                rrpc_discard(rrpc, bio);
                return BLK_QC_T_NONE;
        }

        rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
        if (!rqd) {
                pr_err_ratelimited("rrpc: not able to queue bio\n");
                bio_io_error(bio);
                return BLK_QC_T_NONE;
        }
        memset(rqd, 0, sizeof(struct nvm_rq));

        err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
        switch (err) {
        case NVM_IO_OK:
                return BLK_QC_T_NONE;
        case NVM_IO_ERR:
                bio_io_error(bio);
                break;
        case NVM_IO_DONE:
                bio_endio(bio);
                break;
        case NVM_IO_REQUEUE:
                spin_lock(&rrpc->bio_lock);
                bio_list_add(&rrpc->requeue_bios, bio);
                spin_unlock(&rrpc->bio_lock);
                queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
                break;
        }

        mempool_free(rqd, rrpc->rq_pool);
        return BLK_QC_T_NONE;
}

static void rrpc_requeue(struct work_struct *work)
{
        struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
        struct bio_list bios;
        struct bio *bio;

        bio_list_init(&bios);

        spin_lock(&rrpc->bio_lock);
        bio_list_merge(&bios, &rrpc->requeue_bios);
        bio_list_init(&rrpc->requeue_bios);
        spin_unlock(&rrpc->bio_lock);

        while ((bio = bio_list_pop(&bios)))
                rrpc_make_rq(rrpc->disk->queue, bio);
}

static void rrpc_gc_free(struct rrpc *rrpc)
{
        if (rrpc->krqd_wq)
                destroy_workqueue(rrpc->krqd_wq);

        if (rrpc->kgc_wq)
                destroy_workqueue(rrpc->kgc_wq);
}

static int rrpc_gc_init(struct rrpc *rrpc)
{
        rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
                                                                rrpc->nr_luns);
        if (!rrpc->krqd_wq)
                return -ENOMEM;

        rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
        if (!rrpc->kgc_wq)
                return -ENOMEM;

        setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);

        return 0;
}

static void rrpc_map_free(struct rrpc *rrpc)
{
        vfree(rrpc->rev_trans_map);
        vfree(rrpc->trans_map);
}

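/* Callback for nvm_get_l2p_tbl(): replay @nlb L2P entries starting at @slba
 * into the target's translation and reverse-translation maps, reviving the
 * blocks they point into.
 */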
static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
{
        struct rrpc *rrpc = (struct rrpc *)private;
        struct nvm_tgt_dev *dev = rrpc->dev;
        struct rrpc_addr *addr = rrpc->trans_map + slba;
        struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
        struct rrpc_lun *rlun;
        struct rrpc_block *rblk;
        u64 i;

        for (i = 0; i < nlb; i++) {
                struct ppa_addr gaddr;
                u64 pba = le64_to_cpu(entries[i]);
                unsigned int mod;

                /* LNVM treats the address spaces as silos: the LBA and PBA
                 * spaces are equally large and both zero-indexed.
                 */
                if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
                        pr_err("nvm: L2P data entry is out of bounds!\n");
                        pr_err("nvm: Maybe loaded an old target L2P\n");
                        return -EINVAL;
                }

                /* Address zero is special: the first page on a device is
                 * protected, as it often holds internal device boot
                 * information.
                 */
                if (!pba)
                        continue;

                div_u64_rem(pba, rrpc->nr_sects, &mod);

                gaddr = rrpc_recov_addr(dev, pba);
                rlun = rrpc_ppa_to_lun(rrpc, gaddr);
                if (!rlun) {
                        pr_err("rrpc: l2p corruption on lba %llu\n",
                                                        slba + i);
                        return -EINVAL;
                }

                rblk = &rlun->blocks[gaddr.g.blk];
                if (!rblk->state) {
                        /* at this point, we don't know anything about the
                         * block. It's up to the FTL on top to re-establish
                         * the block state. The block is assumed to be open.
                         */
                        list_move_tail(&rblk->list, &rlun->used_list);
                        rblk->state = NVM_BLK_ST_TGT;
                        rlun->nr_free_blocks--;
                }

                addr[i].addr = pba;
                addr[i].rblk = rblk;
                raddr[mod].addr = slba + i;
        }

        return 0;
}

static int rrpc_map_init(struct rrpc *rrpc)
{
        struct nvm_tgt_dev *dev = rrpc->dev;
        sector_t i;
        int ret;

        rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
        if (!rrpc->trans_map)
                return -ENOMEM;

        rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
                                                        * rrpc->nr_sects);
        if (!rrpc->rev_trans_map)
                return -ENOMEM;

        for (i = 0; i < rrpc->nr_sects; i++) {
                struct rrpc_addr *p = &rrpc->trans_map[i];
                struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];

                p->addr = ADDR_EMPTY;
                r->addr = ADDR_EMPTY;
        }

        /* Bring up the mapping table from device */
        ret = nvm_get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
                                        rrpc_l2p_update, rrpc);
        if (ret) {
                pr_err("nvm: rrpc: could not read L2P table.\n");
                return -EINVAL;
        }

        return 0;
}

/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64

static int rrpc_core_init(struct rrpc *rrpc)
{
        down_write(&rrpc_lock);
        if (!rrpc_gcb_cache) {
                rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
                                sizeof(struct rrpc_block_gc), 0, 0, NULL);
                if (!rrpc_gcb_cache) {
                        up_write(&rrpc_lock);
                        return -ENOMEM;
                }

                rrpc_rq_cache = kmem_cache_create("rrpc_rq",
                                sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
                                0, 0, NULL);
                if (!rrpc_rq_cache) {
                        kmem_cache_destroy(rrpc_gcb_cache);
                        up_write(&rrpc_lock);
                        return -ENOMEM;
                }
        }
        up_write(&rrpc_lock);

        rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
        if (!rrpc->page_pool)
                return -ENOMEM;

        rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->geo.nr_luns,
                                                                rrpc_gcb_cache);
        if (!rrpc->gcb_pool)
                return -ENOMEM;

        rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
        if (!rrpc->rq_pool)
                return -ENOMEM;

        spin_lock_init(&rrpc->inflights.lock);
        INIT_LIST_HEAD(&rrpc->inflights.reqs);

        return 0;
}

static void rrpc_core_free(struct rrpc *rrpc)
{
        mempool_destroy(rrpc->page_pool);
        mempool_destroy(rrpc->gcb_pool);
        mempool_destroy(rrpc->rq_pool);
}

static void rrpc_luns_free(struct rrpc *rrpc)
{
        struct rrpc_lun *rlun;
        int i;

        if (!rrpc->luns)
                return;

        for (i = 0; i < rrpc->nr_luns; i++) {
                rlun = &rrpc->luns[i];
                vfree(rlun->blocks);
        }

        kfree(rrpc->luns);
}

static int rrpc_bb_discovery(struct nvm_tgt_dev *dev, struct rrpc_lun *rlun)
{
        struct nvm_geo *geo = &dev->geo;
        struct rrpc_block *rblk;
        struct ppa_addr ppa;
        u8 *blks;
        int nr_blks;
        int i;
        int ret;

        if (!dev->parent->ops->get_bb_tbl)
                return 0;

        nr_blks = geo->blks_per_lun * geo->plane_mode;
        blks = kmalloc(nr_blks, GFP_KERNEL);
        if (!blks)
                return -ENOMEM;

        ppa.ppa = 0;
        ppa.g.ch = rlun->bppa.g.ch;
        ppa.g.lun = rlun->bppa.g.lun;

        ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
        if (ret) {
                pr_err("rrpc: could not get BB table\n");
                goto out;
        }

        nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
        if (nr_blks < 0) {
                /* don't leak the BB table buffer on a fold error */
                ret = nr_blks;
                goto out;
        }

        for (i = 0; i < nr_blks; i++) {
                if (blks[i] == NVM_BLK_T_FREE)
                        continue;

                rblk = &rlun->blocks[i];
                list_move_tail(&rblk->list, &rlun->bb_list);
                rblk->state = NVM_BLK_ST_BAD;
                rlun->nr_free_blocks--;
        }

out:
        kfree(blks);
        return ret;
}

static void rrpc_set_lun_ppa(struct rrpc_lun *rlun, struct ppa_addr ppa)
{
        rlun->bppa.ppa = 0;
        rlun->bppa.g.ch = ppa.g.ch;
        rlun->bppa.g.lun = ppa.g.lun;
}

static int rrpc_luns_init(struct rrpc *rrpc, struct ppa_addr *luns)
{
        struct nvm_tgt_dev *dev = rrpc->dev;
        struct nvm_geo *geo = &dev->geo;
        struct rrpc_lun *rlun;
        int i, j, ret = -EINVAL;

        if (geo->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
                pr_err("rrpc: number of pages per block too high\n");
                return -EINVAL;
        }

        spin_lock_init(&rrpc->rev_lock);

        rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
                                                                GFP_KERNEL);
        if (!rrpc->luns)
                return -ENOMEM;

        /* 1:1 mapping */
        for (i = 0; i < rrpc->nr_luns; i++) {
                rlun = &rrpc->luns[i];
                rlun->id = i;
                rrpc_set_lun_ppa(rlun, luns[i]);
                rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
                                                        geo->blks_per_lun);
                if (!rlun->blocks) {
                        ret = -ENOMEM;
                        goto err;
                }

                INIT_LIST_HEAD(&rlun->free_list);
                INIT_LIST_HEAD(&rlun->used_list);
                INIT_LIST_HEAD(&rlun->bb_list);

                for (j = 0; j < geo->blks_per_lun; j++) {
                        struct rrpc_block *rblk = &rlun->blocks[j];

                        rblk->id = j;
                        rblk->rlun = rlun;
                        rblk->state = NVM_BLK_T_FREE;
                        INIT_LIST_HEAD(&rblk->prio);
                        INIT_LIST_HEAD(&rblk->list);
                        spin_lock_init(&rblk->lock);

                        list_add_tail(&rblk->list, &rlun->free_list);
                }

                rlun->rrpc = rrpc;
                rlun->nr_free_blocks = geo->blks_per_lun;
                rlun->reserved_blocks = 2; /* for GC only */

                INIT_LIST_HEAD(&rlun->prio_list);
                INIT_LIST_HEAD(&rlun->wblk_list);

                INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
                spin_lock_init(&rlun->lock);

                if (rrpc_bb_discovery(dev, rlun))
                        goto err;
        }

        return 0;
err:
        return ret;
}

/* returns 0 on success and stores the beginning address in *begin */
static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
{
        struct nvm_tgt_dev *dev = rrpc->dev;
        sector_t size = rrpc->nr_sects * dev->geo.sec_size;
        int ret;

        size >>= 9;

        ret = nvm_get_area(dev, begin, size);
        if (!ret)
                *begin >>= (ilog2(dev->geo.sec_size) - 9);

        return ret;
}

static void rrpc_area_free(struct rrpc *rrpc)
{
        struct nvm_tgt_dev *dev = rrpc->dev;
        sector_t begin = rrpc->soffset << (ilog2(dev->geo.sec_size) - 9);

        nvm_put_area(dev, begin);
}

static void rrpc_free(struct rrpc *rrpc)
{
        rrpc_gc_free(rrpc);
        rrpc_map_free(rrpc);
        rrpc_core_free(rrpc);
        rrpc_luns_free(rrpc);
        rrpc_area_free(rrpc);

        kfree(rrpc);
}

static void rrpc_exit(void *private)
{
        struct rrpc *rrpc = private;

        del_timer(&rrpc->gc_timer);

        flush_workqueue(rrpc->krqd_wq);
        flush_workqueue(rrpc->kgc_wq);

        rrpc_free(rrpc);
}

static sector_t rrpc_capacity(void *private)
{
        struct rrpc *rrpc = private;
        struct nvm_tgt_dev *dev = rrpc->dev;
        sector_t reserved, provisioned;

        /* cur, gc, and two emergency blocks for each lun */
        reserved = rrpc->nr_luns * dev->geo.sec_per_blk * 4;
        provisioned = rrpc->nr_sects - reserved;

        if (reserved > rrpc->nr_sects) {
                pr_err("rrpc: not enough space available to expose storage.\n");
                return 0;
        }

        sector_div(provisioned, 10);
        return provisioned * 9 * NR_PHY_IN_LOG;
}

/*
 * Look up the logical address of each sector in the reverse translation map
 * and check whether the sector is still valid by comparing the forward
 * (logical to physical) mapping against the physical address. Valid sectors
 * get their block pointer restored; stale ones are marked invalid.
 */
static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
{
        struct nvm_tgt_dev *dev = rrpc->dev;
        int offset;
        struct rrpc_addr *laddr;
        u64 bpaddr, paddr, pladdr;

        bpaddr = block_to_rel_addr(rrpc, rblk);
        for (offset = 0; offset < dev->geo.sec_per_blk; offset++) {
                paddr = bpaddr + offset;

                pladdr = rrpc->rev_trans_map[paddr].addr;
                if (pladdr == ADDR_EMPTY)
                        continue;

                laddr = &rrpc->trans_map[pladdr];

                if (paddr == laddr->addr) {
                        laddr->rblk = rblk;
                } else {
                        set_bit(offset, rblk->invalid_pages);
                        rblk->nr_invalid_pages++;
                }
        }
}

static int rrpc_blocks_init(struct rrpc *rrpc)
{
        struct nvm_tgt_dev *dev = rrpc->dev;
        struct rrpc_lun *rlun;
        struct rrpc_block *rblk;
        int lun_iter, blk_iter;

        for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
                rlun = &rrpc->luns[lun_iter];

                for (blk_iter = 0; blk_iter < dev->geo.blks_per_lun;
                                                        blk_iter++) {
                        rblk = &rlun->blocks[blk_iter];
                        rrpc_block_map_update(rrpc, rblk);
                }
        }

        return 0;
}

static int rrpc_luns_configure(struct rrpc *rrpc)
{
        struct rrpc_lun *rlun;
        struct rrpc_block *rblk;
        int i;

        for (i = 0; i < rrpc->nr_luns; i++) {
                rlun = &rrpc->luns[i];

                rblk = rrpc_get_blk(rrpc, rlun, 0);
                if (!rblk)
                        goto err;
                rrpc_set_lun_cur(rlun, rblk, &rlun->cur);

                /* Emergency gc block */
                rblk = rrpc_get_blk(rrpc, rlun, 1);
                if (!rblk)
                        goto err;
                rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);
        }

        return 0;
err:
        rrpc_put_blks(rrpc);
        return -EINVAL;
}

static struct nvm_tgt_type tt_rrpc;

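/* Target constructor: reserve the device area, set up LUN and block state,
 * recover the L2P table, pick the active blocks, and arm GC. Each step
 * depends on the previous ones, so the order matters.
 */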
static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk)
{
        struct request_queue *bqueue = dev->q;
        struct request_queue *tqueue = tdisk->queue;
        struct nvm_geo *geo = &dev->geo;
        struct rrpc *rrpc;
        sector_t soffset;
        int ret;

        if (!(dev->identity.dom & NVM_RSP_L2P)) {
                pr_err("nvm: rrpc: device does not support l2p (%x)\n",
                                                        dev->identity.dom);
                return ERR_PTR(-EINVAL);
        }

        rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
        if (!rrpc)
                return ERR_PTR(-ENOMEM);

        rrpc->dev = dev;
        rrpc->disk = tdisk;

        bio_list_init(&rrpc->requeue_bios);
        spin_lock_init(&rrpc->bio_lock);
        INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);

        rrpc->nr_luns = geo->nr_luns;
        rrpc->nr_sects = (unsigned long long)geo->sec_per_lun * rrpc->nr_luns;

        /* simple round-robin strategy */
        atomic_set(&rrpc->next_lun, -1);

        ret = rrpc_area_init(rrpc, &soffset);
        if (ret < 0) {
                pr_err("nvm: rrpc: could not initialize area\n");
                return ERR_PTR(ret);
        }
        rrpc->soffset = soffset;

        ret = rrpc_luns_init(rrpc, dev->luns);
        if (ret) {
                pr_err("nvm: rrpc: could not initialize luns\n");
                goto err;
        }

        ret = rrpc_core_init(rrpc);
        if (ret) {
                pr_err("nvm: rrpc: could not initialize core\n");
                goto err;
        }

        ret = rrpc_map_init(rrpc);
        if (ret) {
                pr_err("nvm: rrpc: could not initialize maps\n");
                goto err;
        }

        ret = rrpc_blocks_init(rrpc);
        if (ret) {
                pr_err("nvm: rrpc: could not initialize state for blocks\n");
                goto err;
        }

        ret = rrpc_luns_configure(rrpc);
        if (ret) {
                pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
                goto err;
        }

        ret = rrpc_gc_init(rrpc);
        if (ret) {
                pr_err("nvm: rrpc: could not initialize gc\n");
                goto err;
        }

        /* inherit the size from the underlying device */
        blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
        blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

        pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
                        rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);

        mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));

        return rrpc;
err:
        rrpc_free(rrpc);
        return ERR_PTR(ret);
}

/* round robin, page-based FTL, and cost-based GC */
static struct nvm_tgt_type tt_rrpc = {
        .name           = "rrpc",
        .version        = {1, 0, 0},

        .make_rq        = rrpc_make_rq,
        .capacity       = rrpc_capacity,

        .init           = rrpc_init,
        .exit           = rrpc_exit,
};

static int __init rrpc_module_init(void)
{
        return nvm_register_tgt_type(&tt_rrpc);
}

static void rrpc_module_exit(void)
{
        nvm_unregister_tgt_type(&tt_rrpc);
}

module_init(rrpc_module_init);
module_exit(rrpc_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");