/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#include "rrpc.h"

static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
static DECLARE_RWSEM(rrpc_lock);

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags);

#define rrpc_for_each_lun(rrpc, rlun, i) \
		for ((i) = 0, rlun = &(rrpc)->luns[0]; \
			(i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])

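/*
 * Mark the physical page backing @a invalid in its block's invalid_pages
 * bitmap and clear the reverse (physical-to-logical) mapping. The caller
 * must hold rrpc->rev_lock; the per-block lock is taken here.
 */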
static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_block *rblk = a->rblk;
	unsigned int pg_offset;

	lockdep_assert_held(&rrpc->rev_lock);

	if (a->addr == ADDR_EMPTY || !rblk)
		return;

	spin_lock(&rblk->lock);

	div_u64_rem(a->addr, dev->geo.sec_per_blk, &pg_offset);
	WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
	rblk->nr_invalid_pages++;

	spin_unlock(&rblk->lock);

	rrpc->rev_trans_map[a->addr].addr = ADDR_EMPTY;
}

static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
							unsigned int len)
{
	sector_t i;

	spin_lock(&rrpc->rev_lock);
	for (i = slba; i < slba + len; i++) {
		struct rrpc_addr *gp = &rrpc->trans_map[i];

		rrpc_page_invalidate(rrpc, gp);
		gp->rblk = NULL;
	}
	spin_unlock(&rrpc->rev_lock);
}

static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
					sector_t laddr, unsigned int pages)
{
	struct nvm_rq *rqd;
	struct rrpc_inflight_rq *inf;

	rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
	if (!rqd)
		return ERR_PTR(-ENOMEM);

	inf = rrpc_get_inflight_rq(rqd);
	if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
		mempool_free(rqd, rrpc->rq_pool);
		return NULL;
	}

	return rqd;
}

static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);

	rrpc_unlock_laddr(rrpc, inf);

	mempool_free(rqd, rrpc->rq_pool);
}

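/*
 * Handle a discard bio: translate the bio range into logical pages, take the
 * range as an inflight request, invalidate the mapped pages and release the
 * range again. The invalidated pages are reclaimed later by GC.
 */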
static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
{
	sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
	sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
	struct nvm_rq *rqd;

	while (1) {
		rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
		if (rqd)
			break;

		schedule();
	}

	if (IS_ERR(rqd)) {
		pr_err("rrpc: unable to acquire inflight IO\n");
		bio_io_error(bio);
		return;
	}

	rrpc_invalidate_range(rrpc, slba, len);
	rrpc_inflight_laddr_release(rrpc, rqd);
}

static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_tgt_dev *dev = rrpc->dev;

	return (rblk->next_page == dev->geo.sec_per_blk);
}

/* Calculate relative addr for the given block, considering instantiated LUNs */
static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_lun *rlun = rblk->rlun;

	return rlun->id * dev->geo.sec_per_blk;
}

static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_tgt_dev *dev,
					 struct rrpc_addr *gp)
{
	struct rrpc_block *rblk = gp->rblk;
	struct rrpc_lun *rlun = rblk->rlun;
	u64 addr = gp->addr;
	struct ppa_addr paddr;

	paddr.ppa = addr;
	paddr = rrpc_linear_to_generic_addr(&dev->geo, paddr);
	paddr.g.ch = rlun->bppa.g.ch;
	paddr.g.lun = rlun->bppa.g.lun;
	paddr.g.blk = rblk->id;

	return paddr;
}

/* requires lun->lock taken */
static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
						struct rrpc_block **cur_rblk)
{
	struct rrpc *rrpc = rlun->rrpc;

	if (*cur_rblk) {
		spin_lock(&(*cur_rblk)->lock);
		WARN_ON(!block_is_full(rrpc, *cur_rblk));
		spin_unlock(&(*cur_rblk)->lock);
	}
	*cur_rblk = new_rblk;
}

static struct rrpc_block *__rrpc_get_blk(struct rrpc *rrpc,
							struct rrpc_lun *rlun)
{
	struct rrpc_block *rblk = NULL;

	if (list_empty(&rlun->free_list))
		goto out;

	rblk = list_first_entry(&rlun->free_list, struct rrpc_block, list);

	list_move_tail(&rblk->list, &rlun->used_list);
	rblk->state = NVM_BLK_ST_TGT;
	rlun->nr_free_blocks--;

out:
	return rblk;
}

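/*
 * Allocate a free block from @rlun. Non-GC allocations are refused once the
 * LUN drops below its reserved_blocks watermark, so that garbage collection
 * always has blocks left to write valid data into.
 */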
static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
							unsigned long flags)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_block *rblk;
	int is_gc = flags & NVM_IOTYPE_GC;

	spin_lock(&rlun->lock);
	if (!is_gc && rlun->nr_free_blocks < rlun->reserved_blocks) {
		pr_err("nvm: rrpc: cannot give block to non GC request\n");
		spin_unlock(&rlun->lock);
		return NULL;
	}

	rblk = __rrpc_get_blk(rrpc, rlun);
	if (!rblk) {
		pr_err("nvm: rrpc: cannot get new block\n");
		spin_unlock(&rlun->lock);
		return NULL;
	}
	spin_unlock(&rlun->lock);

	bitmap_zero(rblk->invalid_pages, dev->geo.sec_per_blk);
	rblk->next_page = 0;
	rblk->nr_invalid_pages = 0;
	atomic_set(&rblk->data_cmnt_size, 0);

	return rblk;
}

static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_lun *rlun = rblk->rlun;

	spin_lock(&rlun->lock);
	if (rblk->state & NVM_BLK_ST_TGT) {
		list_move_tail(&rblk->list, &rlun->free_list);
		rlun->nr_free_blocks++;
		rblk->state = NVM_BLK_ST_FREE;
	} else if (rblk->state & NVM_BLK_ST_BAD) {
		list_move_tail(&rblk->list, &rlun->bb_list);
		rblk->state = NVM_BLK_ST_BAD;
	} else {
		WARN_ON_ONCE(1);
		pr_err("rrpc: erroneous type (ch:%d,lun:%d,blk:%d -> %u)\n",
					rlun->bppa.g.ch, rlun->bppa.g.lun,
					rblk->id, rblk->state);
		list_move_tail(&rblk->list, &rlun->bb_list);
	}
	spin_unlock(&rlun->lock);
}

static void rrpc_put_blks(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		if (rlun->cur)
			rrpc_put_blk(rrpc, rlun->cur);
		if (rlun->gc_cur)
			rrpc_put_blk(rrpc, rlun->gc_cur);
	}
}

static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
{
	int next = atomic_inc_return(&rrpc->next_lun);

	return &rrpc->luns[next % rrpc->nr_luns];
}

static void rrpc_gc_kick(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	unsigned int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		queue_work(rrpc->krqd_wq, &rlun->ws_gc);
	}
}

/*
 * timed GC every interval.
 */
static void rrpc_gc_timer(unsigned long data)
{
	struct rrpc *rrpc = (struct rrpc *)data;

	rrpc_gc_kick(rrpc);
	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
}

static void rrpc_end_sync_bio(struct bio *bio)
{
	struct completion *waiting = bio->bi_private;

	if (bio->bi_error)
		pr_err("nvm: gc request failed (%u).\n", bio->bi_error);

	complete(waiting);
}

/*
 * rrpc_move_valid_pages -- migrate live data off the block
 * @rrpc: the 'rrpc' structure
 * @rblk: the block from which to migrate live pages
 *
 * Description:
 *   GC algorithms may call this function to migrate remaining live
 *   pages off the block prior to erasing it. This function blocks
 *   further execution until the operation is complete.
 */
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct request_queue *q = dev->q;
	struct rrpc_rev_addr *rev;
	struct nvm_rq *rqd;
	struct bio *bio;
	struct page *page;
	int slot;
	int nr_sec_per_blk = dev->geo.sec_per_blk;
	u64 phys_addr;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
		return 0;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		pr_err("nvm: could not alloc bio to gc\n");
		return -ENOMEM;
	}

	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
	if (!page) {
		bio_put(bio);
		return -ENOMEM;
	}

	while ((slot = find_first_zero_bit(rblk->invalid_pages,
					    nr_sec_per_blk)) < nr_sec_per_blk) {

		/* Lock laddr */
		phys_addr = rrpc_blk_to_ppa(rrpc, rblk) + slot;

try:
		spin_lock(&rrpc->rev_lock);
		/* Get logical address from physical to logical table */
		rev = &rrpc->rev_trans_map[phys_addr];
		/* already updated by previous regular write */
		if (rev->addr == ADDR_EMPTY) {
			spin_unlock(&rrpc->rev_lock);
			continue;
		}

		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
		if (IS_ERR_OR_NULL(rqd)) {
			spin_unlock(&rrpc->rev_lock);
			schedule();
			goto try;
		}

		spin_unlock(&rrpc->rev_lock);

		/* Perform read to do GC */
		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		/* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc read failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);
		if (bio->bi_error) {
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}

		bio_reset(bio);
		reinit_completion(&wait);

		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		/* turn the command around and write the data back to a new
		 * address
		 */
		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc write failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);

		rrpc_inflight_laddr_release(rrpc, rqd);
		if (bio->bi_error)
			goto finished;

		bio_reset(bio);
	}

finished:
	mempool_free(page, rrpc->page_pool);
	bio_put(bio);

	if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
		pr_err("nvm: failed to garbage collect block\n");
		return -EIO;
	}

	return 0;
}

static void rrpc_block_gc(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct ppa_addr ppa;

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' being reclaimed\n",
			rlun->bppa.g.ch, rlun->bppa.g.lun,
			rblk->id);

	if (rrpc_move_valid_pages(rrpc, rblk))
		goto put_back;

	ppa.g.ch = rlun->bppa.g.ch;
	ppa.g.lun = rlun->bppa.g.lun;
	ppa.g.blk = rblk->id;

	if (nvm_erase_blk(dev, &ppa, 0))
		goto put_back;

	rrpc_put_blk(rrpc, rblk);

	return;

put_back:
	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);
}

/* the block with highest number of invalid pages, will be in the beginning
 * of the list
 */
static struct rrpc_block *rblk_max_invalid(struct rrpc_block *ra,
							struct rrpc_block *rb)
{
	if (ra->nr_invalid_pages == rb->nr_invalid_pages)
		return ra;

	return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
}

/* linearly find the block with highest number of invalid pages
 * requires lun->lock
 */
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
	struct list_head *prio_list = &rlun->prio_list;
	struct rrpc_block *rblk, *max;

	BUG_ON(list_empty(prio_list));

	max = list_first_entry(prio_list, struct rrpc_block, prio);
	list_for_each_entry(rblk, prio_list, prio)
		max = rblk_max_invalid(max, rblk);

	return max;
}

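/*
 * Per-LUN GC worker. While the LUN is below its free-block target, pick the
 * block with the most invalid pages (greedy, cost-based selection) and queue
 * it for reclaim on the kgc workqueue.
 */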
static void rrpc_lun_gc(struct work_struct *work)
{
	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
	struct rrpc *rrpc = rlun->rrpc;
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_block_gc *gcb;
	unsigned int nr_blocks_need;

	nr_blocks_need = dev->geo.blks_per_lun / GC_LIMIT_INVERSE;

	if (nr_blocks_need < rrpc->nr_luns)
		nr_blocks_need = rrpc->nr_luns;

	spin_lock(&rlun->lock);
	while (nr_blocks_need > rlun->nr_free_blocks &&
					!list_empty(&rlun->prio_list)) {
		struct rrpc_block *rblk = block_prio_find_max(rlun);

		if (!rblk->nr_invalid_pages)
			break;

		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
		if (!gcb)
			break;

		list_del_init(&rblk->prio);

		WARN_ON(!block_is_full(rrpc, rblk));

		pr_debug("rrpc: selected block 'ch:%d,lun:%d,blk:%d' for GC\n",
					rlun->bppa.g.ch, rlun->bppa.g.lun,
					rblk->id);

		gcb->rrpc = rrpc;
		gcb->rblk = rblk;
		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);

		queue_work(rrpc->kgc_wq, &gcb->ws_gc);

		nr_blocks_need--;
	}
	spin_unlock(&rlun->lock);

	/* TODO: Hint that request queue can be started again */
}

static void rrpc_gc_queue(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;

	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' full, allow GC (sched)\n",
			rlun->bppa.g.ch, rlun->bppa.g.lun,
			rblk->id);
}

static const struct block_device_operations rrpc_fops = {
	.owner		= THIS_MODULE,
};

static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
{
	unsigned int i;
	struct rrpc_lun *rlun, *max_free;

	if (!is_gc)
		return get_next_lun(rrpc);

	/* during GC, we don't care about RR, instead we want to make
	 * sure that we maintain evenness between the block luns.
	 */
	max_free = &rrpc->luns[0];
	/* prevent GC-ing lun from devouring pages of a lun with
	 * little free blocks. We don't take the lock as we only need an
	 * estimate.
	 */
	rrpc_for_each_lun(rrpc, rlun, i) {
		if (rlun->nr_free_blocks > max_free->nr_free_blocks)
			max_free = rlun;
	}

	return max_free;
}

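/*
 * Install a new logical-to-physical mapping for @laddr. Any previous mapping
 * is invalidated first, and the reverse map entry is updated under rev_lock
 * so GC can translate physical pages back to logical addresses.
 */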
static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
					struct rrpc_block *rblk, u64 paddr)
{
	struct rrpc_addr *gp;
	struct rrpc_rev_addr *rev;

	BUG_ON(laddr >= rrpc->nr_sects);

	gp = &rrpc->trans_map[laddr];
	spin_lock(&rrpc->rev_lock);
	if (gp->rblk)
		rrpc_page_invalidate(rrpc, gp);

	gp->addr = paddr;
	gp->rblk = rblk;

	rev = &rrpc->rev_trans_map[gp->addr];
	rev->addr = laddr;
	spin_unlock(&rrpc->rev_lock);

	return gp;
}

static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	u64 addr = ADDR_EMPTY;

	spin_lock(&rblk->lock);
	if (block_is_full(rrpc, rblk))
		goto out;

	addr = rblk->next_page;

	rblk->next_page++;
out:
	spin_unlock(&rblk->lock);
	return addr;
}

/* Map logical address to a physical page. The mapping implements a round robin
 * approach and allocates a page from the next lun available.
 *
 * Returns rrpc_addr with the physical address and block. Returns NULL if no
 * blocks in the next rlun are available.
 */
static struct ppa_addr rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
								int is_gc)
{
	struct nvm_tgt_dev *tgt_dev = rrpc->dev;
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk, **cur_rblk;
	struct rrpc_addr *p;
	struct ppa_addr ppa;
	u64 paddr;
	int gc_force = 0;

	ppa.ppa = ADDR_EMPTY;
	rlun = rrpc_get_lun_rr(rrpc, is_gc);

	if (!is_gc && rlun->nr_free_blocks < rrpc->nr_luns * 4)
		return ppa;

	/*
	 * page allocation steps:
	 * 1. Try to allocate a new page from the current rblk
	 * 2a. If that succeeds, proceed to map it in and return
	 * 2b. If it fails, first try to allocate a new block from the media
	 *     manager, and then retry step 1. Retry until the normal block
	 *     pool is exhausted.
	 * 3. If exhausted, and the garbage collector is requesting the block,
	 *    go to the reserved block and retry step 1.
	 *    In the case that this fails as well, or it is not GC
	 *    requesting, report not able to retrieve a block and let the
	 *    caller handle further processing.
	 */

	spin_lock(&rlun->lock);
	cur_rblk = &rlun->cur;
	rblk = rlun->cur;
retry:
	paddr = rrpc_alloc_addr(rrpc, rblk);

	if (paddr != ADDR_EMPTY)
		goto done;

	if (!list_empty(&rlun->wblk_list)) {
new_blk:
		rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
									prio);
		rrpc_set_lun_cur(rlun, rblk, cur_rblk);
		list_del(&rblk->prio);
		goto retry;
	}
	spin_unlock(&rlun->lock);

	rblk = rrpc_get_blk(rrpc, rlun, gc_force);
	if (rblk) {
		spin_lock(&rlun->lock);
		list_add_tail(&rblk->prio, &rlun->wblk_list);
		/*
		 * another thread might already have added a new block,
		 * Therefore, make sure that one is used, instead of the
		 * one just added.
		 */
		goto new_blk;
	}

	if (unlikely(is_gc) && !gc_force) {
		/* retry from emergency gc block */
		cur_rblk = &rlun->gc_cur;
		rblk = rlun->gc_cur;
		gc_force = 1;
		spin_lock(&rlun->lock);
		goto retry;
	}

	pr_err("rrpc: failed to allocate new block\n");
	return ppa;
done:
	spin_unlock(&rlun->lock);
	p = rrpc_update_map(rrpc, laddr, rblk, paddr);
	if (!p)
		return ppa;

	/* return global address */
	return rrpc_ppa_to_gaddr(tgt_dev, p);
}

static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_block_gc *gcb;

	gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
	if (!gcb) {
		pr_err("rrpc: unable to queue block for gc.\n");
		return;
	}

	gcb->rrpc = rrpc;
	gcb->rblk = rblk;

	INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
	queue_work(rrpc->kgc_wq, &gcb->ws_gc);
}

static struct rrpc_lun *rrpc_ppa_to_lun(struct rrpc *rrpc, struct ppa_addr p)
{
	struct rrpc_lun *rlun = NULL;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		if (rrpc->luns[i].bppa.g.ch == p.g.ch &&
				rrpc->luns[i].bppa.g.lun == p.g.lun) {
			rlun = &rrpc->luns[i];
			break;
		}
	}

	return rlun;
}

static void __rrpc_mark_bad_block(struct rrpc *rrpc, struct ppa_addr ppa)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;

	rlun = rrpc_ppa_to_lun(rrpc, ppa);
	rblk = &rlun->blocks[ppa.g.blk];
	rblk->state = NVM_BLK_ST_BAD;

	nvm_set_tgt_bb_tbl(dev, &ppa, 1, NVM_BLK_T_GRWN_BAD);
}

static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	void *comp_bits = &rqd->ppa_status;
	struct ppa_addr ppa, prev_ppa;
	int nr_ppas = rqd->nr_ppas;
	int bit;

	if (rqd->nr_ppas == 1)
		__rrpc_mark_bad_block(rrpc, rqd->ppa_addr);

	ppa_set_empty(&prev_ppa);
	bit = -1;
	while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
		ppa = rqd->ppa_list[bit];
		if (ppa_cmp_blk(ppa, prev_ppa))
			continue;

		__rrpc_mark_bad_block(rrpc, ppa);
	}
}

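/*
 * Write completion: account the committed sectors against each block that was
 * written. Once a block is fully written (data_cmnt_size reaches sec_per_blk)
 * it is queued onto the LUN's prio list as a GC candidate.
 */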
static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
						sector_t laddr, uint8_t npages)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_addr *p;
	struct rrpc_block *rblk;
	int cmnt_size, i;

	for (i = 0; i < npages; i++) {
		p = &rrpc->trans_map[laddr + i];
		rblk = p->rblk;

		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
		if (unlikely(cmnt_size == dev->geo.sec_per_blk))
			rrpc_run_gc(rrpc, rblk);
	}
}

static void rrpc_end_io(struct nvm_rq *rqd)
{
	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	uint8_t npages = rqd->nr_ppas;
	sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;

	if (bio_data_dir(rqd->bio) == WRITE) {
		if (rqd->error == NVM_RSP_ERR_FAILWRITE)
			rrpc_mark_bad_block(rrpc, rqd);

		rrpc_end_io_write(rrpc, rrqd, laddr, npages);
	}

	bio_put(rqd->bio);

	if (rrqd->flags & NVM_IOTYPE_GC)
		return;

	rrpc_unlock_rq(rrpc, rqd);

	if (npages > 1)
		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);

	mempool_free(rqd, rrpc->rq_pool);
}

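/*
 * Build the ppa list for a multi-page read. The logical range is locked first
 * (unless the request comes from GC); an unmapped page means there is nothing
 * to read, so the request completes with NVM_IO_DONE.
 */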
static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *gp;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
		gp = &rrpc->trans_map[laddr + i];

		if (gp->rblk) {
			rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, gp);
		} else {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(dev->parent, rqd->ppa_list,
							rqd->dma_ppa_list);
			return NVM_IO_DONE;
		}
	}

	rqd->opcode = NVM_OP_HBREAD;

	return NVM_IO_OK;
}

static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
							unsigned long flags)
{
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);
	struct rrpc_addr *gp;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
	gp = &rrpc->trans_map[laddr];

	if (gp->rblk) {
		rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp);
	} else {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		return NVM_IO_DONE;
	}

	rqd->opcode = NVM_OP_HBREAD;

	return NVM_IO_OK;
}

static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct ppa_addr p;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		p = rrpc_map_page(rrpc, laddr + i, is_gc);
		if (p.ppa == ADDR_EMPTY) {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(dev->parent, rqd->ppa_list,
							rqd->dma_ppa_list);
			rrpc_gc_kick(rrpc);
			return NVM_IO_REQUEUE;
		}

		rqd->ppa_list[i] = p;
	}

	rqd->opcode = NVM_OP_HBWRITE;

	return NVM_IO_OK;
}

static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	struct ppa_addr p;
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	p = rrpc_map_page(rrpc, laddr, is_gc);
	if (p.ppa == ADDR_EMPTY) {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		rrpc_gc_kick(rrpc);
		return NVM_IO_REQUEUE;
	}

	rqd->ppa_addr = p;
	rqd->opcode = NVM_OP_HBWRITE;

	return NVM_IO_OK;
}

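/*
 * Prepare an nvm_rq for submission. Multi-page requests get a DMA-mapped ppa
 * list and go through the *_ppalist_rq() helpers; single-page requests map
 * straight into rqd->ppa_addr.
 */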
static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
{
	struct nvm_tgt_dev *dev = rrpc->dev;

	if (npages > 1) {
		rqd->ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_ppa_list);
		if (!rqd->ppa_list) {
			pr_err("rrpc: not able to allocate ppa list\n");
			return NVM_IO_ERR;
		}

		if (bio_op(bio) == REQ_OP_WRITE)
			return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
									npages);

		return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
	}

	if (bio_op(bio) == REQ_OP_WRITE)
		return rrpc_write_rq(rrpc, bio, rqd, flags);

	return rrpc_read_rq(rrpc, bio, rqd, flags);
}

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
	uint8_t nr_pages = rrpc_get_pages(bio);
	int bio_size = bio_sectors(bio) << 9;
	int err;

	if (bio_size < dev->geo.sec_size)
		return NVM_IO_ERR;
	else if (bio_size > dev->geo.max_rq_size)
		return NVM_IO_ERR;

	err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
	if (err)
		return err;

	bio_get(bio);
	rqd->bio = bio;
	rqd->ins = &rrpc->instance;
	rqd->nr_ppas = nr_pages;
	rrq->flags = flags;

	err = nvm_submit_io(dev, rqd);
	if (err) {
		pr_err("rrpc: I/O submission failed: %d\n", err);
		bio_put(bio);
		if (!(flags & NVM_IOTYPE_GC)) {
			rrpc_unlock_rq(rrpc, rqd);
			if (rqd->nr_ppas > 1)
				nvm_dev_dma_free(dev->parent, rqd->ppa_list,
							rqd->dma_ppa_list);
		}
		return NVM_IO_ERR;
	}

	return NVM_IO_OK;
}

rrpc_make_rq(struct request_queue
*q
, struct bio
*bio
)
997 struct rrpc
*rrpc
= q
->queuedata
;
1001 blk_queue_split(q
, &bio
, q
->bio_split
);
1003 if (bio_op(bio
) == REQ_OP_DISCARD
) {
1004 rrpc_discard(rrpc
, bio
);
1005 return BLK_QC_T_NONE
;
1008 rqd
= mempool_alloc(rrpc
->rq_pool
, GFP_KERNEL
);
1010 pr_err_ratelimited("rrpc: not able to queue bio.");
1012 return BLK_QC_T_NONE
;
1014 memset(rqd
, 0, sizeof(struct nvm_rq
));
1016 err
= rrpc_submit_io(rrpc
, bio
, rqd
, NVM_IOTYPE_NONE
);
1019 return BLK_QC_T_NONE
;
1026 case NVM_IO_REQUEUE
:
1027 spin_lock(&rrpc
->bio_lock
);
1028 bio_list_add(&rrpc
->requeue_bios
, bio
);
1029 spin_unlock(&rrpc
->bio_lock
);
1030 queue_work(rrpc
->kgc_wq
, &rrpc
->ws_requeue
);
1034 mempool_free(rqd
, rrpc
->rq_pool
);
1035 return BLK_QC_T_NONE
;
static void rrpc_requeue(struct work_struct *work)
{
	struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock(&rrpc->bio_lock);
	bio_list_merge(&bios, &rrpc->requeue_bios);
	bio_list_init(&rrpc->requeue_bios);
	spin_unlock(&rrpc->bio_lock);

	while ((bio = bio_list_pop(&bios)))
		rrpc_make_rq(rrpc->disk->queue, bio);
}

static void rrpc_gc_free(struct rrpc *rrpc)
{
	if (rrpc->krqd_wq)
		destroy_workqueue(rrpc->krqd_wq);

	if (rrpc->kgc_wq)
		destroy_workqueue(rrpc->kgc_wq);
}

static int rrpc_gc_init(struct rrpc *rrpc)
{
	rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
								rrpc->nr_luns);
	if (!rrpc->krqd_wq)
		return -ENOMEM;

	rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
	if (!rrpc->kgc_wq)
		return -ENOMEM;

	setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);

	return 0;
}

static void rrpc_map_free(struct rrpc *rrpc)
{
	vfree(rrpc->rev_trans_map);
	vfree(rrpc->trans_map);
}

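/*
 * Callback for nvm_get_l2p_tbl(): replay a chunk of the device's saved L2P
 * table into the in-memory forward and reverse maps, and mark every block
 * that holds mapped data as in use by the target.
 */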
static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct rrpc *rrpc = (struct rrpc *)private;
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_addr *addr = rrpc->trans_map + slba;
	struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	u64 i;

	for (i = 0; i < nlb; i++) {
		struct ppa_addr gaddr;
		u64 pba = le64_to_cpu(entries[i]);
		unsigned int mod;

		/* LNVM treats address-spaces as silos, LBA and PBA are
		 * equally large and zero-indexed.
		 */
		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
			pr_err("nvm: L2P data entry is out of bounds!\n");
			pr_err("nvm: Maybe loaded an old target L2P\n");
			return -EINVAL;
		}

		/* Address zero is a special one. The first page on a disk is
		 * protected. As it often holds internal device boot
		 * information.
		 */
		if (!pba)
			continue;

		div_u64_rem(pba, rrpc->nr_sects, &mod);

		gaddr = rrpc_recov_addr(dev, pba);
		rlun = rrpc_ppa_to_lun(rrpc, gaddr);
		if (!rlun) {
			pr_err("rrpc: l2p corruption on lba %llu\n",
							slba + i);
			return -EINVAL;
		}

		rblk = &rlun->blocks[gaddr.g.blk];
		if (!rblk->state) {
			/* at this point, we don't know anything about the
			 * block. It's up to the FTL on top to re-establish the
			 * block state. The block is assumed to be open.
			 */
			list_move_tail(&rblk->list, &rlun->used_list);
			rblk->state = NVM_BLK_ST_TGT;
			rlun->nr_free_blocks--;
		}

		addr[i].addr = pba;
		addr[i].rblk = rblk;
		raddr[mod].addr = slba + i;
	}

	return 0;
}

static int rrpc_map_init(struct rrpc *rrpc)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	sector_t i;
	int ret;

	rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
	if (!rrpc->trans_map)
		return -ENOMEM;

	rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
							* rrpc->nr_sects);
	if (!rrpc->rev_trans_map)
		return -ENOMEM;

	for (i = 0; i < rrpc->nr_sects; i++) {
		struct rrpc_addr *p = &rrpc->trans_map[i];
		struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];

		p->addr = ADDR_EMPTY;
		r->addr = ADDR_EMPTY;
	}

	/* Bring up the mapping table from device */
	ret = nvm_get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
					rrpc_l2p_update, rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not read L2P table.\n");
		return -EINVAL;
	}

	return 0;
}

/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64

static int rrpc_core_init(struct rrpc *rrpc)
{
	down_write(&rrpc_lock);
	if (!rrpc_gcb_cache) {
		rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
				sizeof(struct rrpc_block_gc), 0, 0, NULL);
		if (!rrpc_gcb_cache) {
			up_write(&rrpc_lock);
			return -ENOMEM;
		}

		rrpc_rq_cache = kmem_cache_create("rrpc_rq",
				sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
				0, 0, NULL);
		if (!rrpc_rq_cache) {
			kmem_cache_destroy(rrpc_gcb_cache);
			up_write(&rrpc_lock);
			return -ENOMEM;
		}
	}
	up_write(&rrpc_lock);

	rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
	if (!rrpc->page_pool)
		return -ENOMEM;

	rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->geo.nr_luns,
								rrpc_gcb_cache);
	if (!rrpc->gcb_pool)
		return -ENOMEM;

	rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
	if (!rrpc->rq_pool)
		return -ENOMEM;

	spin_lock_init(&rrpc->inflights.lock);
	INIT_LIST_HEAD(&rrpc->inflights.reqs);

	return 0;
}

static void rrpc_core_free(struct rrpc *rrpc)
{
	mempool_destroy(rrpc->page_pool);
	mempool_destroy(rrpc->gcb_pool);
	mempool_destroy(rrpc->rq_pool);
}

static void rrpc_luns_free(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	int i;

	if (!rrpc->luns)
		return;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		vfree(rlun->blocks);
	}

	kfree(rrpc->luns);
}

static int rrpc_bb_discovery(struct nvm_tgt_dev *dev, struct rrpc_lun *rlun)
{
	struct nvm_geo *geo = &dev->geo;
	struct rrpc_block *rblk;
	struct ppa_addr ppa;
	u8 *blks;
	int nr_blks;
	int i;
	int ret;

	if (!dev->parent->ops->get_bb_tbl)
		return 0;

	nr_blks = geo->blks_per_lun * geo->plane_mode;
	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	ppa.ppa = 0;
	ppa.g.ch = rlun->bppa.g.ch;
	ppa.g.lun = rlun->bppa.g.lun;

	ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
	if (ret) {
		pr_err("rrpc: could not get BB table\n");
		goto out;
	}

	nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
	if (nr_blks < 0) {
		ret = nr_blks;
		goto out;
	}

	for (i = 0; i < nr_blks; i++) {
		if (blks[i] == NVM_BLK_T_FREE)
			continue;

		rblk = &rlun->blocks[i];
		list_move_tail(&rblk->list, &rlun->bb_list);
		rblk->state = NVM_BLK_ST_BAD;
		rlun->nr_free_blocks--;
	}

out:
	kfree(blks);
	return ret;
}

static void rrpc_set_lun_ppa(struct rrpc_lun *rlun, struct ppa_addr ppa)
{
	rlun->bppa.ppa = 0;
	rlun->bppa.g.ch = ppa.g.ch;
	rlun->bppa.g.lun = ppa.g.lun;
}

static int rrpc_luns_init(struct rrpc *rrpc, struct ppa_addr *luns)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct nvm_geo *geo = &dev->geo;
	struct rrpc_lun *rlun;
	int i, j, ret = -EINVAL;

	if (geo->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
		pr_err("rrpc: number of pages per block too high.\n");
		return -EINVAL;
	}

	spin_lock_init(&rrpc->rev_lock);

	rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
								GFP_KERNEL);
	if (!rrpc->luns)
		return -ENOMEM;

	/* 1:1 mapping */
	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		rlun->id = i;
		rrpc_set_lun_ppa(rlun, luns[i]);
		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
							geo->blks_per_lun);
		if (!rlun->blocks) {
			ret = -ENOMEM;
			goto err;
		}

		INIT_LIST_HEAD(&rlun->free_list);
		INIT_LIST_HEAD(&rlun->used_list);
		INIT_LIST_HEAD(&rlun->bb_list);

		for (j = 0; j < geo->blks_per_lun; j++) {
			struct rrpc_block *rblk = &rlun->blocks[j];

			rblk->id = j;
			rblk->rlun = rlun;
			rblk->state = NVM_BLK_T_FREE;
			INIT_LIST_HEAD(&rblk->prio);
			INIT_LIST_HEAD(&rblk->list);
			spin_lock_init(&rblk->lock);

			list_add_tail(&rblk->list, &rlun->free_list);
		}

		rlun->rrpc = rrpc;
		rlun->nr_free_blocks = geo->blks_per_lun;
		rlun->reserved_blocks = 2; /* for GC only */

		INIT_LIST_HEAD(&rlun->prio_list);
		INIT_LIST_HEAD(&rlun->wblk_list);

		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
		spin_lock_init(&rlun->lock);

		if (rrpc_bb_discovery(dev, rlun))
			goto err;
	}

	return 0;
err:
	return ret;
}

/* returns 0 on success and stores the beginning address in *begin */
static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	sector_t size = rrpc->nr_sects * dev->geo.sec_size;
	int ret;

	size >>= 9;

	ret = nvm_get_area(dev, begin, size);
	if (!ret)
		*begin >>= (ilog2(dev->geo.sec_size) - 9);

	return ret;
}

static void rrpc_area_free(struct rrpc *rrpc)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	sector_t begin = rrpc->soffset << (ilog2(dev->geo.sec_size) - 9);

	nvm_put_area(dev, begin);
}

static void rrpc_free(struct rrpc *rrpc)
{
	rrpc_gc_free(rrpc);
	rrpc_map_free(rrpc);
	rrpc_core_free(rrpc);
	rrpc_luns_free(rrpc);
	rrpc_area_free(rrpc);

	kfree(rrpc);
}

static void rrpc_exit(void *private)
{
	struct rrpc *rrpc = private;

	del_timer(&rrpc->gc_timer);

	flush_workqueue(rrpc->krqd_wq);
	flush_workqueue(rrpc->kgc_wq);

	rrpc_free(rrpc);
}

static sector_t rrpc_capacity(void *private)
{
	struct rrpc *rrpc = private;
	struct nvm_tgt_dev *dev = rrpc->dev;
	sector_t reserved, provisioned;

	/* cur, gc, and two emergency blocks for each lun */
	reserved = rrpc->nr_luns * dev->geo.sec_per_blk * 4;
	provisioned = rrpc->nr_sects - reserved;

	if (reserved > rrpc->nr_sects) {
		pr_err("rrpc: not enough space available to expose storage.\n");
		return 0;
	}

	sector_div(provisioned, 10);
	return provisioned * 9 * NR_PHY_IN_LOG;
}

/*
 * Looks up the logical address from the reverse trans map and checks if it is
 * valid by comparing the logical-to-physical address with the physical
 * address. Returns 0 on free, otherwise 1 if in use
 */
static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	int offset;
	struct rrpc_addr *laddr;
	u64 bpaddr, paddr, pladdr;

	bpaddr = block_to_rel_addr(rrpc, rblk);
	for (offset = 0; offset < dev->geo.sec_per_blk; offset++) {
		paddr = bpaddr + offset;

		pladdr = rrpc->rev_trans_map[paddr].addr;
		if (pladdr == ADDR_EMPTY)
			continue;

		laddr = &rrpc->trans_map[pladdr];

		if (paddr == laddr->addr) {
			laddr->rblk = rblk;
		} else {
			set_bit(offset, rblk->invalid_pages);
			rblk->nr_invalid_pages++;
		}
	}
}

static int rrpc_blocks_init(struct rrpc *rrpc)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int lun_iter, blk_iter;

	for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
		rlun = &rrpc->luns[lun_iter];

		for (blk_iter = 0; blk_iter < dev->geo.blks_per_lun;
								blk_iter++) {
			rblk = &rlun->blocks[blk_iter];
			rrpc_block_map_update(rrpc, rblk);
		}
	}

	return 0;
}

static int rrpc_luns_configure(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];

		rblk = rrpc_get_blk(rrpc, rlun, 0);
		if (!rblk)
			goto err;
		rrpc_set_lun_cur(rlun, rblk, &rlun->cur);

		/* Emergency gc block */
		rblk = rrpc_get_blk(rrpc, rlun, 1);
		if (!rblk)
			goto err;
		rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);
	}

	return 0;
err:
	rrpc_put_blks(rrpc);
	return -EINVAL;
}

;
1516 static void *rrpc_init(struct nvm_tgt_dev
*dev
, struct gendisk
*tdisk
)
1518 struct request_queue
*bqueue
= dev
->q
;
1519 struct request_queue
*tqueue
= tdisk
->queue
;
1520 struct nvm_geo
*geo
= &dev
->geo
;
1525 if (!(dev
->identity
.dom
& NVM_RSP_L2P
)) {
1526 pr_err("nvm: rrpc: device does not support l2p (%x)\n",
1528 return ERR_PTR(-EINVAL
);
1531 rrpc
= kzalloc(sizeof(struct rrpc
), GFP_KERNEL
);
1533 return ERR_PTR(-ENOMEM
);
1535 rrpc
->instance
.tt
= &tt_rrpc
;
1539 bio_list_init(&rrpc
->requeue_bios
);
1540 spin_lock_init(&rrpc
->bio_lock
);
1541 INIT_WORK(&rrpc
->ws_requeue
, rrpc_requeue
);
1543 rrpc
->nr_luns
= geo
->nr_luns
;
1544 rrpc
->nr_sects
= (unsigned long long)geo
->sec_per_lun
* rrpc
->nr_luns
;
1546 /* simple round-robin strategy */
1547 atomic_set(&rrpc
->next_lun
, -1);
1549 ret
= rrpc_area_init(rrpc
, &soffset
);
1551 pr_err("nvm: rrpc: could not initialize area\n");
1552 return ERR_PTR(ret
);
1554 rrpc
->soffset
= soffset
;
1556 ret
= rrpc_luns_init(rrpc
, dev
->luns
);
1558 pr_err("nvm: rrpc: could not initialize luns\n");
1562 ret
= rrpc_core_init(rrpc
);
1564 pr_err("nvm: rrpc: could not initialize core\n");
1568 ret
= rrpc_map_init(rrpc
);
1570 pr_err("nvm: rrpc: could not initialize maps\n");
1574 ret
= rrpc_blocks_init(rrpc
);
1576 pr_err("nvm: rrpc: could not initialize state for blocks\n");
1580 ret
= rrpc_luns_configure(rrpc
);
1582 pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
1586 ret
= rrpc_gc_init(rrpc
);
1588 pr_err("nvm: rrpc: could not initialize gc\n");
1592 /* inherit the size from the underlying device */
1593 blk_queue_logical_block_size(tqueue
, queue_physical_block_size(bqueue
));
1594 blk_queue_max_hw_sectors(tqueue
, queue_max_hw_sectors(bqueue
));
1596 pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
1597 rrpc
->nr_luns
, (unsigned long long)rrpc
->nr_sects
);
1599 mod_timer(&rrpc
->gc_timer
, jiffies
+ msecs_to_jiffies(10));
1604 return ERR_PTR(ret
);
/* round robin, page-based FTL, and cost-based GC */
static struct nvm_tgt_type tt_rrpc = {
	.name		= "rrpc",
	.version	= {1, 0, 0},

	.make_rq	= rrpc_make_rq,
	.capacity	= rrpc_capacity,
	.end_io		= rrpc_end_io,

	.init		= rrpc_init,
	.exit		= rrpc_exit,
};

static int __init rrpc_module_init(void)
{
	return nvm_register_tgt_type(&tt_rrpc);
}

static void rrpc_module_exit(void)
{
	nvm_unregister_tgt_type(&tt_rrpc);
}

module_init(rrpc_module_init);
module_exit(rrpc_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");