/*
 * Copyright (C) 2015 Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 * Implementation of a general nvm manager for Open-Channel SSDs.
 */
23 static struct nvm_target
*gen_find_target(struct gen_dev
*gn
, const char *name
)
25 struct nvm_target
*tgt
;
27 list_for_each_entry(tgt
, &gn
->targets
, list
)
28 if (!strcmp(name
, tgt
->disk
->disk_name
))
34 static const struct block_device_operations gen_fops
= {
38 static int gen_reserve_luns(struct nvm_dev
*dev
, struct nvm_target
*t
,
39 int lun_begin
, int lun_end
)
41 struct gen_dev
*gn
= dev
->mp
;
45 for (i
= lun_begin
; i
<= lun_end
; i
++) {
46 if (test_and_set_bit(i
, dev
->lun_map
)) {
47 pr_err("nvm: lun %d already allocated\n", i
);
52 list_add_tail(&lun
->list
, &t
->lun_list
);
58 while (--i
> lun_begin
) {
60 clear_bit(i
, dev
->lun_map
);
67 static void gen_release_luns(struct nvm_dev
*dev
, struct nvm_target
*t
)
69 struct nvm_lun
*lun
, *tmp
;
71 list_for_each_entry_safe(lun
, tmp
, &t
->lun_list
, list
) {
72 WARN_ON(!test_and_clear_bit(lun
->id
, dev
->lun_map
));
/* Free a target-device descriptor created by gen_create_tgt_dev(). */
static void gen_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev)
{
	kfree(tgt_dev);
}
82 static struct nvm_tgt_dev
*gen_create_tgt_dev(struct nvm_dev
*dev
,
83 int lun_begin
, int lun_end
)
85 struct nvm_tgt_dev
*tgt_dev
= NULL
;
86 int nr_luns
= lun_end
- lun_begin
+ 1;
88 tgt_dev
= kmalloc(sizeof(struct nvm_tgt_dev
), GFP_KERNEL
);
92 memcpy(&tgt_dev
->geo
, &dev
->geo
, sizeof(struct nvm_geo
));
93 tgt_dev
->geo
.nr_chnls
= (nr_luns
/ (dev
->geo
.luns_per_chnl
+ 1)) + 1;
94 tgt_dev
->geo
.nr_luns
= nr_luns
;
95 tgt_dev
->total_secs
= nr_luns
* tgt_dev
->geo
.sec_per_lun
;
97 tgt_dev
->ops
= dev
->ops
;
98 tgt_dev
->mt
= dev
->mt
;
99 memcpy(&tgt_dev
->identity
, &dev
->identity
, sizeof(struct nvm_id
));
101 tgt_dev
->parent
= dev
;
107 static int gen_create_tgt(struct nvm_dev
*dev
, struct nvm_ioctl_create
*create
)
109 struct gen_dev
*gn
= dev
->mp
;
110 struct nvm_ioctl_create_simple
*s
= &create
->conf
.s
;
111 struct request_queue
*tqueue
;
112 struct gendisk
*tdisk
;
113 struct nvm_tgt_type
*tt
;
114 struct nvm_target
*t
;
115 struct nvm_tgt_dev
*tgt_dev
;
118 tt
= nvm_find_target_type(create
->tgttype
, 1);
120 pr_err("nvm: target type %s not found\n", create
->tgttype
);
124 mutex_lock(&gn
->lock
);
125 t
= gen_find_target(gn
, create
->tgtname
);
127 pr_err("nvm: target name already exists.\n");
128 mutex_unlock(&gn
->lock
);
131 mutex_unlock(&gn
->lock
);
133 t
= kmalloc(sizeof(struct nvm_target
), GFP_KERNEL
);
137 INIT_LIST_HEAD(&t
->lun_list
);
139 if (gen_reserve_luns(dev
, t
, s
->lun_begin
, s
->lun_end
))
142 tgt_dev
= gen_create_tgt_dev(dev
, s
->lun_begin
, s
->lun_end
);
146 tqueue
= blk_alloc_queue_node(GFP_KERNEL
, dev
->q
->node
);
149 blk_queue_make_request(tqueue
, tt
->make_rq
);
151 tdisk
= alloc_disk(0);
155 sprintf(tdisk
->disk_name
, "%s", create
->tgtname
);
156 tdisk
->flags
= GENHD_FL_EXT_DEVT
;
158 tdisk
->first_minor
= 0;
159 tdisk
->fops
= &gen_fops
;
160 tdisk
->queue
= tqueue
;
162 targetdata
= tt
->init(tgt_dev
, tdisk
, &t
->lun_list
);
163 if (IS_ERR(targetdata
))
166 tdisk
->private_data
= targetdata
;
167 tqueue
->queuedata
= targetdata
;
169 blk_queue_max_hw_sectors(tqueue
, 8 * dev
->ops
->max_phys_sect
);
171 set_capacity(tdisk
, tt
->capacity(targetdata
));
178 mutex_lock(&gn
->lock
);
179 list_add_tail(&t
->list
, &gn
->targets
);
180 mutex_unlock(&gn
->lock
);
186 blk_cleanup_queue(tqueue
);
190 gen_release_luns(dev
, t
);
196 static void __gen_remove_target(struct nvm_target
*t
)
198 struct nvm_tgt_type
*tt
= t
->type
;
199 struct gendisk
*tdisk
= t
->disk
;
200 struct request_queue
*q
= tdisk
->queue
;
203 blk_cleanup_queue(q
);
206 tt
->exit(tdisk
->private_data
);
208 gen_release_luns(t
->dev
->parent
, t
);
209 gen_remove_tgt_dev(t
->dev
);
217 * gen_remove_tgt - Removes a target from the media manager
219 * @remove: ioctl structure with target name to remove.
226 static int gen_remove_tgt(struct nvm_dev
*dev
, struct nvm_ioctl_remove
*remove
)
228 struct gen_dev
*gn
= dev
->mp
;
229 struct nvm_target
*t
;
234 mutex_lock(&gn
->lock
);
235 t
= gen_find_target(gn
, remove
->tgtname
);
237 mutex_unlock(&gn
->lock
);
240 __gen_remove_target(t
);
241 mutex_unlock(&gn
->lock
);
246 static int gen_get_area(struct nvm_dev
*dev
, sector_t
*lba
, sector_t len
)
248 struct nvm_geo
*geo
= &dev
->geo
;
249 struct gen_dev
*gn
= dev
->mp
;
250 struct gen_area
*area
, *prev
, *next
;
252 sector_t max_sectors
= (geo
->sec_size
* dev
->total_secs
) >> 9;
254 if (len
> max_sectors
)
257 area
= kmalloc(sizeof(struct gen_area
), GFP_KERNEL
);
263 spin_lock(&dev
->lock
);
264 list_for_each_entry(next
, &gn
->area_list
, list
) {
265 if (begin
+ len
> next
->begin
) {
273 if ((begin
+ len
) > max_sectors
) {
274 spin_unlock(&dev
->lock
);
279 area
->begin
= *lba
= begin
;
280 area
->end
= begin
+ len
;
282 if (prev
) /* insert into sorted order */
283 list_add(&area
->list
, &prev
->list
);
285 list_add(&area
->list
, &gn
->area_list
);
286 spin_unlock(&dev
->lock
);
291 static void gen_put_area(struct nvm_dev
*dev
, sector_t begin
)
293 struct gen_dev
*gn
= dev
->mp
;
294 struct gen_area
*area
;
296 spin_lock(&dev
->lock
);
297 list_for_each_entry(area
, &gn
->area_list
, list
) {
298 if (area
->begin
!= begin
)
301 list_del(&area
->list
);
302 spin_unlock(&dev
->lock
);
306 spin_unlock(&dev
->lock
);
309 static void gen_blocks_free(struct nvm_dev
*dev
)
311 struct gen_dev
*gn
= dev
->mp
;
315 gen_for_each_lun(gn
, lun
, i
) {
322 static void gen_luns_free(struct nvm_dev
*dev
)
324 struct gen_dev
*gn
= dev
->mp
;
329 static int gen_luns_init(struct nvm_dev
*dev
, struct gen_dev
*gn
)
331 struct nvm_geo
*geo
= &dev
->geo
;
335 gn
->luns
= kcalloc(geo
->nr_luns
, sizeof(struct nvm_lun
), GFP_KERNEL
);
339 gen_for_each_lun(gn
, lun
, i
) {
340 INIT_LIST_HEAD(&lun
->free_list
);
341 INIT_LIST_HEAD(&lun
->used_list
);
342 INIT_LIST_HEAD(&lun
->bb_list
);
343 INIT_LIST_HEAD(&lun
->list
);
345 spin_lock_init(&lun
->lock
);
348 lun
->lun_id
= i
% geo
->luns_per_chnl
;
349 lun
->chnl_id
= i
/ geo
->luns_per_chnl
;
350 lun
->nr_free_blocks
= geo
->blks_per_lun
;
355 static int gen_block_bb(struct gen_dev
*gn
, struct ppa_addr ppa
,
356 u8
*blks
, int nr_blks
)
358 struct nvm_dev
*dev
= gn
->dev
;
360 struct nvm_block
*blk
;
363 nr_blks
= nvm_bb_tbl_fold(dev
, blks
, nr_blks
);
367 lun
= &gn
->luns
[(dev
->geo
.luns_per_chnl
* ppa
.g
.ch
) + ppa
.g
.lun
];
369 for (i
= 0; i
< nr_blks
; i
++) {
370 if (blks
[i
] == NVM_BLK_T_FREE
)
373 blk
= &lun
->blocks
[i
];
374 list_move_tail(&blk
->list
, &lun
->bb_list
);
375 blk
->state
= NVM_BLK_ST_BAD
;
376 lun
->nr_free_blocks
--;
382 static int gen_block_map(u64 slba
, u32 nlb
, __le64
*entries
, void *private)
384 struct nvm_dev
*dev
= private;
385 struct nvm_geo
*geo
= &dev
->geo
;
386 struct gen_dev
*gn
= dev
->mp
;
387 u64 elba
= slba
+ nlb
;
389 struct nvm_block
*blk
;
393 if (unlikely(elba
> dev
->total_secs
)) {
394 pr_err("gen: L2P data from device is out of bounds!\n");
398 for (i
= 0; i
< nlb
; i
++) {
399 u64 pba
= le64_to_cpu(entries
[i
]);
401 if (unlikely(pba
>= dev
->total_secs
&& pba
!= U64_MAX
)) {
402 pr_err("gen: L2P data entry is out of bounds!\n");
406 /* Address zero is a special one. The first page on a disk is
407 * protected. It often holds internal device boot
413 /* resolve block from physical address */
414 lun_id
= div_u64(pba
, geo
->sec_per_lun
);
415 lun
= &gn
->luns
[lun_id
];
417 /* Calculate block offset into lun */
418 pba
= pba
- (geo
->sec_per_lun
* lun_id
);
419 blk
= &lun
->blocks
[div_u64(pba
, geo
->sec_per_blk
)];
422 /* at this point, we don't know anything about the
423 * block. It's up to the FTL on top to re-etablish the
424 * block state. The block is assumed to be open.
426 list_move_tail(&blk
->list
, &lun
->used_list
);
427 blk
->state
= NVM_BLK_ST_TGT
;
428 lun
->nr_free_blocks
--;
435 static int gen_blocks_init(struct nvm_dev
*dev
, struct gen_dev
*gn
)
437 struct nvm_geo
*geo
= &dev
->geo
;
439 struct nvm_block
*block
;
440 sector_t lun_iter
, blk_iter
, cur_block_id
= 0;
444 nr_blks
= geo
->blks_per_lun
* geo
->plane_mode
;
445 blks
= kmalloc(nr_blks
, GFP_KERNEL
);
449 gen_for_each_lun(gn
, lun
, lun_iter
) {
450 lun
->blocks
= vzalloc(sizeof(struct nvm_block
) *
457 for (blk_iter
= 0; blk_iter
< geo
->blks_per_lun
; blk_iter
++) {
458 block
= &lun
->blocks
[blk_iter
];
460 INIT_LIST_HEAD(&block
->list
);
463 block
->id
= cur_block_id
++;
465 list_add_tail(&block
->list
, &lun
->free_list
);
468 if (dev
->ops
->get_bb_tbl
) {
472 ppa
.g
.ch
= lun
->chnl_id
;
473 ppa
.g
.lun
= lun
->lun_id
;
475 ret
= nvm_get_bb_tbl(dev
, ppa
, blks
);
477 pr_err("gen: could not get BB table\n");
479 ret
= gen_block_bb(gn
, ppa
, blks
, nr_blks
);
481 pr_err("gen: BB table map failed\n");
485 if ((dev
->identity
.dom
& NVM_RSP_L2P
) && dev
->ops
->get_l2p_tbl
) {
486 ret
= dev
->ops
->get_l2p_tbl(dev
, 0, dev
->total_secs
,
489 pr_err("gen: could not read L2P table.\n");
490 pr_warn("gen: default block initialization");
498 static void gen_free(struct nvm_dev
*dev
)
500 gen_blocks_free(dev
);
506 static int gen_register(struct nvm_dev
*dev
)
511 if (!try_module_get(THIS_MODULE
))
514 gn
= kzalloc(sizeof(struct gen_dev
), GFP_KERNEL
);
519 gn
->nr_luns
= dev
->geo
.nr_luns
;
520 INIT_LIST_HEAD(&gn
->area_list
);
521 mutex_init(&gn
->lock
);
522 INIT_LIST_HEAD(&gn
->targets
);
525 ret
= gen_luns_init(dev
, gn
);
527 pr_err("gen: could not initialize luns\n");
531 ret
= gen_blocks_init(dev
, gn
);
533 pr_err("gen: could not initialize blocks\n");
540 module_put(THIS_MODULE
);
544 static void gen_unregister(struct nvm_dev
*dev
)
546 struct gen_dev
*gn
= dev
->mp
;
547 struct nvm_target
*t
, *tmp
;
549 mutex_lock(&gn
->lock
);
550 list_for_each_entry_safe(t
, tmp
, &gn
->targets
, list
) {
551 if (t
->dev
->parent
!= dev
)
553 __gen_remove_target(t
);
555 mutex_unlock(&gn
->lock
);
558 module_put(THIS_MODULE
);
561 static void gen_mark_blk(struct nvm_dev
*dev
, struct ppa_addr ppa
, int type
)
563 struct nvm_geo
*geo
= &dev
->geo
;
564 struct gen_dev
*gn
= dev
->mp
;
566 struct nvm_block
*blk
;
568 pr_debug("gen: ppa (ch: %u lun: %u blk: %u pg: %u) -> %u\n",
569 ppa
.g
.ch
, ppa
.g
.lun
, ppa
.g
.blk
, ppa
.g
.pg
, type
);
571 if (unlikely(ppa
.g
.ch
> geo
->nr_chnls
||
572 ppa
.g
.lun
> geo
->luns_per_chnl
||
573 ppa
.g
.blk
> geo
->blks_per_lun
)) {
575 pr_err("gen: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u",
576 ppa
.g
.ch
, geo
->nr_chnls
,
577 ppa
.g
.lun
, geo
->luns_per_chnl
,
578 ppa
.g
.blk
, geo
->blks_per_lun
);
582 lun
= &gn
->luns
[(geo
->luns_per_chnl
* ppa
.g
.ch
) + ppa
.g
.lun
];
583 blk
= &lun
->blocks
[ppa
.g
.blk
];
585 /* will be moved to bb list on put_blk from target */
589 static void gen_end_io(struct nvm_rq
*rqd
)
591 struct nvm_tgt_instance
*ins
= rqd
->ins
;
593 ins
->tt
->end_io(rqd
);
596 static int gen_submit_io(struct nvm_dev
*dev
, struct nvm_rq
*rqd
)
598 if (!dev
->ops
->submit_io
)
601 /* Convert address space */
602 nvm_generic_to_addr_mode(dev
, rqd
);
605 rqd
->end_io
= gen_end_io
;
606 return dev
->ops
->submit_io(dev
, rqd
);
609 static int gen_erase_blk(struct nvm_dev
*dev
, struct nvm_block
*blk
, int flags
)
611 struct ppa_addr addr
= block_to_ppa(dev
, blk
);
613 return nvm_erase_ppa(dev
, &addr
, 1, flags
);
616 static void gen_lun_info_print(struct nvm_dev
*dev
)
618 struct gen_dev
*gn
= dev
->mp
;
623 gen_for_each_lun(gn
, lun
, i
) {
624 spin_lock(&lun
->lock
);
626 pr_info("%s: lun%8u\t%u\n", dev
->name
, i
,
627 lun
->nr_free_blocks
);
629 spin_unlock(&lun
->lock
);
633 static struct nvmm_type gen
= {
635 .version
= {0, 1, 0},
637 .register_mgr
= gen_register
,
638 .unregister_mgr
= gen_unregister
,
640 .create_tgt
= gen_create_tgt
,
641 .remove_tgt
= gen_remove_tgt
,
643 .submit_io
= gen_submit_io
,
644 .erase_blk
= gen_erase_blk
,
646 .mark_blk
= gen_mark_blk
,
648 .lun_info_print
= gen_lun_info_print
,
650 .get_area
= gen_get_area
,
651 .put_area
= gen_put_area
,
655 static int __init
gen_module_init(void)
657 return nvm_register_mgr(&gen
);
660 static void gen_module_exit(void)
662 nvm_unregister_mgr(&gen
);
665 module_init(gen_module_init
);
666 module_exit(gen_module_exit
);
667 MODULE_LICENSE("GPL v2");
668 MODULE_DESCRIPTION("General media manager for Open-Channel SSDs");