/*
 * Copyright (C) 2015 Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 * Implementation of a general nvm manager for Open-Channel SSDs.
 */
23 static struct nvm_target
*gen_find_target(struct gen_dev
*gn
, const char *name
)
25 struct nvm_target
*tgt
;
27 list_for_each_entry(tgt
, &gn
->targets
, list
)
28 if (!strcmp(name
, tgt
->disk
->disk_name
))
34 static const struct block_device_operations gen_fops
= {
38 static int gen_reserve_luns(struct nvm_dev
*dev
, struct nvm_target
*t
,
39 int lun_begin
, int lun_end
)
43 for (i
= lun_begin
; i
<= lun_end
; i
++) {
44 if (test_and_set_bit(i
, dev
->lun_map
)) {
45 pr_err("nvm: lun %d already allocated\n", i
);
53 while (--i
> lun_begin
)
54 clear_bit(i
, dev
->lun_map
);
59 static void gen_release_luns_err(struct nvm_dev
*dev
, int lun_begin
,
64 for (i
= lun_begin
; i
<= lun_end
; i
++)
65 WARN_ON(!test_and_clear_bit(i
, dev
->lun_map
));
68 static void gen_remove_tgt_dev(struct nvm_tgt_dev
*tgt_dev
)
70 struct nvm_dev
*dev
= tgt_dev
->parent
;
71 struct gen_dev_map
*dev_map
= tgt_dev
->map
;
74 for (i
= 0; i
< dev_map
->nr_chnls
; i
++) {
75 struct gen_ch_map
*ch_map
= &dev_map
->chnls
[i
];
76 int *lun_offs
= ch_map
->lun_offs
;
77 int ch
= i
+ ch_map
->ch_off
;
79 for (j
= 0; j
< ch_map
->nr_luns
; j
++) {
80 int lun
= j
+ lun_offs
[j
];
81 int lunid
= (ch
* dev
->geo
.luns_per_chnl
) + lun
;
83 WARN_ON(!test_and_clear_bit(lunid
, dev
->lun_map
));
86 kfree(ch_map
->lun_offs
);
89 kfree(dev_map
->chnls
);
95 static struct nvm_tgt_dev
*gen_create_tgt_dev(struct nvm_dev
*dev
,
96 int lun_begin
, int lun_end
)
98 struct nvm_tgt_dev
*tgt_dev
= NULL
;
99 struct gen_dev_map
*dev_rmap
= dev
->rmap
;
100 struct gen_dev_map
*dev_map
;
101 struct ppa_addr
*luns
;
102 int nr_luns
= lun_end
- lun_begin
+ 1;
103 int luns_left
= nr_luns
;
104 int nr_chnls
= nr_luns
/ dev
->geo
.luns_per_chnl
;
105 int nr_chnls_mod
= nr_luns
% dev
->geo
.luns_per_chnl
;
106 int bch
= lun_begin
/ dev
->geo
.luns_per_chnl
;
107 int blun
= lun_begin
% dev
->geo
.luns_per_chnl
;
109 int lun_balanced
= 1;
113 nr_chnls
= nr_luns
/ dev
->geo
.luns_per_chnl
;
114 nr_chnls
= (nr_chnls_mod
== 0) ? nr_chnls
: nr_chnls
+ 1;
116 dev_map
= kmalloc(sizeof(struct gen_dev_map
), GFP_KERNEL
);
120 dev_map
->chnls
= kcalloc(nr_chnls
, sizeof(struct gen_ch_map
),
125 luns
= kcalloc(nr_luns
, sizeof(struct ppa_addr
), GFP_KERNEL
);
129 prev_nr_luns
= (luns_left
> dev
->geo
.luns_per_chnl
) ?
130 dev
->geo
.luns_per_chnl
: luns_left
;
131 for (i
= 0; i
< nr_chnls
; i
++) {
132 struct gen_ch_map
*ch_rmap
= &dev_rmap
->chnls
[i
+ bch
];
133 int *lun_roffs
= ch_rmap
->lun_offs
;
134 struct gen_ch_map
*ch_map
= &dev_map
->chnls
[i
];
136 int luns_in_chnl
= (luns_left
> dev
->geo
.luns_per_chnl
) ?
137 dev
->geo
.luns_per_chnl
: luns_left
;
139 if (lun_balanced
&& prev_nr_luns
!= luns_in_chnl
)
142 ch_map
->ch_off
= ch_rmap
->ch_off
= bch
;
143 ch_map
->nr_luns
= luns_in_chnl
;
145 lun_offs
= kcalloc(luns_in_chnl
, sizeof(int), GFP_KERNEL
);
149 for (j
= 0; j
< luns_in_chnl
; j
++) {
151 luns
[lunid
].g
.ch
= i
;
152 luns
[lunid
++].g
.lun
= j
;
155 lun_roffs
[j
+ blun
] = blun
;
158 ch_map
->lun_offs
= lun_offs
;
160 /* when starting a new channel, lun offset is reset */
162 luns_left
-= luns_in_chnl
;
165 dev_map
->nr_chnls
= nr_chnls
;
167 tgt_dev
= kmalloc(sizeof(struct nvm_tgt_dev
), GFP_KERNEL
);
171 memcpy(&tgt_dev
->geo
, &dev
->geo
, sizeof(struct nvm_geo
));
172 /* Target device only owns a portion of the physical device */
173 tgt_dev
->geo
.nr_chnls
= nr_chnls
;
174 tgt_dev
->geo
.nr_luns
= nr_luns
;
175 tgt_dev
->geo
.luns_per_chnl
= (lun_balanced
) ? prev_nr_luns
: -1;
176 tgt_dev
->total_secs
= nr_luns
* tgt_dev
->geo
.sec_per_lun
;
178 tgt_dev
->map
= dev_map
;
179 tgt_dev
->luns
= luns
;
180 memcpy(&tgt_dev
->identity
, &dev
->identity
, sizeof(struct nvm_id
));
182 tgt_dev
->parent
= dev
;
187 kfree(dev_map
->chnls
[i
].lun_offs
);
190 kfree(dev_map
->chnls
);
197 static int gen_create_tgt(struct nvm_dev
*dev
, struct nvm_ioctl_create
*create
)
199 struct gen_dev
*gn
= dev
->mp
;
200 struct nvm_ioctl_create_simple
*s
= &create
->conf
.s
;
201 struct request_queue
*tqueue
;
202 struct gendisk
*tdisk
;
203 struct nvm_tgt_type
*tt
;
204 struct nvm_target
*t
;
205 struct nvm_tgt_dev
*tgt_dev
;
208 tt
= nvm_find_target_type(create
->tgttype
, 1);
210 pr_err("nvm: target type %s not found\n", create
->tgttype
);
214 mutex_lock(&gn
->lock
);
215 t
= gen_find_target(gn
, create
->tgtname
);
217 pr_err("nvm: target name already exists.\n");
218 mutex_unlock(&gn
->lock
);
221 mutex_unlock(&gn
->lock
);
223 t
= kmalloc(sizeof(struct nvm_target
), GFP_KERNEL
);
227 if (gen_reserve_luns(dev
, t
, s
->lun_begin
, s
->lun_end
))
230 tgt_dev
= gen_create_tgt_dev(dev
, s
->lun_begin
, s
->lun_end
);
232 pr_err("nvm: could not create target device\n");
236 tqueue
= blk_alloc_queue_node(GFP_KERNEL
, dev
->q
->node
);
239 blk_queue_make_request(tqueue
, tt
->make_rq
);
241 tdisk
= alloc_disk(0);
245 sprintf(tdisk
->disk_name
, "%s", create
->tgtname
);
246 tdisk
->flags
= GENHD_FL_EXT_DEVT
;
248 tdisk
->first_minor
= 0;
249 tdisk
->fops
= &gen_fops
;
250 tdisk
->queue
= tqueue
;
252 targetdata
= tt
->init(tgt_dev
, tdisk
);
253 if (IS_ERR(targetdata
))
256 tdisk
->private_data
= targetdata
;
257 tqueue
->queuedata
= targetdata
;
259 blk_queue_max_hw_sectors(tqueue
, 8 * dev
->ops
->max_phys_sect
);
261 set_capacity(tdisk
, tt
->capacity(targetdata
));
268 mutex_lock(&gn
->lock
);
269 list_add_tail(&t
->list
, &gn
->targets
);
270 mutex_unlock(&gn
->lock
);
276 blk_cleanup_queue(tqueue
);
280 gen_release_luns_err(dev
, s
->lun_begin
, s
->lun_end
);
286 static void __gen_remove_target(struct nvm_target
*t
)
288 struct nvm_tgt_type
*tt
= t
->type
;
289 struct gendisk
*tdisk
= t
->disk
;
290 struct request_queue
*q
= tdisk
->queue
;
293 blk_cleanup_queue(q
);
296 tt
->exit(tdisk
->private_data
);
298 gen_remove_tgt_dev(t
->dev
);
/**
 * gen_remove_tgt - Removes a target from the media manager
 * @dev:    device
 * @remove: ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
315 static int gen_remove_tgt(struct nvm_dev
*dev
, struct nvm_ioctl_remove
*remove
)
317 struct gen_dev
*gn
= dev
->mp
;
318 struct nvm_target
*t
;
323 mutex_lock(&gn
->lock
);
324 t
= gen_find_target(gn
, remove
->tgtname
);
326 mutex_unlock(&gn
->lock
);
329 __gen_remove_target(t
);
330 mutex_unlock(&gn
->lock
);
335 static int gen_get_area(struct nvm_dev
*dev
, sector_t
*lba
, sector_t len
)
337 struct nvm_geo
*geo
= &dev
->geo
;
338 struct gen_dev
*gn
= dev
->mp
;
339 struct gen_area
*area
, *prev
, *next
;
341 sector_t max_sectors
= (geo
->sec_size
* dev
->total_secs
) >> 9;
343 if (len
> max_sectors
)
346 area
= kmalloc(sizeof(struct gen_area
), GFP_KERNEL
);
352 spin_lock(&dev
->lock
);
353 list_for_each_entry(next
, &gn
->area_list
, list
) {
354 if (begin
+ len
> next
->begin
) {
362 if ((begin
+ len
) > max_sectors
) {
363 spin_unlock(&dev
->lock
);
368 area
->begin
= *lba
= begin
;
369 area
->end
= begin
+ len
;
371 if (prev
) /* insert into sorted order */
372 list_add(&area
->list
, &prev
->list
);
374 list_add(&area
->list
, &gn
->area_list
);
375 spin_unlock(&dev
->lock
);
380 static void gen_put_area(struct nvm_dev
*dev
, sector_t begin
)
382 struct gen_dev
*gn
= dev
->mp
;
383 struct gen_area
*area
;
385 spin_lock(&dev
->lock
);
386 list_for_each_entry(area
, &gn
->area_list
, list
) {
387 if (area
->begin
!= begin
)
390 list_del(&area
->list
);
391 spin_unlock(&dev
->lock
);
395 spin_unlock(&dev
->lock
);
398 static void gen_free(struct nvm_dev
*dev
)
405 static int gen_register(struct nvm_dev
*dev
)
408 struct gen_dev_map
*dev_rmap
;
411 if (!try_module_get(THIS_MODULE
))
414 gn
= kzalloc(sizeof(struct gen_dev
), GFP_KERNEL
);
418 dev_rmap
= kmalloc(sizeof(struct gen_dev_map
), GFP_KERNEL
);
422 dev_rmap
->chnls
= kcalloc(dev
->geo
.nr_chnls
, sizeof(struct gen_ch_map
),
424 if (!dev_rmap
->chnls
)
427 for (i
= 0; i
< dev
->geo
.nr_chnls
; i
++) {
428 struct gen_ch_map
*ch_rmap
;
430 int luns_in_chnl
= dev
->geo
.luns_per_chnl
;
432 ch_rmap
= &dev_rmap
->chnls
[i
];
434 ch_rmap
->ch_off
= -1;
435 ch_rmap
->nr_luns
= luns_in_chnl
;
437 lun_roffs
= kcalloc(luns_in_chnl
, sizeof(int), GFP_KERNEL
);
441 for (j
= 0; j
< luns_in_chnl
; j
++)
444 ch_rmap
->lun_offs
= lun_roffs
;
448 gn
->nr_luns
= dev
->geo
.nr_luns
;
449 INIT_LIST_HEAD(&gn
->area_list
);
450 mutex_init(&gn
->lock
);
451 INIT_LIST_HEAD(&gn
->targets
);
453 dev
->rmap
= dev_rmap
;
458 kfree(dev_rmap
->chnls
[i
].lun_offs
);
464 module_put(THIS_MODULE
);
468 static void gen_unregister(struct nvm_dev
*dev
)
470 struct gen_dev
*gn
= dev
->mp
;
471 struct nvm_target
*t
, *tmp
;
473 mutex_lock(&gn
->lock
);
474 list_for_each_entry_safe(t
, tmp
, &gn
->targets
, list
) {
475 if (t
->dev
->parent
!= dev
)
477 __gen_remove_target(t
);
479 mutex_unlock(&gn
->lock
);
482 module_put(THIS_MODULE
);
485 static int gen_map_to_dev(struct nvm_tgt_dev
*tgt_dev
, struct ppa_addr
*p
)
487 struct gen_dev_map
*dev_map
= tgt_dev
->map
;
488 struct gen_ch_map
*ch_map
= &dev_map
->chnls
[p
->g
.ch
];
489 int lun_off
= ch_map
->lun_offs
[p
->g
.lun
];
490 struct nvm_dev
*dev
= tgt_dev
->parent
;
491 struct gen_dev_map
*dev_rmap
= dev
->rmap
;
492 struct gen_ch_map
*ch_rmap
;
495 p
->g
.ch
+= ch_map
->ch_off
;
498 ch_rmap
= &dev_rmap
->chnls
[p
->g
.ch
];
499 lun_roff
= ch_rmap
->lun_offs
[p
->g
.lun
];
501 if (unlikely(ch_rmap
->ch_off
< 0 || lun_roff
< 0)) {
502 pr_err("nvm: corrupted device partition table\n");
509 static int gen_map_to_tgt(struct nvm_tgt_dev
*tgt_dev
, struct ppa_addr
*p
)
511 struct nvm_dev
*dev
= tgt_dev
->parent
;
512 struct gen_dev_map
*dev_rmap
= dev
->rmap
;
513 struct gen_ch_map
*ch_rmap
= &dev_rmap
->chnls
[p
->g
.ch
];
514 int lun_roff
= ch_rmap
->lun_offs
[p
->g
.lun
];
516 p
->g
.ch
-= ch_rmap
->ch_off
;
517 p
->g
.lun
-= lun_roff
;
522 static int gen_trans_rq(struct nvm_tgt_dev
*tgt_dev
, struct nvm_rq
*rqd
,
529 f
= (flag
== TRANS_TGT_TO_DEV
) ? gen_map_to_dev
: gen_map_to_tgt
;
531 if (rqd
->nr_ppas
== 1)
532 return f(tgt_dev
, &rqd
->ppa_addr
);
534 for (i
= 0; i
< rqd
->nr_ppas
; i
++) {
535 ret
= f(tgt_dev
, &rqd
->ppa_list
[i
]);
544 static void gen_end_io(struct nvm_rq
*rqd
)
546 struct nvm_tgt_dev
*tgt_dev
= rqd
->dev
;
547 struct nvm_tgt_instance
*ins
= rqd
->ins
;
549 /* Convert address space */
551 gen_trans_rq(tgt_dev
, rqd
, TRANS_DEV_TO_TGT
);
553 ins
->tt
->end_io(rqd
);
556 static int gen_submit_io(struct nvm_tgt_dev
*tgt_dev
, struct nvm_rq
*rqd
)
558 struct nvm_dev
*dev
= tgt_dev
->parent
;
560 if (!dev
->ops
->submit_io
)
563 /* Convert address space */
564 gen_trans_rq(tgt_dev
, rqd
, TRANS_TGT_TO_DEV
);
565 nvm_generic_to_addr_mode(dev
, rqd
);
568 rqd
->end_io
= gen_end_io
;
569 return dev
->ops
->submit_io(dev
, rqd
);
572 static int gen_erase_blk(struct nvm_tgt_dev
*tgt_dev
, struct ppa_addr
*p
,
575 /* Convert address space */
576 gen_map_to_dev(tgt_dev
, p
);
578 return nvm_erase_ppa(tgt_dev
->parent
, p
, 1, flags
);
581 static struct ppa_addr
gen_trans_ppa(struct nvm_tgt_dev
*tgt_dev
,
582 struct ppa_addr p
, int direction
)
585 struct ppa_addr ppa
= p
;
587 f
= (direction
== TRANS_TGT_TO_DEV
) ? gen_map_to_dev
: gen_map_to_tgt
;
593 static void gen_part_to_tgt(struct nvm_dev
*dev
, sector_t
*entries
,
596 struct nvm_geo
*geo
= &dev
->geo
;
597 struct gen_dev_map
*dev_rmap
= dev
->rmap
;
600 for (i
= 0; i
< len
; i
++) {
601 struct gen_ch_map
*ch_rmap
;
603 struct ppa_addr gaddr
;
604 u64 pba
= le64_to_cpu(entries
[i
]);
611 gaddr
= linear_to_generic_addr(geo
, pba
);
612 ch_rmap
= &dev_rmap
->chnls
[gaddr
.g
.ch
];
613 lun_roffs
= ch_rmap
->lun_offs
;
615 off
= gaddr
.g
.ch
* geo
->luns_per_chnl
+ gaddr
.g
.lun
;
617 diff
= ((ch_rmap
->ch_off
* geo
->luns_per_chnl
) +
618 (lun_roffs
[gaddr
.g
.lun
])) * geo
->sec_per_lun
;
620 entries
[i
] -= cpu_to_le64(diff
);
624 static struct nvmm_type gen
= {
626 .version
= {0, 1, 0},
628 .register_mgr
= gen_register
,
629 .unregister_mgr
= gen_unregister
,
631 .create_tgt
= gen_create_tgt
,
632 .remove_tgt
= gen_remove_tgt
,
634 .submit_io
= gen_submit_io
,
635 .erase_blk
= gen_erase_blk
,
637 .get_area
= gen_get_area
,
638 .put_area
= gen_put_area
,
640 .trans_ppa
= gen_trans_ppa
,
641 .part_to_tgt
= gen_part_to_tgt
,
644 static int __init
gen_module_init(void)
646 return nvm_register_mgr(&gen
);
649 static void gen_module_exit(void)
651 nvm_unregister_mgr(&gen
);
654 module_init(gen_module_init
);
655 module_exit(gen_module_exit
);
656 MODULE_LICENSE("GPL v2");
657 MODULE_DESCRIPTION("General media manager for Open-Channel SSDs");