/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */
#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>
static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);
/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
	int ch_off;
	int nr_luns;
	int *lun_offs;
};

struct nvm_dev_map {
	struct nvm_ch_map *chnls;
	int nr_chnls;
};

struct nvm_area {
	struct list_head list;
	sector_t begin;
	sector_t end;	/* end is excluded */
};
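/*
 * Illustrative sketch (editor's addition, not part of the driver): on a
 * device with 4 LUNs per channel, a target created over physical LUNs
 * 4..11 owns channels 1 and 2 and sees them as virtual channels 0 and 1:
 *
 *	dev_map->nr_chnls = 2;
 *	dev_map->chnls[0] = { .ch_off = 1, .nr_luns = 4, .lun_offs = {0,0,0,0} };
 *	dev_map->chnls[1] = { .ch_off = 1, .nr_luns = 4, .lun_offs = {0,0,0,0} };
 *
 * so virtual (ch 0, lun 0) resolves to physical (ch 1, lun 0), i.e.
 * device LUN id (0 + 1) * 4 + 0 = 4.
 */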
static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}
static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("nvm: lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;
err:
	/* roll back every bit set so far, lun_begin included (the
	 * original "--i > lun_begin" test leaked the first bit)
	 */
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}
static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}
static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->nr_chnls; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		for (j = 0; j < ch_map->nr_luns; j++) {
			int lun = j + lun_offs[j];
			int lunid = (ch * dev->geo.luns_per_chnl) + lun;

			WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);
	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      int lun_begin, int lun_end)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int nr_luns = lun_end - lun_begin + 1;
	int luns_left = nr_luns;
	int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
	int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
	int bch = lun_begin / dev->geo.luns_per_chnl;
	int blun = lun_begin % dev->geo.luns_per_chnl;
	int lunid = 0;
	int lun_balanced = 1;
	int prev_nr_luns;
	int i, j;

	nr_chnls = nr_luns / dev->geo.luns_per_chnl;
	nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;

	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(nr_chnls, sizeof(struct nvm_ch_map),
								GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
					dev->geo.luns_per_chnl : luns_left;
	for (i = 0; i < nr_chnls; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
					dev->geo.luns_per_chnl : luns_left;

		if (lun_balanced && prev_nr_luns != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->nr_luns = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].g.ch = i;
			luns[lunid++].g.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->nr_chnls = nr_chnls;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.nr_chnls = nr_chnls;
	tgt_dev->geo.nr_luns = nr_luns;
	tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
	tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));

	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}
static const struct block_device_operations nvm_fops = {
	.owner		= THIS_MODULE,
};
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_simple *s = &create->conf.s;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;
	int ret;

	tt = nvm_find_target_type(create->tgttype, 1);
	if (!tt) {
		pr_err("nvm: target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	mutex_lock(&dev->mlock);
	t = nvm_find_target(dev, create->tgtname);
	if (t) {
		pr_err("nvm: target name already exists.\n");
		mutex_unlock(&dev->mlock);
		return -EINVAL;
	}
	mutex_unlock(&dev->mlock);

	if (nvm_reserve_luns(dev, s->lun_begin, s->lun_end))
		return -ENOMEM;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_reserve;
	}

	tgt_dev = nvm_create_tgt_dev(dev, s->lun_begin, s->lun_end);
	if (!tgt_dev) {
		pr_err("nvm: could not create target device\n");
		ret = -ENOMEM;
		goto err_t;
	}

	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
	if (!tqueue) {
		ret = -ENOMEM;
		goto err_dev;
	}
	blk_queue_make_request(tqueue, tt->make_rq);

	tdisk = alloc_disk(0);
	if (!tdisk) {
		ret = -ENOMEM;
		goto err_queue;
	}

	sprintf(tdisk->disk_name, "%s", create->tgtname);
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &nvm_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk);
	if (IS_ERR(targetdata)) {
		ret = PTR_ERR(targetdata);
		goto err_init;
	}

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
		ret = -ENOMEM;
		goto err_sysfs;
	}

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	return 0;
err_sysfs:
	if (tt->exit)
		tt->exit(targetdata);
err_init:
	put_disk(tdisk);
err_queue:
	blk_cleanup_queue(tqueue);
err_dev:
	nvm_remove_tgt_dev(tgt_dev);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, s->lun_begin, s->lun_end);
	return ret;
}
static void __nvm_remove_target(struct nvm_target *t)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->sysfs_exit)
		tt->sysfs_exit(tdisk);

	if (tt->exit)
		tt->exit(tdisk->private_data);

	nvm_remove_tgt_dev(t->dev);
	put_disk(tdisk);

	list_del(&t->list);
	kfree(t);
}
/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @dev:	device
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t;

	mutex_lock(&dev->mlock);
	t = nvm_find_target(dev, remove->tgtname);
	if (!t) {
		mutex_unlock(&dev->mlock);
		return 1;
	}
	__nvm_remove_target(t);
	mutex_unlock(&dev->mlock);

	return 0;
}
static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct nvm_ch_map),
								GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.nr_chnls; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.luns_per_chnl;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->nr_luns = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
err_chnls:
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}
static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->g.ch];
	int lun_off = ch_map->lun_offs[p->g.lun];

	p->g.ch += ch_map->ch_off;
	p->g.lun += lun_off;
}
static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
	int lun_roff = ch_rmap->lun_offs[p->g.lun];

	p->g.ch -= ch_rmap->ch_off;
	p->g.lun -= lun_roff;
}
static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
				struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev, ppa_list[i]);
	}
}
static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
				struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}
static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (rqd->nr_ppas == 1) {
		nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1);
		return;
	}

	nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}
static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (rqd->nr_ppas == 1) {
		nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1);
		return;
	}

	nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}
void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
		     int len)
{
	struct nvm_geo *geo = &dev->geo;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	u64 i;

	for (i = 0; i < len; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		struct ppa_addr gaddr;
		u64 pba = le64_to_cpu(entries[i]);
		u64 diff;
		int off;

		if (!pba)
			continue;

		gaddr = linear_to_generic_addr(geo, pba);
		ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
		lun_roffs = ch_rmap->lun_offs;

		/* index of the LUN within the whole device */
		off = gaddr.g.ch * geo->luns_per_chnl + gaddr.g.lun;

		diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
				(lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;

		entries[i] -= cpu_to_le64(diff);
	}
}
EXPORT_SYMBOL(nvm_part_to_tgt);
struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
{
	struct nvm_tgt_type *tmp, *tt = NULL;

	if (lock)
		down_write(&nvm_tgtt_lock);

	list_for_each_entry(tmp, &nvm_tgt_types, list)
		if (!strcmp(name, tmp->name)) {
			tt = tmp;
			break;
		}

	if (lock)
		up_write(&nvm_tgtt_lock);
	return tt;
}
EXPORT_SYMBOL(nvm_find_target_type);
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (nvm_find_target_type(tt->name, 0))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);
void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	/* the target type list is guarded by nvm_tgtt_lock everywhere
	 * else; the original took nvm_lock here, a lock mismatch
	 */
	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);
void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
			dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);
void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);
static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}
int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (nr_ppas > dev->ops->max_phys_sect) {
		pr_err("nvm: unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(dev, &rqd);
	if (ret) {
		pr_err("nvm: failed bb mark\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
{
	struct nvm_dev *dev = tgt_dev->parent;

	return dev->ops->max_phys_sect;
}
EXPORT_SYMBOL(nvm_max_phys_sects);
int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	return dev->ops->submit_io(dev, rqd);
}
EXPORT_SYMBOL(nvm_submit_io);
int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas, int flags)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (!dev->ops->erase_block)
		return 0;

	nvm_map_to_dev(tgt_dev, ppas);

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, 1, 1);
	if (ret)
		return ret;

	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	rqd.flags = flags;

	ret = dev->ops->erase_block(dev, &rqd);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_erase_blk);
int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
		    nvm_l2p_update_fn *update_l2p, void *priv)
{
	struct nvm_dev *dev = tgt_dev->parent;

	if (!dev->ops->get_l2p_tbl)
		return 0;

	return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
}
EXPORT_SYMBOL(nvm_get_l2p_tbl);
int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_area *area, *prev, *next;
	sector_t begin = 0;
	sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;

	if (len > max_sectors)
		return -EINVAL;

	area = kmalloc(sizeof(struct nvm_area), GFP_KERNEL);
	if (!area)
		return -ENOMEM;

	prev = NULL;

	spin_lock(&dev->lock);
	list_for_each_entry(next, &dev->area_list, list) {
		if (begin + len > next->begin) {
			begin = next->end;
			prev = next;
			continue;
		}
		break;
	}

	if ((begin + len) > max_sectors) {
		spin_unlock(&dev->lock);
		kfree(area);
		return -EINVAL;
	}

	area->begin = *lba = begin;
	area->end = begin + len;

	if (prev) /* insert into sorted order */
		list_add(&area->list, &prev->list);
	else
		list_add(&area->list, &dev->area_list);
	spin_unlock(&dev->lock);

	return 0;
}
EXPORT_SYMBOL(nvm_get_area);
void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_area *area;

	spin_lock(&dev->lock);
	list_for_each_entry(area, &dev->area_list, list) {
		if (area->begin != begin)
			continue;

		list_del(&area->list);
		spin_unlock(&dev->lock);
		kfree(area);
		return;
	}
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL(nvm_put_area);
int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
			const struct ppa_addr *ppas, int nr_ppas, int vblk)
{
	struct nvm_geo *geo = &dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if ((!vblk || geo->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	if (!vblk) {
		for (i = 0; i < nr_ppas; i++)
			rqd->ppa_list[i] = ppas[i];
	} else {
		plane_cnt = geo->plane_mode;
		rqd->nr_ppas *= plane_cnt;

		for (i = 0; i < nr_ppas; i++) {
			for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
				ppa = ppas[i];
				ppa.g.pl = pl_idx;
				rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_rqd_ppalist);
void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);
void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert address space */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);
/*
 * folds a bad block list from its plane representation to its virtual
 * block representation. The fold is done in place and reduced size is
 * returned.
 *
 * If any of the planes status are bad or grown bad block, the virtual block
 * is marked bad. If not bad, the first plane state acts as the block state.
 */
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
	struct nvm_geo *geo = &dev->geo;
	int blk, offset, pl, blktype;

	if (nr_blks != geo->blks_per_lun * geo->plane_mode)
		return -EINVAL;

	for (blk = 0; blk < geo->blks_per_lun; blk++) {
		offset = blk * geo->plane_mode;
		blktype = blks[offset];

		/* Bad blocks on any planes take precedence over other types */
		for (pl = 0; pl < geo->plane_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		blks[blk] = blktype;
	}

	return geo->blks_per_lun;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);
int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       u8 *blks)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	struct nvm_geo *geo = &dev->geo;
	int i;

	dev->lps_per_blk = geo->pgs_per_blk;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* Just a linear array */
	for (i = 0; i < dev->lps_per_blk; i++)
		dev->lptbl[i] = i;

	return 0;
}
static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i, p;
	struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;

	if (!mlc->num_pairs)
		return 0;

	dev->lps_per_blk = mlc->num_pairs;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* The lower page table encoding consists of a list of bytes, where each
	 * has a lower and an upper half. The first half byte maintains the
	 * increment value and every value after is an offset added to the
	 * previous incrementation value
	 */
	dev->lptbl[0] = mlc->pairs[0] & 0xF;
	for (i = 1; i < dev->lps_per_blk; i++) {
		p = mlc->pairs[i >> 1];
		if (i & 0x1) /* upper */
			dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
		else /* lower */
			dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
	}

	return 0;
}
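/*
 * Editor's worked example of the half-byte decoding above: with
 * num_pairs = 4 and mlc->pairs[] = { 0x21, 0x12 }:
 *
 *	lptbl[0] = 0x21 & 0xF       = 1
 *	lptbl[1] = 1 + (0x21 >> 4)  = 3		(upper half of byte 0)
 *	lptbl[2] = 3 + (0x12 & 0xF) = 5		(lower half of byte 1)
 *	lptbl[3] = 5 + (0x12 >> 4)  = 6		(upper half of byte 1)
 */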
static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->grp;
	struct nvm_geo *geo = &dev->geo;
	int ret;

	/* Whole device values */
	geo->nr_chnls = grp->num_ch;
	geo->luns_per_chnl = grp->num_lun;

	/* Generic device values */
	geo->pgs_per_blk = grp->num_pg;
	geo->blks_per_lun = grp->num_blk;
	geo->nr_planes = grp->num_pln;
	geo->fpg_size = grp->fpg_sz;
	geo->pfpg_size = grp->fpg_sz * grp->num_pln;
	geo->sec_size = grp->csecs;
	geo->oob_size = grp->sos;
	geo->sec_per_pg = grp->fpg_sz / grp->csecs;
	geo->mccap = grp->mccap;
	memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	geo->plane_mode = NVM_PLANE_SINGLE;
	geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;

	if (grp->mpos & 0x020202)
		geo->plane_mode = NVM_PLANE_DOUBLE;
	if (grp->mpos & 0x040404)
		geo->plane_mode = NVM_PLANE_QUAD;

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	/* calculated values */
	geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
	geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
	geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
	geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;

	dev->total_secs = geo->nr_luns * geo->sec_per_lun;
	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	switch (grp->fmtype) {
	case NVM_ID_FMTYPE_SLC:
		if (nvm_init_slc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	case NVM_ID_FMTYPE_MLC:
		if (nvm_init_mlc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	default:
		pr_err("nvm: flash type not supported\n");
		ret = -EINVAL;
		goto err_fmtype;
	}

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	blk_queue_logical_block_size(dev->q, geo->sec_size);

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}
void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	kfree(dev->lptbl);
	kfree(dev->lun_map);
	kfree(dev);
}
static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev, &dev->identity)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%x nvm_vendor:%x\n",
			dev->identity.ver_id, dev->identity.vmnt);

	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel.\n");
		goto err;
	}

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
			dev->name, geo->sec_per_pg, geo->nr_planes,
			geo->pgs_per_blk, geo->blks_per_lun,
			geo->nr_luns, geo->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}
struct nvm_dev *nvm_alloc_dev(int node)
{
	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
EXPORT_SYMBOL(nvm_alloc_dev);
int nvm_register(struct nvm_dev *dev)
{
	int ret;

	if (!dev->q || !dev->ops)
		return -EINVAL;

	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		return -EINVAL;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
		if (!dev->dma_pool) {
			pr_err("nvm: could not create dma pool\n");
			return -ENOMEM;
		}
	}

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_init:
	dev->ops->destroy_dma_pool(dev->dma_pool);
	return ret;
}
EXPORT_SYMBOL(nvm_register);
void nvm_unregister(struct nvm_dev *dev)
{
	struct nvm_target *t, *tmp;

	mutex_lock(&dev->mlock);
	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__nvm_remove_target(t);
	}
	mutex_unlock(&dev->mlock);

	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	struct nvm_ioctl_create_simple *s;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}
	s = &create->conf.s;

	if (s->lun_begin == -1 && s->lun_end == -1) {
		s->lun_begin = 0;
		s->lun_end = dev->geo.nr_luns - 1;
	}

	if (s->lun_begin > s->lun_end || s->lun_end >= dev->geo.nr_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
			s->lun_begin, s->lun_end, dev->geo.nr_luns - 1);
		return -EINVAL;
	}

	return nvm_create_tgt(dev, create);
}
static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}
static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		sprintf(info->devname, "%s", dev->name);

		/* kept for compatibility */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		sprintf(info->bmname, "%s", "gennvm");
		i++;

		if (i > 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}
*file
, void __user
*arg
)
1207 struct nvm_ioctl_create create
;
1209 if (!capable(CAP_SYS_ADMIN
))
1212 if (copy_from_user(&create
, arg
, sizeof(struct nvm_ioctl_create
)))
1215 create
.dev
[DISK_NAME_LEN
- 1] = '\0';
1216 create
.tgttype
[NVM_TTYPE_NAME_MAX
- 1] = '\0';
1217 create
.tgtname
[DISK_NAME_LEN
- 1] = '\0';
1219 if (create
.flags
!= 0) {
1220 pr_err("nvm: no flags supported\n");
1224 return __nvm_configure_create(&create
);
static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	list_for_each_entry(dev, &nvm_devices, devices) {
		ret = nvm_remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}
/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return 0;
}
/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}
static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}
static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);