drivers/lightnvm/core.c
/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

#include "lightnvm.h"

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_mgrs);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
{
	struct nvm_tgt_type *tmp, *tt = NULL;

	if (lock)
		down_write(&nvm_tgtt_lock);

	list_for_each_entry(tmp, &nvm_tgt_types, list)
		if (!strcmp(name, tmp->name)) {
			tt = tmp;
			break;
		}

	if (lock)
		up_write(&nvm_tgtt_lock);
	return tt;
}
EXPORT_SYMBOL(nvm_find_target_type);

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (nvm_find_target_type(tt->name, 0))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	/* must match the lock taken by lookup and registration above */
	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);
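
/*
 * Illustrative sketch (not part of this file; field set abridged, see
 * struct nvm_tgt_type in include/linux/lightnvm.h): a target module such
 * as rrpc registers its type once at load time and unregisters on exit.
 * The "example" name and the abridged hook list are hypothetical.
 */
#if 0
static struct nvm_tgt_type tt_example = {
	.name		= "example",
	.version	= {1, 0, 0},
	/* .init, .exit, .make_rq, .capacity, .end_io, ... */
};

static int __init example_init(void)
{
	return nvm_register_tgt_type(&tt_example);
}

static void __exit example_exit(void)
{
	nvm_unregister_tgt_type(&tt_example);
}
#endif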

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
							dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr,
							dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvmm_type *nvm_find_mgr_type(const char *name)
{
	struct nvmm_type *mt;

	list_for_each_entry(mt, &nvm_mgrs, list)
		if (!strcmp(name, mt->name))
			return mt;

	return NULL;
}

static struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
{
	struct nvmm_type *mt;
	int ret;

	lockdep_assert_held(&nvm_lock);

	list_for_each_entry(mt, &nvm_mgrs, list) {
		if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
			continue;

		ret = mt->register_mgr(dev);
		if (ret < 0) {
			pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
								ret, dev->name);
			return NULL; /* initialization failed */
		} else if (ret > 0)
			return mt;
	}

	return NULL;
}

int nvm_register_mgr(struct nvmm_type *mt)
{
	struct nvm_dev *dev;
	int ret = 0;

	down_write(&nvm_lock);
	if (nvm_find_mgr_type(mt->name)) {
		ret = -EEXIST;
		goto finish;
	} else {
		list_add(&mt->list, &nvm_mgrs);
	}

	/* try to register media mgr if any device has none configured */
	list_for_each_entry(dev, &nvm_devices, devices) {
		if (dev->mt)
			continue;

		dev->mt = nvm_init_mgr(dev);
	}
finish:
	up_write(&nvm_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_mgr);

void nvm_unregister_mgr(struct nvmm_type *mt)
{
	if (!mt)
		return;

	down_write(&nvm_lock);
	list_del(&mt->list);
	up_write(&nvm_lock);
}
EXPORT_SYMBOL(nvm_unregister_mgr);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
							unsigned long flags)
{
	return dev->mt->get_blk(dev, lun, flags);
}
EXPORT_SYMBOL(nvm_get_blk);

/*
 * Assumes that all valid pages have already been moved to the media
 * manager on release.
 */
void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	return dev->mt->put_blk(dev, blk);
}
EXPORT_SYMBOL(nvm_put_blk);

void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
{
	return dev->mt->mark_blk(dev, ppa, type);
}
EXPORT_SYMBOL(nvm_mark_blk);

int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	return dev->mt->submit_io(dev, rqd);
}
EXPORT_SYMBOL(nvm_submit_io);

int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	return dev->mt->erase_blk(dev, blk, 0);
}
EXPORT_SYMBOL(nvm_erase_blk);

void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_ppas > 1) {
		for (i = 0; i < rqd->nr_ppas; i++)
			rqd->ppa_list[i] = dev_to_generic_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_addr_to_generic_mode);

void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_ppas > 1) {
		for (i = 0; i < rqd->nr_ppas; i++)
			rqd->ppa_list[i] = generic_to_dev_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_generic_to_addr_mode);
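
/*
 * Illustrative sketch (not part of this file): drivers expect addresses in
 * the packed device format described by dev->ppaf, while targets and the
 * helpers above reason in the generic ch/lun/blk/pg/pl/sec layout. A
 * request is packed just before submission (as __nvm_submit_ppa() below
 * does); the reverse helper restores the generic layout afterwards.
 */
#if 0
static int example_round_trip(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int ret;

	nvm_generic_to_addr_mode(dev, rqd);	/* pack for the device */
	ret = dev->ops->submit_io(dev, rqd);
	nvm_addr_to_generic_mode(dev, rqd);	/* back to generic layout */
	return ret;
}
#endif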

int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
			const struct ppa_addr *ppas, int nr_ppas, int vblk)
{
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	if (!vblk) {
		for (i = 0; i < nr_ppas; i++)
			rqd->ppa_list[i] = ppas[i];
	} else {
		plane_cnt = dev->plane_mode;
		rqd->nr_ppas *= plane_cnt;

		for (i = 0; i < nr_ppas; i++) {
			for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
				ppa = ppas[i];
				ppa.g.pl = pl_idx;
				rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_rqd_ppalist);
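
/*
 * Worked example (hypothetical geometry): with vblk set, nr_ppas == 2 and
 * dev->plane_mode == NVM_PLANE_DOUBLE, nvm_set_rqd_ppalist() above unfolds
 * the two addresses into four entries grouped by plane, and rqd->nr_ppas
 * becomes 4:
 *
 *	ppa_list[0] = ppas[0], g.pl = 0
 *	ppa_list[1] = ppas[1], g.pl = 0
 *	ppa_list[2] = ppas[0], g.pl = 1
 *	ppa_list[3] = ppas[1], g.pl = 1
 */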

void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);

int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_rq rqd;
	int ret;

	if (!dev->ops->erase_block)
		return 0;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
	if (ret)
		return ret;

	nvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->erase_block(dev, &rqd);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_erase_ppa);

void nvm_end_io(struct nvm_rq *rqd, int error)
{
	rqd->error = error;
	rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

static void nvm_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->wait;

	rqd->wait = NULL;

	complete(waiting);
}

static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
						int flags, void *buf, int len)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct bio *bio;
	int ret;
	unsigned long hang_check;

	bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
	if (IS_ERR_OR_NULL(bio))
		return -ENOMEM;

	nvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = dev;
	rqd->opcode = opcode;
	rqd->flags = flags;
	rqd->bio = bio;
	rqd->wait = &wait;
	rqd->end_io = nvm_end_io_sync;

	ret = dev->ops->submit_io(dev, rqd);
	if (ret) {
		bio_put(bio);
		return ret;
	}

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_io_timeout(&wait,
							hang_check * (HZ/2)))
			;
	else
		wait_for_completion_io(&wait);

	return rqd->error;
}

/**
 * nvm_submit_ppa_list - submit user-defined ppa list to device. The caller
 *			 must take care to free the ppa list if necessary.
 * @dev: device
 * @ppa_list: user created ppa_list
 * @nr_ppas: length of ppa_list
 * @opcode: device opcode
 * @flags: device flags
 * @buf: data buffer
 * @len: data buffer length
 */
int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
		int nr_ppas, int opcode, int flags, void *buf, int len)
{
	struct nvm_rq rqd;

	if (dev->ops->max_phys_sect < nr_ppas)
		return -EINVAL;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.nr_ppas = nr_ppas;
	if (nr_ppas > 1)
		rqd.ppa_list = ppa_list;
	else
		rqd.ppa_addr = ppa_list[0];

	return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
}
EXPORT_SYMBOL(nvm_submit_ppa_list);

/**
 * nvm_submit_ppa - submit PPAs to device. PPAs will automatically be unfolded
 *		    as single, dual, quad plane PPAs depending on device type.
 * @dev: device
 * @ppa: user created ppa_list
 * @nr_ppas: length of ppa_list
 * @opcode: device opcode
 * @flags: device flags
 * @buf: data buffer
 * @len: data buffer length
 */
int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
				int opcode, int flags, void *buf, int len)
{
	struct nvm_rq rqd;
	int ret;

	memset(&rqd, 0, sizeof(struct nvm_rq));
	ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1);
	if (ret)
		return ret;

	ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_ppa);
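
/*
 * Usage sketch (illustrative only, not part of this file): synchronously
 * read one flash page through nvm_submit_ppa(). The single-address request
 * and the SLC-mode flag mirror how the sysblock code uses this helper; the
 * function itself is hypothetical.
 */
#if 0
static int example_read_page(struct nvm_dev *dev, struct ppa_addr ppa)
{
	void *buf;
	int ret;

	buf = kmalloc(dev->fpg_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = nvm_submit_ppa(dev, &ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
							buf, dev->fpg_size);
	kfree(buf);
	return ret;
}
#endif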

/*
 * Folds a bad block list from its plane representation to its virtual
 * block representation. The fold is done in place and the reduced size
 * is returned.
 *
 * If any of the planes is marked as a bad or grown-bad block, the virtual
 * block is marked bad. If not bad, the state of the first plane acts as
 * the block state.
 */
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
	int blk, offset, pl, blktype;

	if (nr_blks != dev->blks_per_lun * dev->plane_mode)
		return -EINVAL;

	for (blk = 0; blk < dev->blks_per_lun; blk++) {
		offset = blk * dev->plane_mode;
		blktype = blks[offset];

		/* Bad blocks on any planes take precedence over other types */
		for (pl = 0; pl < dev->plane_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		blks[blk] = blktype;
	}

	return dev->blks_per_lun;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);
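
/*
 * Worked example (hypothetical values): with plane_mode == 2 and the raw
 * table { FREE, GRWN_BAD, DEV, DEV } covering two blocks, the fold above
 * leaves { GRWN_BAD, DEV }: block 0 inherits the grown-bad state of its
 * second plane, block 1 keeps the state of its first plane.
 */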

int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
{
	ppa = generic_to_dev_addr(dev, ppa);

	return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_bb_tbl);

static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i;

	dev->lps_per_blk = dev->pgs_per_blk;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* Just a linear array */
	for (i = 0; i < dev->lps_per_blk; i++)
		dev->lptbl[i] = i;

	return 0;
}

static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i, p;
	struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;

	if (!mlc->num_pairs)
		return 0;

	dev->lps_per_blk = mlc->num_pairs;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* The lower page table encoding consists of a list of bytes, where
	 * each byte holds two half-byte values. The first half-byte stores
	 * the starting value; every half-byte after it is an offset added
	 * to the previous value (see the worked example below this
	 * function).
	 */
	dev->lptbl[0] = mlc->pairs[0] & 0xF;
	for (i = 1; i < dev->lps_per_blk; i++) {
		p = mlc->pairs[i >> 1];
		if (i & 0x1) /* upper */
			dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
		else /* lower */
			dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
	}

	return 0;
}
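
/*
 * Worked example (hypothetical values): pairs[] = { 0x21, 0x13 } with
 * num_pairs == 4 decodes as
 *
 *	lptbl[0] = 0x21 & 0xF               = 1
 *	lptbl[1] = 1 + ((0x21 & 0xF0) >> 4) = 3
 *	lptbl[2] = 3 + (0x13 & 0xF)         = 6
 *	lptbl[3] = 6 + ((0x13 & 0xF0) >> 4) = 7
 */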

static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->groups[0];
	int ret;

	/* device values */
	dev->nr_chnls = grp->num_ch;
	dev->luns_per_chnl = grp->num_lun;
	dev->pgs_per_blk = grp->num_pg;
	dev->blks_per_lun = grp->num_blk;
	dev->nr_planes = grp->num_pln;
	dev->fpg_size = grp->fpg_sz;
	dev->pfpg_size = grp->fpg_sz * grp->num_pln;
	dev->sec_size = grp->csecs;
	dev->oob_size = grp->sos;
	dev->sec_per_pg = grp->fpg_sz / grp->csecs;
	dev->mccap = grp->mccap;
	memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	dev->plane_mode = NVM_PLANE_SINGLE;
	dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;

	if (grp->mpos & 0x020202)
		dev->plane_mode = NVM_PLANE_DOUBLE;
	if (grp->mpos & 0x040404)
		dev->plane_mode = NVM_PLANE_QUAD;

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	/* calculated values */
	dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
	dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
	dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
	dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;

	dev->total_secs = dev->nr_luns * dev->sec_per_lun;
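
	/*
	 * Example (hypothetical geometry): 4 sectors per page and 4 planes
	 * give sec_per_pl = 16; with 512 pages per block, sec_per_blk = 8192;
	 * with 1024 blocks per LUN, sec_per_lun = 8388608; 4 LUNs per channel
	 * across 8 channels give nr_luns = 32 and total_secs = 268435456.
	 */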
	dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	switch (grp->fmtype) {
	case NVM_ID_FMTYPE_SLC:
		if (nvm_init_slc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	case NVM_ID_FMTYPE_MLC:
		if (nvm_init_mlc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	default:
		pr_err("nvm: flash type not supported\n");
		ret = -EINVAL;
		goto err_fmtype;
	}

	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	blk_queue_logical_block_size(dev->q, dev->sec_size);

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	dev->lun_map = NULL;	/* nvm_register() frees it again on error */
	return ret;
}

static void nvm_free_mgr(struct nvm_dev *dev)
{
	if (!dev->mt)
		return;

	dev->mt->unregister_mgr(dev);
	dev->mt = NULL;
}

void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	nvm_free_mgr(dev);

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	kfree(dev->lptbl);
	kfree(dev->lun_map);
	kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
	int ret = -EINVAL;

	if (!dev->q || !dev->ops)
		return ret;

	if (dev->ops->identity(dev, &dev->identity)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
			dev->identity.ver_id, dev->identity.vmnt,
			dev->identity.cgrps);

	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel.\n");
		goto err;
	}

	if (dev->identity.cgrps != 1) {
		pr_err("nvm: only one group configuration supported.\n");
		goto err;
	}

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
			dev->name, dev->sec_per_pg, dev->nr_planes,
			dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
			dev->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}

static void nvm_exit(struct nvm_dev *dev)
{
	nvm_sysfs_unregister_dev(dev);
}

struct nvm_dev *nvm_alloc_dev(int node)
{
	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
EXPORT_SYMBOL(nvm_alloc_dev);

int nvm_register(struct nvm_dev *dev)
{
	int ret;

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		ret = -EINVAL;
		goto err_init;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
		if (!dev->dma_pool) {
			pr_err("nvm: could not create dma pool\n");
			ret = -ENOMEM;
			goto err_init;
		}
	}

	ret = nvm_sysfs_register_dev(dev);
	if (ret)
		goto err_ppalist;

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
		ret = nvm_get_sysblock(dev, &dev->sb);
		if (!ret)
			pr_err("nvm: device not initialized.\n");
		else if (ret < 0)
			pr_err("nvm: err (%d) on device initialization\n", ret);
	}

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	if (ret > 0)
		dev->mt = nvm_init_mgr(dev);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_ppalist:
	dev->ops->destroy_dma_pool(dev->dma_pool);
err_init:
	kfree(dev->lun_map);
	return ret;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_exit(dev);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	struct nvm_ioctl_create_simple *s;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (!dev->mt) {
		pr_info("nvm: device has no media manager registered.\n");
		return -ENODEV;
	}

	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}
	s = &create->conf.s;

	if (s->lun_begin > s->lun_end || s->lun_end >= dev->nr_luns) {
		pr_err("nvm: lun out of bound (%u:%u >= %u)\n",
			s->lun_begin, s->lun_end, dev->nr_luns);
		return -EINVAL;
	}

	return dev->mt->create_tgt(dev, create);
}

#ifdef CONFIG_NVM_DEBUG
static int nvm_configure_show(const char *val)
{
	struct nvm_dev *dev;
	char opcode, devname[DISK_NAME_LEN];
	int ret;

	/* %31s: leave room for the NUL in devname[DISK_NAME_LEN] */
	ret = sscanf(val, "%c %31s", &opcode, devname);
	if (ret != 2) {
		pr_err("nvm: invalid command. Use \"opcode devicename\".\n");
		return -EINVAL;
	}

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(devname);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (!dev->mt)
		return 0;

	dev->mt->lun_info_print(dev);

	return 0;
}

static int nvm_configure_remove(const char *val)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	char opcode;
	int ret = 0;

	/* %31s: tgtname is DISK_NAME_LEN (32) bytes */
	ret = sscanf(val, "%c %31s", &opcode, remove.tgtname);
	if (ret != 2) {
		pr_err("nvm: invalid command. Use \"d targetname\".\n");
		return -EINVAL;
	}

	remove.flags = 0;

	list_for_each_entry(dev, &nvm_devices, devices) {
		if (!dev->mt)	/* no media manager, no targets to remove */
			continue;

		ret = dev->mt->remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}

static int nvm_configure_create(const char *val)
{
	struct nvm_ioctl_create create;
	char opcode;
	int lun_begin, lun_end, ret;

	/* widths bounded to the NUL-terminated sizes of dev (32),
	 * tgtname (32) and tgttype (48)
	 */
	ret = sscanf(val, "%c %31s %31s %47s %u:%u", &opcode, create.dev,
						create.tgtname, create.tgttype,
						&lun_begin, &lun_end);
	if (ret != 6) {
		pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
		return -EINVAL;
	}

	create.flags = 0;
	create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
	create.conf.s.lun_begin = lun_begin;
	create.conf.s.lun_end = lun_end;

	return __nvm_configure_create(&create);
}

/*
 * Exposes an administrative interface through
 * /sys/module/lnvm/parameters/configure_debug.
 */
static int nvm_configure_by_str_event(const char *val,
					const struct kernel_param *kp)
{
	char opcode;
	int ret;

	ret = sscanf(val, "%c", &opcode);
	if (ret != 1) {
		pr_err("nvm: string must have the format of \"cmd ...\"\n");
		return -EINVAL;
	}

	switch (opcode) {
	case 'a':
		return nvm_configure_create(val);
	case 'd':
		return nvm_configure_remove(val);
	case 's':
		return nvm_configure_show(val);
	default:
		pr_err("nvm: invalid command\n");
		return -EINVAL;
	}

	return 0;
}

static int nvm_configure_get(char *buf, const struct kernel_param *kp)
{
	int sz;
	struct nvm_dev *dev;

	sz = sprintf(buf, "available devices:\n");
	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		if (sz > 4095 - DISK_NAME_LEN - 2)
			break;
		sz += sprintf(buf + sz, " %32s\n", dev->name);
	}
	up_write(&nvm_lock);

	return sz;
}

static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
	.set = nvm_configure_by_str_event,
	.get = nvm_configure_get,
};

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "lnvm."

module_param_cb(configure_debug, &nvm_configure_by_str_event_param_ops, NULL,
									0644);
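
/*
 * Example (the device and target names are hypothetical; rrpc is the
 * reference target type of this era):
 *
 *   echo "a nvme0n1 tgt0 rrpc 0:3" > /sys/module/lnvm/parameters/configure_debug
 *   echo "d tgt0" > /sys/module/lnvm/parameters/configure_debug
 *   echo "s nvme0n1" > /sys/module/lnvm/parameters/configure_debug
 */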

#endif /* CONFIG_NVM_DEBUG */

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return PTR_ERR(info);	/* preserve -EFAULT or -ENOMEM */

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		sprintf(info->devname, "%s", dev->name);
		if (dev->mt) {
			info->bmversion[0] = dev->mt->version[0];
			info->bmversion[1] = dev->mt->version[1];
			info->bmversion[2] = dev->mt->version[2];
			sprintf(info->bmname, "%s", dev->mt->name);
		} else {
			sprintf(info->bmname, "none");
		}

		i++;
		if (i >= 31) {	/* devices->info[] holds at most 31 entries */
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	list_for_each_entry(dev, &nvm_devices, devices) {
		if (!dev->mt)	/* skip devices without a media manager */
			continue;

		ret = dev->mt->remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}

static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
{
	info->seqnr = 1;
	info->erase_cnt = 0;
	info->version = 1;
}

static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
{
	struct nvm_dev *dev;
	struct nvm_sb_info info;
	int ret;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(init->dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	nvm_setup_nvm_sb_info(&info);

	strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
	info.fs_ppa.ppa = -1;

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
		ret = nvm_init_sysblock(dev, &info);
		if (ret)
			return ret;
	}

	memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));

	down_write(&nvm_lock);
	dev->mt = nvm_init_mgr(dev);
	up_write(&nvm_lock);

	return 0;
}

static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	init.dev[DISK_NAME_LEN - 1] = '\0';

	return __nvm_ioctl_dev_init(&init);
}

static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;
	struct nvm_dev *dev;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(fact.dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	nvm_free_mgr(dev);

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
		return nvm_dev_factory(dev, fact.flags);

	return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
module_misc_device(_nvm_misc);
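
/*
 * The ioctls above are reached from user space through the misc device
 * node registered here. Illustrative user-space sketch (error handling
 * omitted):
 *
 *	int fd = open("/dev/lightnvm/control", O_RDWR);
 *	struct nvm_ioctl_info info = { 0 };
 *
 *	ioctl(fd, NVM_INFO, &info);
 *	printf("%u.%u.%u\n", info.version[0], info.version[1],
 *						info.version[2]);
 */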

MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);

MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1");