/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
	int ch_off;
	int nr_luns;
	int *lun_offs;
};

struct nvm_dev_map {
	struct nvm_ch_map *chnls;
	int nr_chnls;
};
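
/*
 * Illustrative mapping (assumed geometry, not from a real device): with
 * luns_per_chnl = 4, a target built over device luns 2..3 gets a single
 * nvm_ch_map with ch_off = 0, nr_luns = 2 and lun_offs = { 2, 2 }, so
 * target (ch 0, lun 0) resolves to device (ch 0, lun 2). The device-wide
 * reverse map (dev->rmap) records the same offsets for the way back.
 */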

struct nvm_area {
	struct list_head list;
	sector_t begin;
	sector_t end;	/* end is excluded */
};

static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}

static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("nvm: lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}

static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->nr_chnls; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		if (clear) {
			for (j = 0; j < ch_map->nr_luns; j++) {
				int lun = j + lun_offs[j];
				int lunid = (ch * dev->geo.luns_per_chnl) + lun;

				WARN_ON(!test_and_clear_bit(lunid,
							    dev->lun_map));
			}
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);

	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}

static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      int lun_begin, int lun_end)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int nr_luns = lun_end - lun_begin + 1;
	int luns_left = nr_luns;
	int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
	int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
	int bch = lun_begin / dev->geo.luns_per_chnl;
	int blun = lun_begin % dev->geo.luns_per_chnl;
	int lunid = 0;
	int lun_balanced = 1;
	int prev_nr_luns;
	int i, j;

	nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;

	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(nr_chnls, sizeof(struct nvm_ch_map),
				 GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
			dev->geo.luns_per_chnl : luns_left;
	for (i = 0; i < nr_chnls; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
				   dev->geo.luns_per_chnl : luns_left;

		if (lun_balanced && prev_nr_luns != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->nr_luns = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].g.ch = i;
			luns[lunid++].g.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->nr_chnls = nr_chnls;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.nr_chnls = nr_chnls;
	tgt_dev->geo.nr_luns = nr_luns;
	tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
	tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));

	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return NULL;	/* tgt_dev is still NULL on every path reaching here */
}

static const struct block_device_operations nvm_fops = {
	.owner		= THIS_MODULE,
};

static struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
{
	struct nvm_tgt_type *tmp, *tt = NULL;

	if (lock)
		down_write(&nvm_tgtt_lock);

	list_for_each_entry(tmp, &nvm_tgt_types, list)
		if (!strcmp(name, tmp->name)) {
			tt = tmp;
			break;
		}

	if (lock)
		up_write(&nvm_tgtt_lock);
	return tt;
}

static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_simple *s = &create->conf.s;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;
	int ret;

	tt = nvm_find_target_type(create->tgttype, 1);
	if (!tt) {
		pr_err("nvm: target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	mutex_lock(&dev->mlock);
	t = nvm_find_target(dev, create->tgtname);
	if (t) {
		pr_err("nvm: target name already exists.\n");
		mutex_unlock(&dev->mlock);
		return -EINVAL;
	}
	mutex_unlock(&dev->mlock);

	ret = nvm_reserve_luns(dev, s->lun_begin, s->lun_end);
	if (ret)
		return ret;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_reserve;
	}

	tgt_dev = nvm_create_tgt_dev(dev, s->lun_begin, s->lun_end);
	if (!tgt_dev) {
		pr_err("nvm: could not create target device\n");
		ret = -ENOMEM;
		goto err_t;
	}

	tdisk = alloc_disk(0);
	if (!tdisk) {
		ret = -ENOMEM;
		goto err_dev;
	}

	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
	if (!tqueue) {
		ret = -ENOMEM;
		goto err_disk;
	}
	blk_queue_make_request(tqueue, tt->make_rq);

	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &nvm_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk, create->flags);
	if (IS_ERR(targetdata)) {
		ret = PTR_ERR(targetdata);
		goto err_init;
	}

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
		ret = -ENOMEM;
		goto err_sysfs;
	}

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	__module_get(tt->owner);

	return 0;
err_sysfs:
	if (tt->exit)
		tt->exit(targetdata);
err_init:
	blk_cleanup_queue(tqueue);
	tdisk->queue = NULL;
err_disk:
	put_disk(tdisk);
err_dev:
	nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, s->lun_begin, s->lun_end);
	return ret;
}

static void __nvm_remove_target(struct nvm_target *t)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->sysfs_exit)
		tt->sysfs_exit(tdisk);

	if (tt->exit)
		tt->exit(tdisk->private_data);

	nvm_remove_tgt_dev(t->dev, 1);
	put_disk(tdisk);
	module_put(t->type->owner);

	list_del(&t->list);
	kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @dev:	device
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t;

	mutex_lock(&dev->mlock);
	t = nvm_find_target(dev, remove->tgtname);
	if (!t) {
		mutex_unlock(&dev->mlock);
		return 1;
	}
	__nvm_remove_target(t);
	mutex_unlock(&dev->mlock);

	return 0;
}

static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct nvm_ch_map),
			      GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.nr_chnls; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.luns_per_chnl;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->nr_luns = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
	kfree(rmap->chnls);	/* was leaked on this path */
err_chnls:
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}

static void nvm_unregister_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap = dev->rmap;
	int i;

	for (i = 0; i < dev->geo.nr_chnls; i++)
		kfree(rmap->chnls[i].lun_offs);

	kfree(rmap->chnls);
	kfree(rmap);
}

static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->g.ch];
	int lun_off = ch_map->lun_offs[p->g.lun];

	p->g.ch += ch_map->ch_off;
	p->g.lun += lun_off;
}

static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
	int lun_roff = ch_rmap->lun_offs[p->g.lun];

	p->g.ch -= ch_rmap->ch_off;
	p->g.lun -= lun_roff;
}
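
/*
 * Continuing the illustrative mapping above (assumed values): for a target
 * covering device luns 2..3 of channel 0, nvm_map_to_dev() turns target
 * (ch 0, lun 0) into device (ch 0, lun 2) by adding ch_off and lun_offs[],
 * and nvm_map_to_tgt() subtracts the offsets recorded in dev->rmap to
 * recover (ch 0, lun 0).
 */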

static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev, ppa_list[i]);
	}
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (rqd->nr_ppas == 1) {
		nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1);
		return;
	}

	nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (rqd->nr_ppas == 1) {
		nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1);
		return;
	}

	nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}

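/*
 * nvm_part_to_tgt() below rewrites device-relative linear sector addresses
 * into target-relative ones by subtracting the sectors owned by the luns in
 * front of the target. Illustrative numbers (assumed geometry): with
 * luns_per_chnl = 4, sec_per_lun = 8192 and a target whose first lun sits in
 * device channel 1 (ch_off = 1, lun_roffs[] = 0), every non-zero entry is
 * reduced by (1 * 4 + 0) * 8192 = 32768 sectors.
 */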
void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
		     int len)
{
	struct nvm_geo *geo = &dev->geo;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	u64 i;

	for (i = 0; i < len; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		struct ppa_addr gaddr;
		u64 pba = le64_to_cpu(entries[i]);
		u64 diff;

		if (!pba)
			continue;

		gaddr = linear_to_generic_addr(geo, pba);
		ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
		lun_roffs = ch_rmap->lun_offs;

		diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
			(lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;

		entries[i] -= cpu_to_le64(diff);
	}
}
EXPORT_SYMBOL(nvm_part_to_tgt);

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (nvm_find_target_type(tt->name, 0))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
			dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
				       dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
			       const struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &tgt_dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if (geo->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	plane_cnt = geo->plane_mode;
	rqd->nr_ppas *= plane_cnt;

	for (i = 0; i < nr_ppas; i++) {
		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			ppa = ppas[i];
			ppa.g.pl = pl_idx;
			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
		}
	}

	return 0;
}
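
/*
 * Illustrative expansion (assumed values): for nr_ppas = 2 and
 * plane_mode = 2, nvm_set_rqd_ppalist() above produces rqd->nr_ppas = 4
 * entries laid out as { ppa0/pl0, ppa1/pl0, ppa0/pl1, ppa1/pl1 }, i.e.
 * grouped by plane through the (pl_idx * nr_ppas) + i index.
 */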

static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
				 struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}

int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (nr_ppas > dev->ops->max_phys_sect) {
		pr_err("nvm: unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
	if (ret)
		return ret;

	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(tgt_dev, &rqd);
	if (ret) {
		pr_err("nvm: failed bb mark\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);

int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
{
	struct nvm_dev *dev = tgt_dev->parent;

	return dev->ops->max_phys_sect;
}
EXPORT_SYMBOL(nvm_max_phys_sects);

int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io(dev, rqd);
	if (ret)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);
	return ret;
}
EXPORT_SYMBOL(nvm_submit_io);

int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io_sync)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io_sync(dev, rqd);
	nvm_rq_dev_to_tgt(tgt_dev, rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_io_sync);

int nvm_erase_sync(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		   int nr_ppas)
{
	struct nvm_geo *geo = &tgt_dev->geo;
	struct nvm_rq rqd;
	int ret;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.opcode = NVM_OP_ERASE;
	rqd.flags = geo->plane_mode >> 1;

	ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
	if (ret)
		return ret;

	ret = nvm_submit_io_sync(tgt_dev, &rqd);
	if (ret) {
		pr_err("nvm: erase I/O submission failed: %d\n", ret);
		goto free_ppa_list;
	}

free_ppa_list:
	nvm_free_rqd_ppalist(tgt_dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_erase_sync);

int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
		    nvm_l2p_update_fn *update_l2p, void *priv)
{
	struct nvm_dev *dev = tgt_dev->parent;

	if (!dev->ops->get_l2p_tbl)
		return 0;

	return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
}
EXPORT_SYMBOL(nvm_get_l2p_tbl);

int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_area *area, *prev, *next;
	sector_t begin = 0;
	sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;

	if (len > max_sectors)
		return -EINVAL;

	area = kmalloc(sizeof(struct nvm_area), GFP_KERNEL);
	if (!area)
		return -ENOMEM;

	prev = NULL;

	spin_lock(&dev->lock);
	list_for_each_entry(next, &dev->area_list, list) {
		if (begin + len > next->begin) {
			begin = next->end;
			prev = next;
			continue;
		}
		break;
	}

	if ((begin + len) > max_sectors) {
		spin_unlock(&dev->lock);
		kfree(area);
		return -EINVAL;
	}

	area->begin = *lba = begin;
	area->end = begin + len;

	if (prev) /* insert into sorted order */
		list_add(&area->list, &prev->list);
	else
		list_add(&area->list, &dev->area_list);
	spin_unlock(&dev->lock);

	return 0;
}
EXPORT_SYMBOL(nvm_get_area);

void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_area *area;

	spin_lock(&dev->lock);
	list_for_each_entry(area, &dev->area_list, list) {
		if (area->begin != begin)
			continue;

		list_del(&area->list);
		spin_unlock(&dev->lock);
		kfree(area);
		return;
	}
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL(nvm_put_area);

void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert address space */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

/*
 * Folds a bad block list from its plane representation to its virtual
 * block representation. The fold is done in place and the reduced size
 * is returned.
 *
 * If any of the plane states is bad or grown bad, the virtual block
 * is marked bad. If not bad, the first plane state acts as the block state.
 */
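/*
 * Worked example (illustrative values): with plane_mode = 2 and
 * blks = { FREE, BAD, FREE, FREE }, block 0 folds to the bad state because
 * one of its planes is bad, while block 1 folds to its first plane's state,
 * FREE. The folded table then holds blks_per_lun = 2 entries.
 */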
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
	struct nvm_geo *geo = &dev->geo;
	int blk, offset, pl, blktype;

	if (nr_blks != geo->blks_per_lun * geo->plane_mode)
		return -EINVAL;

	for (blk = 0; blk < geo->blks_per_lun; blk++) {
		offset = blk * geo->plane_mode;
		blktype = blks[offset];

		/* Bad blocks on any planes take precedence over other types */
		for (pl = 0; pl < geo->plane_mode; pl++) {
			if (blks[offset + pl] &
			    (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		blks[blk] = blktype;
	}

	return geo->blks_per_lun;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);

int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       u8 *blks)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);

static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	struct nvm_geo *geo = &dev->geo;
	int i;

	dev->lps_per_blk = geo->pgs_per_blk;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* Just a linear array */
	for (i = 0; i < dev->lps_per_blk; i++)
		dev->lptbl[i] = i;

	return 0;
}

static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i, p;
	struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;

	if (!mlc->num_pairs)
		return 0;

	dev->lps_per_blk = mlc->num_pairs;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* The lower page table encoding packs two 4-bit entries into each
	 * byte. The first half-byte holds the initial increment; every
	 * later half-byte is an offset that is added to the previously
	 * decoded value.
	 */
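	/* Worked example (illustrative values, not from a real device):
	 * with mlc->pairs[0] = 0x10 and mlc->pairs[1] = 0x22, the decode
	 * below yields lptbl[0] = 0x10 & 0xF = 0, lptbl[1] = 0 + 1 = 1,
	 * lptbl[2] = 1 + 2 = 3 and lptbl[3] = 3 + 2 = 5.
	 */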
	dev->lptbl[0] = mlc->pairs[0] & 0xF;
	for (i = 1; i < dev->lps_per_blk; i++) {
		p = mlc->pairs[i >> 1];
		if (i & 0x1) /* upper */
			dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
		else /* lower */
			dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
	}

	return 0;
}

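/*
 * Illustrative geometry (assumed numbers, not from a real device): a device
 * reporting 16 channels, 8 luns per channel, 4 planes, 512 pages per block
 * and 4 sectors per page comes out of nvm_core_init() below with
 * sec_per_pl = 16, sec_per_blk = 8192, nr_luns = 128 and
 * sec_per_lun = blks_per_lun * 8192.
 */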
static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->grp;
	struct nvm_geo *geo = &dev->geo;
	int ret;

	/* Whole device values */
	geo->nr_chnls = grp->num_ch;
	geo->luns_per_chnl = grp->num_lun;

	/* Generic device values */
	geo->pgs_per_blk = grp->num_pg;
	geo->blks_per_lun = grp->num_blk;
	geo->nr_planes = grp->num_pln;
	geo->fpg_size = grp->fpg_sz;
	geo->pfpg_size = grp->fpg_sz * grp->num_pln;
	geo->sec_size = grp->csecs;
	geo->oob_size = grp->sos;
	geo->sec_per_pg = grp->fpg_sz / grp->csecs;
	geo->mccap = grp->mccap;
	memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	geo->plane_mode = NVM_PLANE_SINGLE;
	geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;

	if (grp->mpos & 0x020202)
		geo->plane_mode = NVM_PLANE_DOUBLE;
	if (grp->mpos & 0x040404)
		geo->plane_mode = NVM_PLANE_QUAD;

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	/* calculated values */
	geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
	geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
	geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
	geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;

	dev->total_secs = geo->nr_luns * geo->sec_per_lun;
	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
			       sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	switch (grp->fmtype) {
	case NVM_ID_FMTYPE_SLC:
		if (nvm_init_slc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	case NVM_ID_FMTYPE_MLC:
		if (nvm_init_mlc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	default:
		pr_err("nvm: flash type not supported\n");
		ret = -EINVAL;
		goto err_fmtype;
	}

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	blk_queue_logical_block_size(dev->q, geo->sec_size);
	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}

static void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	nvm_unregister_map(dev);
	kfree(dev->lptbl);
	kfree(dev->lun_map);
	kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev, &dev->identity)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%x nvm_vendor:%x\n",
		 dev->identity.ver_id, dev->identity.vmnt);

	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel.\n");
		goto err;
	}

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
		dev->name, geo->sec_per_pg, geo->nr_planes,
		geo->pgs_per_blk, geo->blks_per_lun,
		geo->nr_luns, geo->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
EXPORT_SYMBOL(nvm_alloc_dev);

int nvm_register(struct nvm_dev *dev)
{
	int ret;

	if (!dev->q || !dev->ops)
		return -EINVAL;

	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		return -EINVAL;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
		if (!dev->dma_pool) {
			pr_err("nvm: could not create dma pool\n");
			return -ENOMEM;
		}
	}

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_init:
	if (dev->dma_pool)	/* no pool was created for max_phys_sect == 1 */
		dev->ops->destroy_dma_pool(dev->dma_pool);
	return ret;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
	struct nvm_target *t, *tmp;

	mutex_lock(&dev->mlock);
	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__nvm_remove_target(t);
	}
	mutex_unlock(&dev->mlock);

	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	struct nvm_ioctl_create_simple *s;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}
	s = &create->conf.s;

	if (s->lun_begin == -1 && s->lun_end == -1) {
		s->lun_begin = 0;
		s->lun_end = dev->geo.nr_luns - 1;
	}

	if (s->lun_begin > s->lun_end || s->lun_end >= dev->geo.nr_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
		       s->lun_begin, s->lun_end, dev->geo.nr_luns - 1);
		return -EINVAL;
	}

	return nvm_create_tgt(dev, create);
}

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return PTR_ERR(info);

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_tgtt_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_tgtt_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		strlcpy(info->devname, dev->name, sizeof(info->devname));

		/* kept for compatibility */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
		i++;

		if (i >= ARRAY_SIZE(devices->info)) {
			pr_err("nvm: max %zu devices can be reported.\n",
			       ARRAY_SIZE(devices->info));
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		__u32 flags = create.flags;

		/* Check for valid flags */
		if (flags & NVM_TARGET_FACTORY)
			flags &= ~NVM_TARGET_FACTORY;

		if (flags) {
			pr_err("nvm: flag not supported\n");
			return -EINVAL;
		}
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	list_for_each_entry(dev, &nvm_devices, devices) {
		ret = nvm_remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return 0;
}

/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);