/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
	int ch_off;
	int nr_luns;
	int *lun_offs;
};

struct nvm_dev_map {
	struct nvm_ch_map *chnls;
	int nr_chnls;
};

struct nvm_area {
	struct list_head list;
	sector_t begin;
	sector_t end;	/* end is excluded */
};

static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}

static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("nvm: lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}

static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->nr_chnls; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		for (j = 0; j < ch_map->nr_luns; j++) {
			int lun = j + lun_offs[j];
			int lunid = (ch * dev->geo.luns_per_chnl) + lun;

			WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);

	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}

static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      int lun_begin, int lun_end)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int nr_luns = lun_end - lun_begin + 1;
	int luns_left = nr_luns;
	int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
	int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
	int bch = lun_begin / dev->geo.luns_per_chnl;
	int blun = lun_begin % dev->geo.luns_per_chnl;
	int lunid = 0;
	int lun_balanced = 1;
	int prev_nr_luns;
	int i, j;

	nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;

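	/*
	 * Example of the geometry split, assuming luns_per_chnl = 8 and a
	 * request for luns 8..19: nr_luns = 12, nr_chnls rounds up to 2,
	 * bch = 1 and blun = 0. The target then owns all eight luns of
	 * physical channel 1 plus the first four luns of channel 2, and
	 * lun_balanced ends up 0 because the channels contribute unequal
	 * lun counts (so tgt_dev->geo.luns_per_chnl is set to -1 below).
	 */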
	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(nr_chnls, sizeof(struct nvm_ch_map),
				 GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
			dev->geo.luns_per_chnl : luns_left;
	for (i = 0; i < nr_chnls; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
				dev->geo.luns_per_chnl : luns_left;

		if (lun_balanced && prev_nr_luns != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->nr_luns = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].g.ch = i;
			luns[lunid++].g.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->nr_chnls = nr_chnls;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.nr_chnls = nr_chnls;
	tgt_dev->geo.nr_luns = nr_luns;
	tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
	tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));

	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}

static const struct block_device_operations nvm_fops = {
	.owner = THIS_MODULE,
};

static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_simple *s = &create->conf.s;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;

	tt = nvm_find_target_type(create->tgttype, 1);
	if (!tt) {
		pr_err("nvm: target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	mutex_lock(&dev->mlock);
	t = nvm_find_target(dev, create->tgtname);
	if (t) {
		pr_err("nvm: target name already exists.\n");
		mutex_unlock(&dev->mlock);
		return -EINVAL;
	}
	mutex_unlock(&dev->mlock);

	if (nvm_reserve_luns(dev, s->lun_begin, s->lun_end))
		return -ENOMEM;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t)
		goto err_reserve;

	tgt_dev = nvm_create_tgt_dev(dev, s->lun_begin, s->lun_end);
	if (!tgt_dev) {
		pr_err("nvm: could not create target device\n");
		goto err_t;
	}

	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
	if (!tqueue)
		goto err_dev;
	blk_queue_make_request(tqueue, tt->make_rq);

	tdisk = alloc_disk(0);
	if (!tdisk)
		goto err_queue;

	sprintf(tdisk->disk_name, "%s", create->tgtname);
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &nvm_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk);
	if (IS_ERR(targetdata))
		goto err_init;

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	return 0;
err_init:
	put_disk(tdisk);
err_queue:
	blk_cleanup_queue(tqueue);
err_dev:
	nvm_remove_tgt_dev(tgt_dev);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, s->lun_begin, s->lun_end);
	return -ENOMEM;
}

static void __nvm_remove_target(struct nvm_target *t)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->exit)
		tt->exit(tdisk->private_data);

	nvm_remove_tgt_dev(t->dev);
	put_disk(tdisk);

	list_del(&t->list);
	kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @dev:	device
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t;

	mutex_lock(&dev->mlock);
	t = nvm_find_target(dev, remove->tgtname);
	if (!t) {
		mutex_unlock(&dev->mlock);
		return 1;
	}
	__nvm_remove_target(t);
	mutex_unlock(&dev->mlock);

	return 0;
}

static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct nvm_ch_map),
			      GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.nr_chnls; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.luns_per_chnl;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->nr_luns = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
	kfree(rmap->chnls);
err_chnls:
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}

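/*
 * Translate a target-relative address to a device address and back. For a
 * target whose first lun maps to physical channel 2, lun 4 (ch_off = 2 and
 * lun_offs[lun] = 4, say), nvm_map_to_dev() turns target (ch 0, lun 0) into
 * device (ch 2, lun 4), and nvm_map_to_tgt() undoes it via the reverse map.
 */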
static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->g.ch];
	int lun_off = ch_map->lun_offs[p->g.lun];

	p->g.ch += ch_map->ch_off;
	p->g.lun += lun_off;
}

static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
	int lun_roff = ch_rmap->lun_offs[p->g.lun];

	p->g.ch -= ch_rmap->ch_off;
	p->g.lun -= lun_roff;
}

static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev, ppa_list[i]);
	}
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (rqd->nr_ppas == 1) {
		nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1);
		return;
	}

	nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (rqd->nr_ppas == 1) {
		nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1);
		return;
	}

	nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}

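/*
 * Rebase a table of device-linear sector addresses onto the target's own
 * lun numbering. For instance, assuming a target that starts at device
 * lun 8 (ch_off = 1, luns_per_chnl = 8, lun_roffs all 0) and
 * sec_per_lun = 4096, every non-zero entry is shifted down by 8 * 4096
 * sectors. Entries stay in little-endian form, as read from the device.
 */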
void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
		     int len)
{
	struct nvm_geo *geo = &dev->geo;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	u64 i;

	for (i = 0; i < len; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		struct ppa_addr gaddr;
		u64 pba = le64_to_cpu(entries[i]);
		u64 diff;

		if (!pba)
			continue;

		gaddr = linear_to_generic_addr(geo, pba);
		ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
		lun_roffs = ch_rmap->lun_offs;

		diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
			(lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;

		entries[i] -= cpu_to_le64(diff);
	}
}
EXPORT_SYMBOL(nvm_part_to_tgt);

struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
{
	struct nvm_tgt_type *tmp, *tt = NULL;

	if (lock)
		down_write(&nvm_tgtt_lock);

	list_for_each_entry(tmp, &nvm_tgt_types, list)
		if (!strcmp(name, tmp->name)) {
			tt = tmp;
			break;
		}

	if (lock)
		up_write(&nvm_tgtt_lock);
	return tt;
}
EXPORT_SYMBOL(nvm_find_target_type);

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (nvm_find_target_type(tt->name, 0))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
			dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
				       dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (nr_ppas > dev->ops->max_phys_sect) {
		pr_err("nvm: unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(dev, &rqd);
	if (ret) {
		pr_err("nvm: failed bb mark\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);

int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
{
	struct nvm_dev *dev = tgt_dev->parent;

	return dev->ops->max_phys_sect;
}
EXPORT_SYMBOL(nvm_max_phys_sects);

int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	return dev->ops->submit_io(dev, rqd);
}
EXPORT_SYMBOL(nvm_submit_io);

int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas, int flags)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (!dev->ops->erase_block)
		return 0;

	nvm_map_to_dev(tgt_dev, ppas);

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, 1, 1);
	if (ret)
		return ret;

	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	rqd.flags = flags;

	ret = dev->ops->erase_block(dev, &rqd);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_erase_blk);

int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
		    nvm_l2p_update_fn *update_l2p, void *priv)
{
	struct nvm_dev *dev = tgt_dev->parent;

	if (!dev->ops->get_l2p_tbl)
		return 0;

	return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
}
EXPORT_SYMBOL(nvm_get_l2p_tbl);

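/*
 * Reserve a contiguous range of len sectors on the device, first-fit
 * against the sorted area_list. E.g. with existing areas [0, 64) and
 * [128, 192), a request for 64 sectors lands at 64, while a request for
 * 96 sectors skips that gap and lands at 192, provided the result still
 * fits below max_sectors.
 */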
int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_area *area, *prev, *next;
	sector_t begin = 0;
	sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;

	if (len > max_sectors)
		return -EINVAL;

	area = kmalloc(sizeof(struct nvm_area), GFP_KERNEL);
	if (!area)
		return -ENOMEM;

	prev = NULL;

	spin_lock(&dev->lock);
	list_for_each_entry(next, &dev->area_list, list) {
		if (begin + len > next->begin) {
			begin = next->end;
			prev = next;
			continue;
		}
		break;
	}

	if ((begin + len) > max_sectors) {
		spin_unlock(&dev->lock);
		kfree(area);
		return -EINVAL;
	}

	area->begin = *lba = begin;
	area->end = begin + len;

	if (prev) /* insert into sorted order */
		list_add(&area->list, &prev->list);
	else
		list_add(&area->list, &dev->area_list);
	spin_unlock(&dev->lock);

	return 0;
}
EXPORT_SYMBOL(nvm_get_area);

void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_area *area;

	spin_lock(&dev->lock);
	list_for_each_entry(area, &dev->area_list, list) {
		if (area->begin != begin)
			continue;

		list_del(&area->list);
		spin_unlock(&dev->lock);
		kfree(area);
		return;
	}
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL(nvm_put_area);

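/*
 * Build the ppa list for a request. With vblk set, each ppa is expanded
 * across all planes; e.g. nr_ppas = 2 with plane_mode = NVM_PLANE_DOUBLE
 * yields a four-entry list laid out plane-major:
 * [ppa0/pl0, ppa1/pl0, ppa0/pl1, ppa1/pl1].
 */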
int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
			const struct ppa_addr *ppas, int nr_ppas, int vblk)
{
	struct nvm_geo *geo = &dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if ((!vblk || geo->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	if (!vblk) {
		for (i = 0; i < nr_ppas; i++)
			rqd->ppa_list[i] = ppas[i];
	} else {
		plane_cnt = geo->plane_mode;
		rqd->nr_ppas *= plane_cnt;

		for (i = 0; i < nr_ppas; i++) {
			for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
				ppa = ppas[i];
				ppa.g.pl = pl_idx;
				rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_rqd_ppalist);

void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);

void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert address space */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

/*
 * Folds a bad block list from its plane representation to its virtual
 * block representation. The fold is done in place, and the reduced size
 * is returned.
 *
 * If the status of any plane is bad or grown bad, the virtual block is
 * marked bad. If not, the first plane's state acts as the block state.
 */
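/*
 * E.g. with plane_mode = 2, the two plane entries for virtual block 0 sit
 * at blks[0] and blks[1]; if either is NVM_BLK_T_BAD or NVM_BLK_T_GRWN_BAD,
 * blks[0] becomes that bad type, otherwise it keeps the first plane's state.
 */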
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
	struct nvm_geo *geo = &dev->geo;
	int blk, offset, pl, blktype;

	if (nr_blks != geo->blks_per_lun * geo->plane_mode)
		return -EINVAL;

	for (blk = 0; blk < geo->blks_per_lun; blk++) {
		offset = blk * geo->plane_mode;
		blktype = blks[offset];

		/* Bad blocks on any planes take precedence over other types */
		for (pl = 0; pl < geo->plane_mode; pl++) {
			if (blks[offset + pl] &
			    (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		blks[blk] = blktype;
	}

	return geo->blks_per_lun;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);

int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       u8 *blks)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);

static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	struct nvm_geo *geo = &dev->geo;
	int i;

	dev->lps_per_blk = geo->pgs_per_blk;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* Just a linear array */
	for (i = 0; i < dev->lps_per_blk; i++)
		dev->lptbl[i] = i;

	return 0;
}

static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i, p;
	struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;

	if (!mlc->num_pairs)
		return 0;

	dev->lps_per_blk = mlc->num_pairs;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* The lower page table encoding consists of a list of bytes, where
	 * each byte holds two 4-bit increments. The low nibble of the first
	 * byte is the first table entry; every following nibble is an offset
	 * added to the previous value.
	 */
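	/*
	 * E.g. pairs[0] = 0x21 and pairs[1] = 0x12 decode to the table
	 * {1, 1 + 2, 3 + 2, 5 + 1} = {1, 3, 5, 6}.
	 */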
	dev->lptbl[0] = mlc->pairs[0] & 0xF;
	for (i = 1; i < dev->lps_per_blk; i++) {
		p = mlc->pairs[i >> 1];
		if (i & 0x1) /* upper */
			dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
		else /* lower */
			dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
	}

	return 0;
}

static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->grp;
	struct nvm_geo *geo = &dev->geo;
	int ret;

	/* Whole device values */
	geo->nr_chnls = grp->num_ch;
	geo->luns_per_chnl = grp->num_lun;

	/* Generic device values */
	geo->pgs_per_blk = grp->num_pg;
	geo->blks_per_lun = grp->num_blk;
	geo->nr_planes = grp->num_pln;
	geo->fpg_size = grp->fpg_sz;
	geo->pfpg_size = grp->fpg_sz * grp->num_pln;
	geo->sec_size = grp->csecs;
	geo->oob_size = grp->sos;
	geo->sec_per_pg = grp->fpg_sz / grp->csecs;
	geo->mccap = grp->mccap;
	memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	geo->plane_mode = NVM_PLANE_SINGLE;
	geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;

	if (grp->mpos & 0x020202)
		geo->plane_mode = NVM_PLANE_DOUBLE;
	if (grp->mpos & 0x040404)
		geo->plane_mode = NVM_PLANE_QUAD;

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	/* calculated values */
	geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
	geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
	geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
	geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;

	dev->total_secs = geo->nr_luns * geo->sec_per_lun;
	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
			       sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	switch (grp->fmtype) {
	case NVM_ID_FMTYPE_SLC:
		if (nvm_init_slc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	case NVM_ID_FMTYPE_MLC:
		if (nvm_init_mlc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	default:
		pr_err("nvm: flash type not supported\n");
		ret = -EINVAL;
		goto err_fmtype;
	}

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	blk_queue_logical_block_size(dev->q, geo->sec_size);
	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}

void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	kfree(dev->rmap);
	kfree(dev->lptbl);
	kfree(dev->lun_map);
	kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev, &dev->identity)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%x nvm_vendor:%x\n",
		 dev->identity.ver_id, dev->identity.vmnt);

	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel\n");
		goto err;
	}

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
		dev->name, geo->sec_per_pg, geo->nr_planes,
		geo->pgs_per_blk, geo->blks_per_lun,
		geo->nr_luns, geo->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
EXPORT_SYMBOL(nvm_alloc_dev);

int nvm_register(struct nvm_dev *dev)
{
	int ret;

	if (!dev->q || !dev->ops)
		return -EINVAL;

	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		return -EINVAL;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
		if (!dev->dma_pool) {
			pr_err("nvm: could not create dma pool\n");
			return -ENOMEM;
		}
	}

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_init:
	dev->ops->destroy_dma_pool(dev->dma_pool);
	return ret;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
	struct nvm_target *t, *tmp;

	mutex_lock(&dev->mlock);
	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__nvm_remove_target(t);
	}
	mutex_unlock(&dev->mlock);

	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	struct nvm_ioctl_create_simple *s;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}
	s = &create->conf.s;

	if (s->lun_begin > s->lun_end || s->lun_end >= dev->geo.nr_luns) {
		pr_err("nvm: lun out of bound (%u:%u >= %u)\n",
		       s->lun_begin, s->lun_end, dev->geo.nr_luns);
		return -EINVAL;
	}

	return nvm_create_tgt(dev, create);
}

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		sprintf(info->devname, "%s", dev->name);

		/* kept for compatibility */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		sprintf(info->bmname, "%s", "gennvm");
		i++;

		if (i > 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	list_for_each_entry(dev, &nvm_devices, devices) {
		ret = nvm_remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return 0;
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);