1/*
2 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
3 * Initial release: Matias Bjorling <m@bjorling.me>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version
7 * 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; see the file COPYING. If not, write to
16 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
17 * USA.
18 *
19 */
20
21#include <linux/list.h>
22#include <linux/types.h>
23#include <linux/sem.h>
24#include <linux/bitmap.h>
25#include <linux/moduleparam.h>
26#include <linux/miscdevice.h>
27#include <linux/lightnvm.h>
28#include <linux/sched/sysctl.h>
29
30static LIST_HEAD(nvm_tgt_types);
31static DECLARE_RWSEM(nvm_tgtt_lock);
32static LIST_HEAD(nvm_devices);
33static DECLARE_RWSEM(nvm_lock);
34
35/* Map between virtual and physical channel and lun */
36struct nvm_ch_map {
37 int ch_off;
38 int nr_luns;
39 int *lun_offs;
40};
41
42struct nvm_dev_map {
43 struct nvm_ch_map *chnls;
44 int nr_chnls;
45};
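/*
 * Illustrative example (hypothetical values, not taken from a real device):
 * a target built over physical luns 8..11 of a device with 4 luns per
 * channel occupies physical channel 2 only. Its single nvm_ch_map then
 * holds ch_off = 2, nr_luns = 4 and lun_offs[] = {0, 0, 0, 0}, so a virtual
 * (ch 0, lun j) address translates to physical (ch 2, lun j) in
 * nvm_map_to_dev() below.
 */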
46
47struct nvm_area {
48 struct list_head list;
49 sector_t begin;
50 sector_t end; /* end is excluded */
51};
52
53static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
54{
55 struct nvm_target *tgt;
56
57 list_for_each_entry(tgt, &dev->targets, list)
58 if (!strcmp(name, tgt->disk->disk_name))
59 return tgt;
60
61 return NULL;
62}
63
64static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
65{
66 int i;
67
68 for (i = lun_begin; i <= lun_end; i++) {
69 if (test_and_set_bit(i, dev->lun_map)) {
70 pr_err("nvm: lun %d already allocated\n", i);
71 goto err;
72 }
73 }
74
75 return 0;
76err:
77 while (--i >= lun_begin)
78 clear_bit(i, dev->lun_map);
79
80 return -EBUSY;
81}
82
83static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
84 int lun_end)
85{
86 int i;
87
88 for (i = lun_begin; i <= lun_end; i++)
89 WARN_ON(!test_and_clear_bit(i, dev->lun_map));
90}
91
92static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
93{
94 struct nvm_dev *dev = tgt_dev->parent;
95 struct nvm_dev_map *dev_map = tgt_dev->map;
96 int i, j;
97
98 for (i = 0; i < dev_map->nr_chnls; i++) {
99 struct nvm_ch_map *ch_map = &dev_map->chnls[i];
100 int *lun_offs = ch_map->lun_offs;
101 int ch = i + ch_map->ch_off;
102
103 if (clear) {
104 for (j = 0; j < ch_map->nr_luns; j++) {
105 int lun = j + lun_offs[j];
106 int lunid = (ch * dev->geo.luns_per_chnl) + lun;
107
108 WARN_ON(!test_and_clear_bit(lunid,
109 dev->lun_map));
110 }
111 }
112
113 kfree(ch_map->lun_offs);
114 }
115
116 kfree(dev_map->chnls);
117 kfree(dev_map);
118
119 kfree(tgt_dev->luns);
120 kfree(tgt_dev);
121}
122
123static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
124 int lun_begin, int lun_end)
125{
126 struct nvm_tgt_dev *tgt_dev = NULL;
127 struct nvm_dev_map *dev_rmap = dev->rmap;
128 struct nvm_dev_map *dev_map;
129 struct ppa_addr *luns;
130 int nr_luns = lun_end - lun_begin + 1;
131 int luns_left = nr_luns;
132 int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
133 int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
134 int bch = lun_begin / dev->geo.luns_per_chnl;
135 int blun = lun_begin % dev->geo.luns_per_chnl;
136 int lunid = 0;
137 int lun_balanced = 1;
138 int prev_nr_luns;
139 int i, j;
140
141 nr_chnls = nr_luns / dev->geo.luns_per_chnl;
142 nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;
143
144 dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
145 if (!dev_map)
146 goto err_dev;
147
148 dev_map->chnls = kcalloc(nr_chnls, sizeof(struct nvm_ch_map),
149 GFP_KERNEL);
150 if (!dev_map->chnls)
151 goto err_chnls;
152
153 luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
154 if (!luns)
155 goto err_luns;
156
157 prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
158 dev->geo.luns_per_chnl : luns_left;
159 for (i = 0; i < nr_chnls; i++) {
160 struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
161 int *lun_roffs = ch_rmap->lun_offs;
162 struct nvm_ch_map *ch_map = &dev_map->chnls[i];
163 int *lun_offs;
164 int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
165 dev->geo.luns_per_chnl : luns_left;
166
167 if (lun_balanced && prev_nr_luns != luns_in_chnl)
168 lun_balanced = 0;
169
170 ch_map->ch_off = ch_rmap->ch_off = bch;
171 ch_map->nr_luns = luns_in_chnl;
172
173 lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
174 if (!lun_offs)
175 goto err_ch;
176
177 for (j = 0; j < luns_in_chnl; j++) {
178 luns[lunid].ppa = 0;
179 luns[lunid].g.ch = i;
180 luns[lunid++].g.lun = j;
181
182 lun_offs[j] = blun;
183 lun_roffs[j + blun] = blun;
184 }
185
186 ch_map->lun_offs = lun_offs;
187
188 /* when starting a new channel, lun offset is reset */
189 blun = 0;
190 luns_left -= luns_in_chnl;
191 }
192
193 dev_map->nr_chnls = nr_chnls;
194
195 tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
196 if (!tgt_dev)
197 goto err_ch;
198
199 memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
200 /* Target device only owns a portion of the physical device */
201 tgt_dev->geo.nr_chnls = nr_chnls;
202 tgt_dev->geo.nr_luns = nr_luns;
203 tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
204 tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
205 tgt_dev->q = dev->q;
206 tgt_dev->map = dev_map;
207 tgt_dev->luns = luns;
208 memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));
209
210 tgt_dev->parent = dev;
211
212 return tgt_dev;
213err_ch:
214 while (--i >= 0)
215 kfree(dev_map->chnls[i].lun_offs);
216 kfree(luns);
217err_luns:
218 kfree(dev_map->chnls);
219err_chnls:
220 kfree(dev_map);
221err_dev:
222 return tgt_dev;
223}
224
225static const struct block_device_operations nvm_fops = {
226 .owner = THIS_MODULE,
227};
228
229static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
230{
231 struct nvm_ioctl_create_simple *s = &create->conf.s;
232 struct request_queue *tqueue;
233 struct gendisk *tdisk;
234 struct nvm_tgt_type *tt;
235 struct nvm_target *t;
236 struct nvm_tgt_dev *tgt_dev;
237 void *targetdata;
238 int ret;
239
240 tt = nvm_find_target_type(create->tgttype, 1);
241 if (!tt) {
242 pr_err("nvm: target type %s not found\n", create->tgttype);
243 return -EINVAL;
244 }
245
246 mutex_lock(&dev->mlock);
247 t = nvm_find_target(dev, create->tgtname);
248 if (t) {
249 pr_err("nvm: target name already exists.\n");
250 mutex_unlock(&dev->mlock);
251 return -EINVAL;
252 }
253 mutex_unlock(&dev->mlock);
254
255 if (nvm_reserve_luns(dev, s->lun_begin, s->lun_end))
256 return -ENOMEM;
257
258 t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
259 if (!t) {
260 ret = -ENOMEM;
261 goto err_reserve;
262 }
263
264 tgt_dev = nvm_create_tgt_dev(dev, s->lun_begin, s->lun_end);
265 if (!tgt_dev) {
266 pr_err("nvm: could not create target device\n");
267 ret = -ENOMEM;
268 goto err_t;
269 }
270
271 tdisk = alloc_disk(0);
272 if (!tdisk) {
273 ret = -ENOMEM;
274 goto err_dev;
275 }
276
277 tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
278 if (!tqueue) {
279 ret = -ENOMEM;
280 goto err_disk;
281 }
282 blk_queue_make_request(tqueue, tt->make_rq);
283
284 strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
285 tdisk->flags = GENHD_FL_EXT_DEVT;
286 tdisk->major = 0;
287 tdisk->first_minor = 0;
288 tdisk->fops = &nvm_fops;
289 tdisk->queue = tqueue;
290
291 targetdata = tt->init(tgt_dev, tdisk, create->flags);
292 if (IS_ERR(targetdata)) {
293 ret = PTR_ERR(targetdata);
294 goto err_init;
295 }
296
297 tdisk->private_data = targetdata;
298 tqueue->queuedata = targetdata;
299
300 blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);
301
302 set_capacity(tdisk, tt->capacity(targetdata));
303 add_disk(tdisk);
304
305 if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
306 ret = -ENOMEM;
307 goto err_sysfs;
308 }
309
310 t->type = tt;
311 t->disk = tdisk;
312 t->dev = tgt_dev;
313
314 mutex_lock(&dev->mlock);
315 list_add_tail(&t->list, &dev->targets);
316 mutex_unlock(&dev->mlock);
317
318 return 0;
319err_sysfs:
320 if (tt->exit)
321 tt->exit(targetdata);
322err_init:
323 blk_cleanup_queue(tqueue);
324 tdisk->queue = NULL;
325err_disk:
326 put_disk(tdisk);
327err_dev:
328 nvm_remove_tgt_dev(tgt_dev, 0);
329err_t:
330 kfree(t);
331err_reserve:
332 nvm_release_luns_err(dev, s->lun_begin, s->lun_end);
333 return ret;
334}
335
336static void __nvm_remove_target(struct nvm_target *t)
337{
338 struct nvm_tgt_type *tt = t->type;
339 struct gendisk *tdisk = t->disk;
340 struct request_queue *q = tdisk->queue;
341
342 del_gendisk(tdisk);
343 blk_cleanup_queue(q);
344
345 if (tt->sysfs_exit)
346 tt->sysfs_exit(tdisk);
347
348 if (tt->exit)
349 tt->exit(tdisk->private_data);
350
351 nvm_remove_tgt_dev(t->dev, 1);
352 put_disk(tdisk);
353
354 list_del(&t->list);
355 kfree(t);
356}
357
358/**
359 * nvm_remove_tgt - Removes a target from the media manager
360 * @dev: device
361 * @remove: ioctl structure with target name to remove.
362 *
363 * Returns:
364 * 0: on success
365 * 1: on not found
366 * <0: on error
367 */
368static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
369{
370 struct nvm_target *t;
371
372 mutex_lock(&dev->mlock);
373 t = nvm_find_target(dev, remove->tgtname);
374 if (!t) {
375 mutex_unlock(&dev->mlock);
376 return 1;
377 }
378 __nvm_remove_target(t);
379 mutex_unlock(&dev->mlock);
380
381 return 0;
382}
383
384static int nvm_register_map(struct nvm_dev *dev)
385{
386 struct nvm_dev_map *rmap;
387 int i, j;
388
389 rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
390 if (!rmap)
391 goto err_rmap;
392
393 rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct nvm_ch_map),
394 GFP_KERNEL);
395 if (!rmap->chnls)
396 goto err_chnls;
397
398 for (i = 0; i < dev->geo.nr_chnls; i++) {
399 struct nvm_ch_map *ch_rmap;
400 int *lun_roffs;
401 int luns_in_chnl = dev->geo.luns_per_chnl;
402
403 ch_rmap = &rmap->chnls[i];
404
405 ch_rmap->ch_off = -1;
406 ch_rmap->nr_luns = luns_in_chnl;
407
408 lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
409 if (!lun_roffs)
410 goto err_ch;
411
412 for (j = 0; j < luns_in_chnl; j++)
413 lun_roffs[j] = -1;
414
415 ch_rmap->lun_offs = lun_roffs;
416 }
417
418 dev->rmap = rmap;
419
420 return 0;
421err_ch:
422 while (--i >= 0)
423 kfree(rmap->chnls[i].lun_offs);
424err_chnls:
425 kfree(rmap);
426err_rmap:
427 return -ENOMEM;
428}
429
430static void nvm_unregister_map(struct nvm_dev *dev)
431{
432 struct nvm_dev_map *rmap = dev->rmap;
433 int i;
434
435 for (i = 0; i < dev->geo.nr_chnls; i++)
436 kfree(rmap->chnls[i].lun_offs);
437
438 kfree(rmap->chnls);
439 kfree(rmap);
440}
441
442static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
443{
444 struct nvm_dev_map *dev_map = tgt_dev->map;
445 struct nvm_ch_map *ch_map = &dev_map->chnls[p->g.ch];
446 int lun_off = ch_map->lun_offs[p->g.lun];
447
448 p->g.ch += ch_map->ch_off;
449 p->g.lun += lun_off;
450}
451
452static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
453{
454 struct nvm_dev *dev = tgt_dev->parent;
455 struct nvm_dev_map *dev_rmap = dev->rmap;
456 struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
457 int lun_roff = ch_rmap->lun_offs[p->g.lun];
458
459 p->g.ch -= ch_rmap->ch_off;
460 p->g.lun -= lun_roff;
461}
462
463static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
464 struct ppa_addr *ppa_list, int nr_ppas)
465{
466 int i;
467
468 for (i = 0; i < nr_ppas; i++) {
469 nvm_map_to_dev(tgt_dev, &ppa_list[i]);
470 ppa_list[i] = generic_to_dev_addr(tgt_dev, ppa_list[i]);
471 }
472}
473
474static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
475 struct ppa_addr *ppa_list, int nr_ppas)
476{
477 int i;
478
479 for (i = 0; i < nr_ppas; i++) {
480 ppa_list[i] = dev_to_generic_addr(tgt_dev, ppa_list[i]);
481 nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
482 }
483}
484
485static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
486{
487 if (rqd->nr_ppas == 1) {
488 nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1);
489 return;
490 }
491
492 nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
493}
494
495static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
496{
497 if (rqd->nr_ppas == 1) {
498 nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1);
499 return;
500 }
501
502 nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
503}
504
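/*
 * Descriptive note (added for clarity, derived from the code below):
 * nvm_part_to_tgt() rebases device-side linear sector addresses, e.g. L2P
 * entries returned through get_l2p_tbl, so that they become relative to the
 * target's first lun, using the reverse map built in nvm_register_map().
 */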
505void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
506 int len)
507{
508 struct nvm_geo *geo = &dev->geo;
509 struct nvm_dev_map *dev_rmap = dev->rmap;
510 u64 i;
511
512 for (i = 0; i < len; i++) {
513 struct nvm_ch_map *ch_rmap;
514 int *lun_roffs;
515 struct ppa_addr gaddr;
516 u64 pba = le64_to_cpu(entries[i]);
517 u64 diff;
518
519 if (!pba)
520 continue;
521
522 gaddr = linear_to_generic_addr(geo, pba);
523 ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
524 lun_roffs = ch_rmap->lun_offs;
525
526 diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
527 (lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;
528
529 entries[i] -= cpu_to_le64(diff);
530 }
531}
532EXPORT_SYMBOL(nvm_part_to_tgt);
533
534struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
535{
536 struct nvm_tgt_type *tmp, *tt = NULL;
537
538 if (lock)
539 down_write(&nvm_tgtt_lock);
540
541 list_for_each_entry(tmp, &nvm_tgt_types, list)
542 if (!strcmp(name, tmp->name)) {
543 tt = tmp;
544 break;
545 }
546
547 if (lock)
548 up_write(&nvm_tgtt_lock);
549 return tt;
550}
551EXPORT_SYMBOL(nvm_find_target_type);
552
553int nvm_register_tgt_type(struct nvm_tgt_type *tt)
554{
555 int ret = 0;
556
557 down_write(&nvm_tgtt_lock);
558 if (nvm_find_target_type(tt->name, 0))
559 ret = -EEXIST;
560 else
561 list_add(&tt->list, &nvm_tgt_types);
562 up_write(&nvm_tgtt_lock);
563
564 return ret;
565}
566EXPORT_SYMBOL(nvm_register_tgt_type);
567
568void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
569{
570 if (!tt)
571 return;
572
573 down_write(&nvm_lock);
574 list_del(&tt->list);
575 up_write(&nvm_lock);
576}
577EXPORT_SYMBOL(nvm_unregister_tgt_type);
578
579void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
580 dma_addr_t *dma_handler)
581{
582 return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
583 dma_handler);
584}
585EXPORT_SYMBOL(nvm_dev_dma_alloc);
586
587void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
588{
589 dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
590}
591EXPORT_SYMBOL(nvm_dev_dma_free);
592
593static struct nvm_dev *nvm_find_nvm_dev(const char *name)
594{
595 struct nvm_dev *dev;
596
597 list_for_each_entry(dev, &nvm_devices, devices)
598 if (!strcmp(name, dev->name))
599 return dev;
600
601 return NULL;
602}
603
604int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
605 int nr_ppas, int type)
606{
607 struct nvm_dev *dev = tgt_dev->parent;
608 struct nvm_rq rqd;
609 int ret;
610
611 if (nr_ppas > dev->ops->max_phys_sect) {
612 pr_err("nvm: unable to update all blocks atomically\n");
613 return -EINVAL;
614 }
615
616 memset(&rqd, 0, sizeof(struct nvm_rq));
617
618 nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1);
619 nvm_rq_tgt_to_dev(tgt_dev, &rqd);
620
621 ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
622 nvm_free_rqd_ppalist(tgt_dev, &rqd);
623 if (ret) {
624 pr_err("nvm: failed bb mark\n");
625 return -EINVAL;
626 }
627
628 return 0;
629}
630EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
631
632int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
633{
634 struct nvm_dev *dev = tgt_dev->parent;
635
636 return dev->ops->max_phys_sect;
637}
638EXPORT_SYMBOL(nvm_max_phys_sects);
639
640int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
641{
642 struct nvm_dev *dev = tgt_dev->parent;
643
644 if (!dev->ops->submit_io)
645 return -ENODEV;
646
647 nvm_rq_tgt_to_dev(tgt_dev, rqd);
648
649 rqd->dev = tgt_dev;
650 return dev->ops->submit_io(dev, rqd);
651}
652EXPORT_SYMBOL(nvm_submit_io);
653
654static void nvm_end_io_sync(struct nvm_rq *rqd)
655{
656 struct completion *waiting = rqd->private;
657
658 complete(waiting);
659}
660
661int nvm_erase_sync(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
662 int nr_ppas)
663{
664 struct nvm_geo *geo = &tgt_dev->geo;
665 struct nvm_rq rqd;
666 int ret;
667 DECLARE_COMPLETION_ONSTACK(wait);
668
669 memset(&rqd, 0, sizeof(struct nvm_rq));
670
671 rqd.opcode = NVM_OP_ERASE;
672 rqd.end_io = nvm_end_io_sync;
673 rqd.private = &wait;
674 rqd.flags = geo->plane_mode >> 1;
675
676 ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1);
677 if (ret)
678 return ret;
679
680 ret = nvm_submit_io(tgt_dev, &rqd);
681 if (ret) {
682 pr_err("nvm: erase I/O submission failed: %d\n", ret);
683 goto free_ppa_list;
684 }
685 wait_for_completion_io(&wait);
686
687free_ppa_list:
688 nvm_free_rqd_ppalist(tgt_dev, &rqd);
689
690 return ret;
691}
692EXPORT_SYMBOL(nvm_erase_sync);
693
694int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
695 nvm_l2p_update_fn *update_l2p, void *priv)
696{
697 struct nvm_dev *dev = tgt_dev->parent;
698
699 if (!dev->ops->get_l2p_tbl)
700 return 0;
701
702 return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
703}
704EXPORT_SYMBOL(nvm_get_l2p_tbl);
705
706int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
707{
708 struct nvm_dev *dev = tgt_dev->parent;
709 struct nvm_geo *geo = &dev->geo;
710 struct nvm_area *area, *prev, *next;
711 sector_t begin = 0;
712 sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;
713
714 if (len > max_sectors)
715 return -EINVAL;
716
717 area = kmalloc(sizeof(struct nvm_area), GFP_KERNEL);
718 if (!area)
719 return -ENOMEM;
720
721 prev = NULL;
722
723 spin_lock(&dev->lock);
724 list_for_each_entry(next, &dev->area_list, list) {
725 if (begin + len > next->begin) {
726 begin = next->end;
727 prev = next;
728 continue;
729 }
730 break;
731 }
732
733 if ((begin + len) > max_sectors) {
734 spin_unlock(&dev->lock);
735 kfree(area);
736 return -EINVAL;
737 }
738
739 area->begin = *lba = begin;
740 area->end = begin + len;
741
742 if (prev) /* insert into sorted order */
743 list_add(&area->list, &prev->list);
744 else
745 list_add(&area->list, &dev->area_list);
746 spin_unlock(&dev->lock);
747
748 return 0;
749}
750EXPORT_SYMBOL(nvm_get_area);
751
752void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
753{
754 struct nvm_dev *dev = tgt_dev->parent;
755 struct nvm_area *area;
756
757 spin_lock(&dev->lock);
758 list_for_each_entry(area, &dev->area_list, list) {
759 if (area->begin != begin)
760 continue;
761
762 list_del(&area->list);
763 spin_unlock(&dev->lock);
764 kfree(area);
765 return;
766 }
767 spin_unlock(&dev->lock);
768}
769EXPORT_SYMBOL(nvm_put_area);
770
771int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
772 const struct ppa_addr *ppas, int nr_ppas, int vblk)
773{
774 struct nvm_dev *dev = tgt_dev->parent;
775 struct nvm_geo *geo = &tgt_dev->geo;
776 int i, plane_cnt, pl_idx;
777 struct ppa_addr ppa;
778
779 if ((!vblk || geo->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
780 rqd->nr_ppas = nr_ppas;
781 rqd->ppa_addr = ppas[0];
782
783 return 0;
784 }
785
786 rqd->nr_ppas = nr_ppas;
787 rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
788 if (!rqd->ppa_list) {
789 pr_err("nvm: failed to allocate dma memory\n");
790 return -ENOMEM;
791 }
792
793 if (!vblk) {
794 for (i = 0; i < nr_ppas; i++)
795 rqd->ppa_list[i] = ppas[i];
796 } else {
797 plane_cnt = geo->plane_mode;
798 rqd->nr_ppas *= plane_cnt;
799
800 for (i = 0; i < nr_ppas; i++) {
801 for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
802 ppa = ppas[i];
803 ppa.g.pl = pl_idx;
804 rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
805 }
806 }
807 }
808
809 return 0;
810}
811EXPORT_SYMBOL(nvm_set_rqd_ppalist);
812
813void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
814{
815 if (!rqd->ppa_list)
816 return;
817
818 nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
819}
820EXPORT_SYMBOL(nvm_free_rqd_ppalist);
821
822void nvm_end_io(struct nvm_rq *rqd)
823{
824 struct nvm_tgt_dev *tgt_dev = rqd->dev;
825
826 /* Convert address space */
827 if (tgt_dev)
828 nvm_rq_dev_to_tgt(tgt_dev, rqd);
829
830 if (rqd->end_io)
831 rqd->end_io(rqd);
832}
833EXPORT_SYMBOL(nvm_end_io);
834
835/*
836 * Folds a bad block list from its plane representation into its virtual
837 * block representation. The fold is done in place and the reduced size is
838 * returned.
839 *
840 * If any of the plane states is bad or grown bad, the virtual block is
841 * marked bad. Otherwise, the first plane's state acts as the block state.
842 */
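/*
 * Worked example (illustrative numbers only): with geo->plane_mode = 2 and
 * geo->blks_per_lun = 2, a per-plane list {0, NVM_BLK_T_GRWN_BAD, 0, 0}
 * folds in place to {NVM_BLK_T_GRWN_BAD, 0} and 2 is returned.
 */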
843int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
844{
845 struct nvm_geo *geo = &dev->geo;
846 int blk, offset, pl, blktype;
847
848 if (nr_blks != geo->blks_per_lun * geo->plane_mode)
849 return -EINVAL;
850
851 for (blk = 0; blk < geo->blks_per_lun; blk++) {
852 offset = blk * geo->plane_mode;
853 blktype = blks[offset];
854
855 /* Bad blocks on any planes take precedence over other types */
856 for (pl = 0; pl < geo->plane_mode; pl++) {
857 if (blks[offset + pl] &
858 (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
859 blktype = blks[offset + pl];
860 break;
861 }
862 }
863
864 blks[blk] = blktype;
865 }
866
867 return geo->blks_per_lun;
868}
869EXPORT_SYMBOL(nvm_bb_tbl_fold);
870
871int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
872 u8 *blks)
873{
874 struct nvm_dev *dev = tgt_dev->parent;
875
876 nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);
877
878 return dev->ops->get_bb_tbl(dev, ppa, blks);
879}
880EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
881
882static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
883{
884 struct nvm_geo *geo = &dev->geo;
885 int i;
886
887 dev->lps_per_blk = geo->pgs_per_blk;
888 dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
889 if (!dev->lptbl)
890 return -ENOMEM;
891
892 /* Just a linear array */
893 for (i = 0; i < dev->lps_per_blk; i++)
894 dev->lptbl[i] = i;
895
896 return 0;
897}
898
899static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
900{
901 int i, p;
902 struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;
903
904 if (!mlc->num_pairs)
905 return 0;
906
907 dev->lps_per_blk = mlc->num_pairs;
908 dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
909 if (!dev->lptbl)
910 return -ENOMEM;
911
912 /* The lower page table encoding consists of a list of bytes, where each
913 * byte has a lower and an upper half. The first half-byte holds the
914 * initial increment value, and every half-byte after it is an offset
915 * added to the previous value.
916 */
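/*
 * Worked example (illustrative pair bytes): pairs[0] = 0x54 gives
 * lptbl[0] = 0x4 and lptbl[1] = 0x4 + 0x5 = 0x9; pairs[1] = 0x21 then
 * gives lptbl[2] = 0x9 + 0x1 = 0xA and lptbl[3] = 0xA + 0x2 = 0xC.
 */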
917 dev->lptbl[0] = mlc->pairs[0] & 0xF;
918 for (i = 1; i < dev->lps_per_blk; i++) {
919 p = mlc->pairs[i >> 1];
920 if (i & 0x1) /* upper */
921 dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
922 else /* lower */
923 dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
924 }
925
926 return 0;
927}
928
929static int nvm_core_init(struct nvm_dev *dev)
930{
931 struct nvm_id *id = &dev->identity;
932 struct nvm_id_group *grp = &id->grp;
933 struct nvm_geo *geo = &dev->geo;
934 int ret;
935
936 /* Whole device values */
937 geo->nr_chnls = grp->num_ch;
938 geo->luns_per_chnl = grp->num_lun;
939
940 /* Generic device values */
941 geo->pgs_per_blk = grp->num_pg;
942 geo->blks_per_lun = grp->num_blk;
943 geo->nr_planes = grp->num_pln;
944 geo->fpg_size = grp->fpg_sz;
945 geo->pfpg_size = grp->fpg_sz * grp->num_pln;
946 geo->sec_size = grp->csecs;
947 geo->oob_size = grp->sos;
948 geo->sec_per_pg = grp->fpg_sz / grp->csecs;
949 geo->mccap = grp->mccap;
950 memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
951
952 geo->plane_mode = NVM_PLANE_SINGLE;
953 geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;
954
955 if (grp->mpos & 0x020202)
956 geo->plane_mode = NVM_PLANE_DOUBLE;
957 if (grp->mpos & 0x040404)
958 geo->plane_mode = NVM_PLANE_QUAD;
959
960 if (grp->mtype != 0) {
961 pr_err("nvm: memory type not supported\n");
962 return -EINVAL;
963 }
964
965 /* calculated values */
966 geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
967 geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
968 geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
969 geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;
970
971 dev->total_secs = geo->nr_luns * geo->sec_per_lun;
972 dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
973 sizeof(unsigned long), GFP_KERNEL);
974 if (!dev->lun_map)
975 return -ENOMEM;
976
977 switch (grp->fmtype) {
978 case NVM_ID_FMTYPE_SLC:
979 if (nvm_init_slc_tbl(dev, grp)) {
980 ret = -ENOMEM;
981 goto err_fmtype;
982 }
983 break;
984 case NVM_ID_FMTYPE_MLC:
985 if (nvm_init_mlc_tbl(dev, grp)) {
986 ret = -ENOMEM;
987 goto err_fmtype;
988 }
989 break;
990 default:
991 pr_err("nvm: flash type not supported\n");
992 ret = -EINVAL;
993 goto err_fmtype;
994 }
995
996 INIT_LIST_HEAD(&dev->area_list);
997 INIT_LIST_HEAD(&dev->targets);
998 mutex_init(&dev->mlock);
999 spin_lock_init(&dev->lock);
1000
1001 ret = nvm_register_map(dev);
1002 if (ret)
1003 goto err_fmtype;
1004
1005 blk_queue_logical_block_size(dev->q, geo->sec_size);
1006 return 0;
1007err_fmtype:
1008 kfree(dev->lun_map);
1009 return ret;
1010}
1011
1012static void nvm_free(struct nvm_dev *dev)
1013{
1014 if (!dev)
1015 return;
1016
1017 if (dev->dma_pool)
1018 dev->ops->destroy_dma_pool(dev->dma_pool);
1019
1020 nvm_unregister_map(dev);
1021 kfree(dev->lptbl);
1022 kfree(dev->lun_map);
1023 kfree(dev);
1024}
1025
1026static int nvm_init(struct nvm_dev *dev)
1027{
1028 struct nvm_geo *geo = &dev->geo;
1029 int ret = -EINVAL;
1030
1031 if (dev->ops->identity(dev, &dev->identity)) {
1032 pr_err("nvm: device could not be identified\n");
1033 goto err;
1034 }
1035
1036 pr_debug("nvm: ver:%x nvm_vendor:%x\n",
1037 dev->identity.ver_id, dev->identity.vmnt);
1038
1039 if (dev->identity.ver_id != 1) {
1040 pr_err("nvm: device not supported by kernel.\n");
1041 goto err;
1042 }
1043
1044 ret = nvm_core_init(dev);
1045 if (ret) {
1046 pr_err("nvm: could not initialize core structures.\n");
1047 goto err;
1048 }
1049
1050 pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
1051 dev->name, geo->sec_per_pg, geo->nr_planes,
1052 geo->pgs_per_blk, geo->blks_per_lun,
1053 geo->nr_luns, geo->nr_chnls);
1054 return 0;
1055err:
1056 pr_err("nvm: failed to initialize nvm\n");
1057 return ret;
1058}
1059
1060struct nvm_dev *nvm_alloc_dev(int node)
1061{
1062 return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
1063}
1064EXPORT_SYMBOL(nvm_alloc_dev);
1065
1066int nvm_register(struct nvm_dev *dev)
1067{
1068 int ret;
1069
1070 if (!dev->q || !dev->ops)
1071 return -EINVAL;
1072
1073 if (dev->ops->max_phys_sect > 256) {
1074 pr_info("nvm: max sectors supported is 256.\n");
1075 return -EINVAL;
1076 }
1077
1078 if (dev->ops->max_phys_sect > 1) {
1079 dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
1080 if (!dev->dma_pool) {
1081 pr_err("nvm: could not create dma pool\n");
1082 return -ENOMEM;
1083 }
1084 }
1085
1086 ret = nvm_init(dev);
1087 if (ret)
1088 goto err_init;
1089
1090 /* register device with a supported media manager */
1091 down_write(&nvm_lock);
1092 list_add(&dev->devices, &nvm_devices);
1093 up_write(&nvm_lock);
1094
1095 return 0;
1096err_init:
1097 dev->ops->destroy_dma_pool(dev->dma_pool);
1098 return ret;
1099}
1100EXPORT_SYMBOL(nvm_register);
1101
1102void nvm_unregister(struct nvm_dev *dev)
1103{
1104 struct nvm_target *t, *tmp;
1105
1106 mutex_lock(&dev->mlock);
1107 list_for_each_entry_safe(t, tmp, &dev->targets, list) {
1108 if (t->dev->parent != dev)
1109 continue;
1110 __nvm_remove_target(t);
1111 }
1112 mutex_unlock(&dev->mlock);
1113
1114 down_write(&nvm_lock);
1115 list_del(&dev->devices);
1116 up_write(&nvm_lock);
1117
1118 nvm_free(dev);
1119}
1120EXPORT_SYMBOL(nvm_unregister);
1121
1122static int __nvm_configure_create(struct nvm_ioctl_create *create)
1123{
1124 struct nvm_dev *dev;
1125 struct nvm_ioctl_create_simple *s;
1126
1127 down_write(&nvm_lock);
1128 dev = nvm_find_nvm_dev(create->dev);
1129 up_write(&nvm_lock);
1130
1131 if (!dev) {
1132 pr_err("nvm: device not found\n");
1133 return -EINVAL;
1134 }
1135
1136 if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
1137 pr_err("nvm: config type not valid\n");
1138 return -EINVAL;
1139 }
1140 s = &create->conf.s;
1141
1142 if (s->lun_begin == -1 && s->lun_end == -1) {
1143 s->lun_begin = 0;
1144 s->lun_end = dev->geo.nr_luns - 1;
1145 }
1146
1147 if (s->lun_begin > s->lun_end || s->lun_end >= dev->geo.nr_luns) {
1148 pr_err("nvm: lun out of bound (%u:%u > %u)\n",
1149 s->lun_begin, s->lun_end, dev->geo.nr_luns - 1);
1150 return -EINVAL;
1151 }
1152
1153 return nvm_create_tgt(dev, create);
1154}
1155
1156static long nvm_ioctl_info(struct file *file, void __user *arg)
1157{
1158 struct nvm_ioctl_info *info;
1159 struct nvm_tgt_type *tt;
1160 int tgt_iter = 0;
1161
1162 if (!capable(CAP_SYS_ADMIN))
1163 return -EPERM;
1164
1165 info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
1166 if (IS_ERR(info))
1167 return -EFAULT;
1168
1169 info->version[0] = NVM_VERSION_MAJOR;
1170 info->version[1] = NVM_VERSION_MINOR;
1171 info->version[2] = NVM_VERSION_PATCH;
1172
1173 down_write(&nvm_lock);
1174 list_for_each_entry(tt, &nvm_tgt_types, list) {
1175 struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];
1176
1177 tgt->version[0] = tt->version[0];
1178 tgt->version[1] = tt->version[1];
1179 tgt->version[2] = tt->version[2];
1180 strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);
1181
1182 tgt_iter++;
1183 }
1184
1185 info->tgtsize = tgt_iter;
1186 up_write(&nvm_lock);
1187
1188 if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
1189 kfree(info);
1190 return -EFAULT;
1191 }
1192
1193 kfree(info);
1194 return 0;
1195}
1196
1197static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
1198{
1199 struct nvm_ioctl_get_devices *devices;
1200 struct nvm_dev *dev;
1201 int i = 0;
1202
1203 if (!capable(CAP_SYS_ADMIN))
1204 return -EPERM;
1205
1206 devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
1207 if (!devices)
1208 return -ENOMEM;
1209
1210 down_write(&nvm_lock);
1211 list_for_each_entry(dev, &nvm_devices, devices) {
1212 struct nvm_ioctl_device_info *info = &devices->info[i];
1213
1214 strlcpy(info->devname, dev->name, sizeof(info->devname));
1215
1216 /* kept for compatibility */
1217 info->bmversion[0] = 1;
1218 info->bmversion[1] = 0;
1219 info->bmversion[2] = 0;
1220 strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
1221 i++;
1222
1223 if (i > 31) {
1224 pr_err("nvm: max 31 devices can be reported.\n");
1225 break;
1226 }
1227 }
1228 up_write(&nvm_lock);
1229
1230 devices->nr_devices = i;
1231
1232 if (copy_to_user(arg, devices,
1233 sizeof(struct nvm_ioctl_get_devices))) {
1234 kfree(devices);
1235 return -EFAULT;
1236 }
1237
1238 kfree(devices);
1239 return 0;
1240}
1241
1242static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
1243{
1244 struct nvm_ioctl_create create;
1245
1246 if (!capable(CAP_SYS_ADMIN))
1247 return -EPERM;
1248
1249 if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
1250 return -EFAULT;
1251
1252 create.dev[DISK_NAME_LEN - 1] = '\0';
1253 create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
1254 create.tgtname[DISK_NAME_LEN - 1] = '\0';
1255
1256 if (create.flags != 0) {
1257 __u32 flags = create.flags;
1258
1259 /* Check for valid flags */
1260 if (flags & NVM_TARGET_FACTORY)
1261 flags &= ~NVM_TARGET_FACTORY;
1262
1263 if (flags) {
1264 pr_err("nvm: flag not supported\n");
1265 return -EINVAL;
1266 }
1267 }
1268
1269 return __nvm_configure_create(&create);
1270}
1271
1272static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
1273{
1274 struct nvm_ioctl_remove remove;
1275 struct nvm_dev *dev;
1276 int ret = 0;
1277
1278 if (!capable(CAP_SYS_ADMIN))
1279 return -EPERM;
1280
1281 if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
1282 return -EFAULT;
1283
1284 remove.tgtname[DISK_NAME_LEN - 1] = '\0';
1285
1286 if (remove.flags != 0) {
1287 pr_err("nvm: no flags supported\n");
1288 return -EINVAL;
1289 }
1290
1291 list_for_each_entry(dev, &nvm_devices, devices) {
1292 ret = nvm_remove_tgt(dev, &remove);
1293 if (!ret)
1294 break;
1295 }
1296
1297 return ret;
1298}
1299
1300/* kept for compatibility reasons */
1301static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
1302{
1303 struct nvm_ioctl_dev_init init;
1304
1305 if (!capable(CAP_SYS_ADMIN))
1306 return -EPERM;
1307
1308 if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
1309 return -EFAULT;
1310
1311 if (init.flags != 0) {
1312 pr_err("nvm: no flags supported\n");
1313 return -EINVAL;
1314 }
1315
1316 return 0;
1317}
1318
1319/* Kept for compatibility reasons */
1320static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
1321{
1322 struct nvm_ioctl_dev_factory fact;
1323
1324 if (!capable(CAP_SYS_ADMIN))
1325 return -EPERM;
1326
1327 if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
1328 return -EFAULT;
1329
1330 fact.dev[DISK_NAME_LEN - 1] = '\0';
1331
1332 if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
1333 return -EINVAL;
1334
1335 return 0;
1336}
1337
1338static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
1339{
1340 void __user *argp = (void __user *)arg;
1341
1342 switch (cmd) {
1343 case NVM_INFO:
1344 return nvm_ioctl_info(file, argp);
1345 case NVM_GET_DEVICES:
1346 return nvm_ioctl_get_devices(file, argp);
1347 case NVM_DEV_CREATE:
1348 return nvm_ioctl_dev_create(file, argp);
1349 case NVM_DEV_REMOVE:
1350 return nvm_ioctl_dev_remove(file, argp);
1351 case NVM_DEV_INIT:
1352 return nvm_ioctl_dev_init(file, argp);
1353 case NVM_DEV_FACTORY:
1354 return nvm_ioctl_dev_factory(file, argp);
1355 }
1356 return 0;
1357}
1358
1359static const struct file_operations _ctl_fops = {
1360 .open = nonseekable_open,
1361 .unlocked_ioctl = nvm_ctl_ioctl,
1362 .owner = THIS_MODULE,
1363 .llseek = noop_llseek,
1364};
1365
1366static struct miscdevice _nvm_misc = {
1367 .minor = MISC_DYNAMIC_MINOR,
1368 .name = "lightnvm",
1369 .nodename = "lightnvm/control",
1370 .fops = &_ctl_fops,
1371};
1372builtin_misc_device(_nvm_misc);