mirror_ubuntu-zesty-kernel.git: drivers/lightnvm/gennvm.c
/*
 * Copyright (C) 2015 Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 * Implementation of a general nvm manager for Open-Channel SSDs.
 */

#include "gennvm.h"

static struct nvm_target *gen_find_target(struct gen_dev *gn, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &gn->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}

static const struct block_device_operations gen_fops = {
	.owner = THIS_MODULE,
};

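/*
 * Reserve the device LUNs [lun_begin, lun_end] for a target by marking
 * them in the device-wide dev->lun_map bitmap; fails with -EBUSY if any
 * LUN in the range is already owned by another target.
 */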
static int gen_reserve_luns(struct nvm_dev *dev, struct nvm_target *t,
			    int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("nvm: lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;

err:
	/* roll back only the bits this call managed to set */
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}

static void gen_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

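/*
 * Tear down a target's device view: clear the reserved LUN bits that the
 * per-channel offset maps point at, then free the maps, the LUN list and
 * the nvm_tgt_dev itself.
 */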
static void gen_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct gen_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->nr_chnls; i++) {
		struct gen_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		for (j = 0; j < ch_map->nr_luns; j++) {
			int lun = j + lun_offs[j];
			int lunid = (ch * dev->geo.luns_per_chnl) + lun;

			WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);
	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}

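/*
 * Build an nvm_tgt_dev covering device LUNs [lun_begin, lun_end]. The
 * target gets a private geometry plus a gen_dev_map: one gen_ch_map per
 * channel it touches, each holding the channel offset (ch_off) and a
 * per-LUN offset table (lun_offs) used to translate target addresses to
 * device addresses. The matching reverse offsets are recorded in the
 * device-wide rmap for the opposite translation.
 */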
static struct nvm_tgt_dev *gen_create_tgt_dev(struct nvm_dev *dev,
					      int lun_begin, int lun_end)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct gen_dev_map *dev_rmap = dev->rmap;
	struct gen_dev_map *dev_map;
	struct ppa_addr *luns;
	int nr_luns = lun_end - lun_begin + 1;
	int luns_left = nr_luns;
	int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
	int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
	int bch = lun_begin / dev->geo.luns_per_chnl;
	int blun = lun_begin % dev->geo.luns_per_chnl;
	int lunid = 0;
	int lun_balanced = 1;
	int prev_nr_luns;
	int i, j;

	nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;

	dev_map = kmalloc(sizeof(struct gen_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(nr_chnls, sizeof(struct gen_ch_map),
							GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
				dev->geo.luns_per_chnl : luns_left;
	for (i = 0; i < nr_chnls; i++) {
		struct gen_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct gen_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
				dev->geo.luns_per_chnl : luns_left;

		if (lun_balanced && prev_nr_luns != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->nr_luns = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].g.ch = i;
			luns[lunid++].g.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->nr_chnls = nr_chnls;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.nr_chnls = nr_chnls;
	tgt_dev->geo.nr_luns = nr_luns;
	tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
	tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));

	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}

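/*
 * Create a target from a creation ioctl request: reserve the requested
 * LUN range, build the target device view, allocate a bio-based request
 * queue and gendisk, and hand them to the target type's init() before
 * exposing the disk.
 */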
static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct gen_dev *gn = dev->mp;
	struct nvm_ioctl_create_simple *s = &create->conf.s;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;

	tt = nvm_find_target_type(create->tgttype, 1);
	if (!tt) {
		pr_err("nvm: target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	mutex_lock(&gn->lock);
	t = gen_find_target(gn, create->tgtname);
	if (t) {
		pr_err("nvm: target name already exists.\n");
		mutex_unlock(&gn->lock);
		return -EINVAL;
	}
	mutex_unlock(&gn->lock);

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	if (gen_reserve_luns(dev, t, s->lun_begin, s->lun_end))
		goto err_t;

	tgt_dev = gen_create_tgt_dev(dev, s->lun_begin, s->lun_end);
	if (!tgt_dev) {
		pr_err("nvm: could not create target device\n");
		goto err_reserve;
	}

	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
	if (!tqueue)
		goto err_dev;
	blk_queue_make_request(tqueue, tt->make_rq);

	tdisk = alloc_disk(0);
	if (!tdisk)
		goto err_queue;

	sprintf(tdisk->disk_name, "%s", create->tgtname);
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &gen_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk);
	if (IS_ERR(targetdata))
		goto err_init;

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&gn->lock);
	list_add_tail(&t->list, &gn->targets);
	mutex_unlock(&gn->lock);

	return 0;
err_init:
	put_disk(tdisk);
err_queue:
	blk_cleanup_queue(tqueue);
err_dev:
	kfree(tgt_dev);
err_reserve:
	gen_release_luns_err(dev, s->lun_begin, s->lun_end);
err_t:
	kfree(t);
	return -ENOMEM;
}

static void __gen_remove_target(struct nvm_target *t)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->exit)
		tt->exit(tdisk->private_data);

	gen_remove_tgt_dev(t->dev);
	put_disk(tdisk);

	list_del(&t->list);
	kfree(t);
}

/**
 * gen_remove_tgt - Removes a target from the media manager
 * @dev:	device
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int gen_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
{
	struct gen_dev *gn = dev->mp;
	struct nvm_target *t;

	if (!gn)
		return 1;

	mutex_lock(&gn->lock);
	t = gen_find_target(gn, remove->tgtname);
	if (!t) {
		mutex_unlock(&gn->lock);
		return 1;
	}
	__gen_remove_target(t);
	mutex_unlock(&gn->lock);

	return 0;
}

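/*
 * Reserve a contiguous range of 'len' (512-byte) sectors in the device's
 * linear address space. The area list is kept sorted by start sector, so
 * a simple first-fit walk finds the lowest free gap; the chosen start is
 * returned through *lba.
 */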
static int gen_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
{
	struct nvm_geo *geo = &dev->geo;
	struct gen_dev *gn = dev->mp;
	struct gen_area *area, *prev, *next;
	sector_t begin = 0;
	sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;

	if (len > max_sectors)
		return -EINVAL;

	area = kmalloc(sizeof(struct gen_area), GFP_KERNEL);
	if (!area)
		return -ENOMEM;

	prev = NULL;

	spin_lock(&dev->lock);
	list_for_each_entry(next, &gn->area_list, list) {
		if (begin + len > next->begin) {
			begin = next->end;
			prev = next;
			continue;
		}
		break;
	}

	if ((begin + len) > max_sectors) {
		spin_unlock(&dev->lock);
		kfree(area);
		return -EINVAL;
	}

	area->begin = *lba = begin;
	area->end = begin + len;

	if (prev) /* insert into sorted order */
		list_add(&area->list, &prev->list);
	else
		list_add(&area->list, &gn->area_list);
	spin_unlock(&dev->lock);

	return 0;
}

static void gen_put_area(struct nvm_dev *dev, sector_t begin)
{
	struct gen_dev *gn = dev->mp;
	struct gen_area *area;

	spin_lock(&dev->lock);
	list_for_each_entry(area, &gn->area_list, list) {
		if (area->begin != begin)
			continue;

		list_del(&area->list);
		spin_unlock(&dev->lock);
		kfree(area);
		return;
	}
	spin_unlock(&dev->lock);
}

static void gen_free(struct nvm_dev *dev)
{
	kfree(dev->mp);
	kfree(dev->rmap);
	dev->mp = NULL;
}

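/*
 * Media manager registration: allocate the per-device gen_dev and the
 * device-wide reverse map (dev->rmap) with one gen_ch_map per channel.
 * Channel and LUN offsets start at -1, meaning "not owned by any
 * target"; they are filled in when targets are created. Returns 1 on
 * success and -ENOMEM on allocation failure.
 */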
static int gen_register(struct nvm_dev *dev)
{
	struct gen_dev *gn;
	struct gen_dev_map *dev_rmap;
	int i, j;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	gn = kzalloc(sizeof(struct gen_dev), GFP_KERNEL);
	if (!gn)
		goto err_gn;

	dev_rmap = kmalloc(sizeof(struct gen_dev_map), GFP_KERNEL);
	if (!dev_rmap)
		goto err_rmap;

	dev_rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct gen_ch_map),
							GFP_KERNEL);
	if (!dev_rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.nr_chnls; i++) {
		struct gen_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.luns_per_chnl;

		ch_rmap = &dev_rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->nr_luns = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	gn->dev = dev;
	gn->nr_luns = dev->geo.nr_luns;
	INIT_LIST_HEAD(&gn->area_list);
	mutex_init(&gn->lock);
	INIT_LIST_HEAD(&gn->targets);
	dev->mp = gn;
	dev->rmap = dev_rmap;

	return 1;
err_ch:
	while (--i >= 0)
		kfree(dev_rmap->chnls[i].lun_offs);
	kfree(dev_rmap->chnls);
err_chnls:
	kfree(dev_rmap);
err_rmap:
	kfree(gn);
err_gn:
	module_put(THIS_MODULE);
	return -ENOMEM;
}

static void gen_unregister(struct nvm_dev *dev)
{
	struct gen_dev *gn = dev->mp;
	struct nvm_target *t, *tmp;

	mutex_lock(&gn->lock);
	list_for_each_entry_safe(t, tmp, &gn->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__gen_remove_target(t);
	}
	mutex_unlock(&gn->lock);

	gen_free(dev);
	module_put(THIS_MODULE);
}

enum {
	TRANS_TGT_TO_DEV = 0x0,
	TRANS_DEV_TO_TGT = 0x1,
};

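/*
 * Address translation between a target's local (ch, lun) space and the
 * device's physical (ch, lun) space, using the offsets recorded when the
 * target device was created.
 *
 * Illustrative example (values assumed, not taken from a real device):
 * with 4 LUNs per channel, a target built over device LUNs 6-7 gets
 * bch = 1 and blun = 2, so target (ch 0, lun 0) maps to device
 * (ch 1, lun 2), and the reverse map for device channel 1 holds
 * ch_off = 1 and lun_offs[2..3] = 2 for the way back.
 */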
static int gen_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct gen_dev_map *dev_map = tgt_dev->map;
	struct gen_ch_map *ch_map = &dev_map->chnls[p->g.ch];
	int lun_off = ch_map->lun_offs[p->g.lun];
	struct nvm_dev *dev = tgt_dev->parent;
	struct gen_dev_map *dev_rmap = dev->rmap;
	struct gen_ch_map *ch_rmap;
	int lun_roff;

	p->g.ch += ch_map->ch_off;
	p->g.lun += lun_off;

	ch_rmap = &dev_rmap->chnls[p->g.ch];
	lun_roff = ch_rmap->lun_offs[p->g.lun];

	if (unlikely(ch_rmap->ch_off < 0 || lun_roff < 0)) {
		pr_err("nvm: corrupted device partition table\n");
		return -EINVAL;
	}

	return 0;
}

static int gen_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct gen_dev_map *dev_rmap = dev->rmap;
	struct gen_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
	int lun_roff = ch_rmap->lun_offs[p->g.lun];

	p->g.ch -= ch_rmap->ch_off;
	p->g.lun -= lun_roff;

	return 0;
}

static int gen_trans_rq(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
			int flag)
{
	gen_trans_fn *f;
	int i;
	int ret = 0;

	f = (flag == TRANS_TGT_TO_DEV) ? gen_map_to_dev : gen_map_to_tgt;

	if (rqd->nr_ppas == 1)
		return f(tgt_dev, &rqd->ppa_addr);

	for (i = 0; i < rqd->nr_ppas; i++) {
		ret = f(tgt_dev, &rqd->ppa_list[i]);
		if (ret)
			goto out;
	}

out:
	return ret;
}

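/*
 * I/O path: on submission the request's PPAs are translated from the
 * target's address space to the device's and packed into the device
 * address format; on completion gen_end_io converts them back to the
 * target's address space before calling the target's end_io callback.
 */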
static void gen_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;
	struct nvm_tgt_instance *ins = rqd->ins;

	/* Convert address space */
	if (tgt_dev)
		gen_trans_rq(tgt_dev, rqd, TRANS_DEV_TO_TGT);

	ins->tt->end_io(rqd);
}

static int gen_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;

	if (!dev->ops->submit_io)
		return -ENODEV;

	/* Convert address space */
	gen_trans_rq(tgt_dev, rqd, TRANS_TGT_TO_DEV);
	nvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = tgt_dev;
	rqd->end_io = gen_end_io;
	return dev->ops->submit_io(dev, rqd);
}

static int gen_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p,
			 int flags)
{
	/* Convert address space */
	gen_map_to_dev(tgt_dev, p);

	return nvm_erase_ppa(tgt_dev->parent, p, 1, flags);
}

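/*
 * Rebase a table of on-media physical sector addresses (little-endian
 * 64-bit entries; zero entries are skipped) from device-global numbering
 * to the owning target's local numbering, using the device reverse map.
 */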
static void gen_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
			    int len)
{
	struct nvm_geo *geo = &dev->geo;
	struct gen_dev_map *dev_rmap = dev->rmap;
	u64 i;

	for (i = 0; i < len; i++) {
		struct gen_ch_map *ch_rmap;
		int *lun_roffs;
		struct ppa_addr gaddr;
		u64 pba = le64_to_cpu(entries[i]);
		u64 diff;

		if (!pba)
			continue;

		gaddr = linear_to_generic_addr(geo, pba);
		ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
		lun_roffs = ch_rmap->lun_offs;

		diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
				(lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;

		entries[i] -= cpu_to_le64(diff);
	}
}

617
618 static struct nvmm_type gen = {
619 .name = "gennvm",
620 .version = {0, 1, 0},
621
622 .register_mgr = gen_register,
623 .unregister_mgr = gen_unregister,
624
625 .create_tgt = gen_create_tgt,
626 .remove_tgt = gen_remove_tgt,
627
628 .submit_io = gen_submit_io,
629 .erase_blk = gen_erase_blk,
630
631 .get_area = gen_get_area,
632 .put_area = gen_put_area,
633
634 .part_to_tgt = gen_part_to_tgt,
635 };
636
637 static int __init gen_module_init(void)
638 {
639 return nvm_register_mgr(&gen);
640 }
641
642 static void gen_module_exit(void)
643 {
644 nvm_unregister_mgr(&gen);
645 }
646
647 module_init(gen_module_init);
648 module_exit(gen_module_exit);
649 MODULE_LICENSE("GPL v2");
650 MODULE_DESCRIPTION("General media manager for Open-Channel SSDs");