drivers/lightnvm/gennvm.c (blame at "lightnvm: move block fold outside of get_bb_tbl()")
/*
 * Copyright (C) 2015 Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 * Implementation of a generic nvm manager for Open-Channel SSDs.
 */

#include "gennvm.h"

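/*
 * Reserve a contiguous range of 'len' sectors in the device's logical
 * address space. The area list is kept sorted by start sector; the first
 * gap large enough is claimed and its start LBA is returned through *lba.
 */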
static int gennvm_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
{
	struct gen_nvm *gn = dev->mp;
	struct gennvm_area *area, *prev, *next;
	sector_t begin = 0;
	sector_t max_sectors = (dev->sec_size * dev->total_secs) >> 9;

	if (len > max_sectors)
		return -EINVAL;

	area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
	if (!area)
		return -ENOMEM;

	prev = NULL;

	spin_lock(&dev->lock);
	list_for_each_entry(next, &gn->area_list, list) {
		if (begin + len > next->begin) {
			begin = next->end;
			prev = next;
			continue;
		}
		break;
	}

	if ((begin + len) > max_sectors) {
		spin_unlock(&dev->lock);
		kfree(area);
		return -EINVAL;
	}

	area->begin = *lba = begin;
	area->end = begin + len;

	if (prev) /* insert into sorted order */
		list_add(&area->list, &prev->list);
	else
		list_add(&area->list, &gn->area_list);
	spin_unlock(&dev->lock);

	return 0;
}

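/*
 * Release an area previously reserved with gennvm_get_area(). Lookup is by
 * start sector; an unknown 'begin' is silently ignored.
 */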
static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
{
	struct gen_nvm *gn = dev->mp;
	struct gennvm_area *area;

	spin_lock(&dev->lock);
	list_for_each_entry(area, &gn->area_list, list) {
		if (area->begin != begin)
			continue;

		list_del(&area->list);
		spin_unlock(&dev->lock);
		kfree(area);
		return;
	}
	spin_unlock(&dev->lock);
}

static void gennvm_blocks_free(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	int i;

	gennvm_for_each_lun(gn, lun, i) {
		if (!lun->vlun.blocks)
			break;
		vfree(lun->vlun.blocks);
	}
}

static void gennvm_luns_free(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;

	kfree(gn->luns);
}

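/*
 * Allocate one gen_lun per LUN and initialize its lock, free/used/bad-block
 * lists, channel and LUN ids, and block counters. Two blocks per LUN are
 * set aside for garbage collection.
 */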
static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
{
	struct gen_lun *lun;
	int i;

	gn->luns = kcalloc(dev->nr_luns, sizeof(struct gen_lun), GFP_KERNEL);
	if (!gn->luns)
		return -ENOMEM;

	gennvm_for_each_lun(gn, lun, i) {
		spin_lock_init(&lun->vlun.lock);
		INIT_LIST_HEAD(&lun->free_list);
		INIT_LIST_HEAD(&lun->used_list);
		INIT_LIST_HEAD(&lun->bb_list);

		lun->reserved_blocks = 2; /* for GC only */
		lun->vlun.id = i;
		lun->vlun.lun_id = i % dev->luns_per_chnl;
		lun->vlun.chnl_id = i / dev->luns_per_chnl;
		lun->vlun.nr_free_blocks = dev->blks_per_lun;
		lun->vlun.nr_open_blocks = 0;
		lun->vlun.nr_closed_blocks = 0;
		lun->vlun.nr_bad_blocks = 0;
	}
	return 0;
}

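/*
 * get_bb_tbl() callback. The per-plane bad block table is folded into one
 * entry per block; every block not reported as good is moved onto the LUN's
 * bad block list and the counters are updated accordingly.
 */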
static int gennvm_block_bb(struct nvm_dev *dev, struct ppa_addr ppa,
					u8 *blks, int nr_blks, void *private)
{
	struct gen_nvm *gn = private;
	struct gen_lun *lun;
	struct nvm_block *blk;
	int i;

	nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
	if (nr_blks < 0)
		return nr_blks;

	lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];

	for (i = 0; i < nr_blks; i++) {
		if (blks[i] == 0)
			continue;

		blk = &lun->vlun.blocks[i];
		if (!blk) {
			pr_err("gennvm: BB data is out of bounds.\n");
			return -EINVAL;
		}

		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		lun->vlun.nr_free_blocks--;
	}

	return 0;
}

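/*
 * get_l2p_tbl() callback. Each mapped logical sector is resolved to its
 * owning LUN and block; the first time a block is seen it is moved to the
 * used list and marked open. The FTL on top re-establishes the actual block
 * state later.
 */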
static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct nvm_dev *dev = private;
	struct gen_nvm *gn = dev->mp;
	u64 elba = slba + nlb;
	struct gen_lun *lun;
	struct nvm_block *blk;
	u64 i;
	int lun_id;

	if (unlikely(elba > dev->total_secs)) {
		pr_err("gennvm: L2P data from device is out of bounds!\n");
		return -EINVAL;
	}

	for (i = 0; i < nlb; i++) {
		u64 pba = le64_to_cpu(entries[i]);

		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
			pr_err("gennvm: L2P data entry is out of bounds!\n");
			return -EINVAL;
		}

		/* Address zero is a special one. The first page on a disk is
		 * protected. It often holds internal device boot
		 * information.
		 */
		if (!pba)
			continue;

		/* resolve block from physical address */
		lun_id = div_u64(pba, dev->sec_per_lun);
		lun = &gn->luns[lun_id];

		/* Calculate block offset into lun */
		pba = pba - (dev->sec_per_lun * lun_id);
		blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];

		if (!blk->state) {
			/* at this point, we don't know anything about the
			 * block. It's up to the FTL on top to re-establish the
			 * block state. The block is assumed to be open.
			 */
			list_move_tail(&blk->list, &lun->used_list);
			blk->state = NVM_BLK_ST_OPEN;
			lun->vlun.nr_free_blocks--;
			lun->vlun.nr_open_blocks++;
		}
	}

	return 0;
}

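/*
 * Allocate the per-LUN block arrays and put every block on the free list,
 * except the device's first block, which is reserved. The bad block table
 * and, when available, the L2P table are then applied on top of this
 * default state.
 */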
static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
{
	struct gen_lun *lun;
	struct nvm_block *block;
	sector_t lun_iter, blk_iter, cur_block_id = 0;
	int ret;

	gennvm_for_each_lun(gn, lun, lun_iter) {
		lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
							dev->blks_per_lun);
		if (!lun->vlun.blocks)
			return -ENOMEM;

		for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
			block = &lun->vlun.blocks[blk_iter];

			INIT_LIST_HEAD(&block->list);

			block->lun = &lun->vlun;
			block->id = cur_block_id++;

			/* First block is reserved for device */
			if (unlikely(lun_iter == 0 && blk_iter == 0)) {
				lun->vlun.nr_free_blocks--;
				continue;
			}

			list_add_tail(&block->list, &lun->free_list);
		}

		if (dev->ops->get_bb_tbl) {
			struct ppa_addr ppa;

			ppa.ppa = 0;
			ppa.g.ch = lun->vlun.chnl_id;
			ppa.g.lun = lun->vlun.id;
			ppa = generic_to_dev_addr(dev, ppa);

			ret = dev->ops->get_bb_tbl(dev, ppa,
						gennvm_block_bb, gn);
			if (ret)
				pr_err("gennvm: could not read BB table\n");
		}
	}

	if ((dev->identity.dom & NVM_RSP_L2P) && dev->ops->get_l2p_tbl) {
		ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs,
							gennvm_block_map, dev);
		if (ret) {
			pr_err("gennvm: could not read L2P table.\n");
			pr_warn("gennvm: default block initialization");
		}
	}

	return 0;
}

static void gennvm_free(struct nvm_dev *dev)
{
	gennvm_blocks_free(dev);
	gennvm_luns_free(dev);
	kfree(dev->mp);
	dev->mp = NULL;
}

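/*
 * Media manager registration: allocate the gen_nvm instance and build the
 * LUN and block bookkeeping for the device. Returns 1 once the device has
 * been claimed, or a negative error code on failure.
 */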
static int gennvm_register(struct nvm_dev *dev)
{
	struct gen_nvm *gn;
	int ret;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL);
	if (!gn)
		return -ENOMEM;

	gn->dev = dev;
	gn->nr_luns = dev->nr_luns;
	INIT_LIST_HEAD(&gn->area_list);
	dev->mp = gn;

	ret = gennvm_luns_init(dev, gn);
	if (ret) {
		pr_err("gennvm: could not initialize luns\n");
		goto err;
	}

	ret = gennvm_blocks_init(dev, gn);
	if (ret) {
		pr_err("gennvm: could not initialize blocks\n");
		goto err;
	}

	return 1;
err:
	gennvm_free(dev);
	module_put(THIS_MODULE);
	return ret;
}

static void gennvm_unregister(struct nvm_dev *dev)
{
	gennvm_free(dev);
	module_put(THIS_MODULE);
}

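/*
 * Take the first block off the LUN's free list, move it to the used list
 * and mark it open. The caller must hold vlun->lock. Non-GC allocations are
 * refused once the free count drops below the GC reserve.
 */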
static struct nvm_block *gennvm_get_blk_unlocked(struct nvm_dev *dev,
				struct nvm_lun *vlun, unsigned long flags)
{
	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
	struct nvm_block *blk = NULL;
	int is_gc = flags & NVM_IOTYPE_GC;

	assert_spin_locked(&vlun->lock);

	if (list_empty(&lun->free_list)) {
		pr_err_ratelimited("gennvm: lun %u have no free pages available",
							lun->vlun.id);
		goto out;
	}

	if (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks)
		goto out;

	blk = list_first_entry(&lun->free_list, struct nvm_block, list);
	list_move_tail(&blk->list, &lun->used_list);
	blk->state = NVM_BLK_ST_OPEN;

	lun->vlun.nr_free_blocks--;
	lun->vlun.nr_open_blocks++;

out:
	return blk;
}

static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
				struct nvm_lun *vlun, unsigned long flags)
{
	struct nvm_block *blk;

	spin_lock(&vlun->lock);
	blk = gennvm_get_blk_unlocked(dev, vlun, flags);
	spin_unlock(&vlun->lock);
	return blk;
}

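/*
 * Return a block to its LUN. Open and closed blocks go back on the free
 * list; blocks marked bad go on the bad block list. Any other state is
 * warned about and treated as bad. The caller must hold vlun->lock.
 */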
static void gennvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
{
	struct nvm_lun *vlun = blk->lun;
	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);

	assert_spin_locked(&vlun->lock);

	if (blk->state & NVM_BLK_ST_OPEN) {
		list_move_tail(&blk->list, &lun->free_list);
		lun->vlun.nr_open_blocks--;
		lun->vlun.nr_free_blocks++;
		blk->state = NVM_BLK_ST_FREE;
	} else if (blk->state & NVM_BLK_ST_CLOSED) {
		list_move_tail(&blk->list, &lun->free_list);
		lun->vlun.nr_closed_blocks--;
		lun->vlun.nr_free_blocks++;
		blk->state = NVM_BLK_ST_FREE;
	} else if (blk->state & NVM_BLK_ST_BAD) {
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		blk->state = NVM_BLK_ST_BAD;
	} else {
		WARN_ON_ONCE(1);
		pr_err("gennvm: erroneous block type (%lu -> %u)\n",
							blk->id, blk->state);
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		blk->state = NVM_BLK_ST_BAD;
	}
}

static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	struct nvm_lun *vlun = blk->lun;

	spin_lock(&vlun->lock);
	gennvm_put_blk_unlocked(dev, blk);
	spin_unlock(&vlun->lock);
}

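/*
 * Set the in-memory state of the block addressed by *ppa after sanity
 * checking the channel, LUN and block indices against the device geometry.
 */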
static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
								int type)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	struct nvm_block *blk;

	if (unlikely(ppa->g.ch > dev->nr_chnls ||
					ppa->g.lun > dev->luns_per_chnl ||
					ppa->g.blk > dev->blks_per_lun)) {
		WARN_ON_ONCE(1);
		pr_err("gennvm: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u",
				ppa->g.ch, dev->nr_chnls,
				ppa->g.lun, dev->luns_per_chnl,
				ppa->g.blk, dev->blks_per_lun);
		return;
	}

	lun = &gn->luns[ppa->g.lun * ppa->g.ch];
	blk = &lun->vlun.blocks[ppa->g.blk];

	/* will be moved to bb list on put_blk from target */
	blk->state = type;
}

/* mark block bad. It is expected that the target recovers from the error. */
static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (!dev->ops->set_bb_tbl)
		return;

	if (dev->ops->set_bb_tbl(dev, rqd, 1))
		return;

	nvm_addr_to_generic_mode(dev, rqd);

	/* look up blocks and mark them as bad */
	if (rqd->nr_pages > 1)
		for (i = 0; i < rqd->nr_pages; i++)
			gennvm_blk_set_type(dev, &rqd->ppa_list[i],
						NVM_BLK_ST_BAD);
	else
		gennvm_blk_set_type(dev, &rqd->ppa_addr, NVM_BLK_ST_BAD);
}

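/*
 * Per-request completion callback. A failed write marks the affected
 * block(s) bad before the completion is forwarded to the target's end_io.
 */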
static void gennvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_instance *ins = rqd->ins;

	switch (rqd->error) {
	case NVM_RSP_SUCCESS:
	case NVM_RSP_ERR_EMPTYPAGE:
		break;
	case NVM_RSP_ERR_FAILWRITE:
		gennvm_mark_blk_bad(rqd->dev, rqd);
	}

	ins->tt->end_io(rqd);
}

static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io)
		return -ENODEV;

	/* Convert address space */
	nvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = dev;
	rqd->end_io = gennvm_end_io;
	return dev->ops->submit_io(dev, rqd);
}

static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
							unsigned long flags)
{
	struct ppa_addr addr = block_to_ppa(dev, blk);

	return nvm_erase_ppa(dev, &addr, 1);
}

static int gennvm_reserve_lun(struct nvm_dev *dev, int lunid)
{
	return test_and_set_bit(lunid, dev->lun_map);
}

static void gennvm_release_lun(struct nvm_dev *dev, int lunid)
{
	WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
}

static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
{
	struct gen_nvm *gn = dev->mp;

	if (unlikely(lunid >= dev->nr_luns))
		return NULL;

	return &gn->luns[lunid].vlun;
}

static void gennvm_lun_info_print(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	unsigned int i;

	gennvm_for_each_lun(gn, lun, i) {
		spin_lock(&lun->vlun.lock);

		pr_info("%s: lun%8u\t%u\t%u\t%u\t%u\n",
				dev->name, i,
				lun->vlun.nr_free_blocks,
				lun->vlun.nr_open_blocks,
				lun->vlun.nr_closed_blocks,
				lun->vlun.nr_bad_blocks);

		spin_unlock(&lun->vlun.lock);
	}
}

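/* Media manager operations exported to the lightnvm core. */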
static struct nvmm_type gennvm = {
	.name			= "gennvm",
	.version		= {0, 1, 0},

	.register_mgr		= gennvm_register,
	.unregister_mgr		= gennvm_unregister,

	.get_blk_unlocked	= gennvm_get_blk_unlocked,
	.put_blk_unlocked	= gennvm_put_blk_unlocked,

	.get_blk		= gennvm_get_blk,
	.put_blk		= gennvm_put_blk,

	.submit_io		= gennvm_submit_io,
	.erase_blk		= gennvm_erase_blk,

	.get_lun		= gennvm_get_lun,
	.reserve_lun		= gennvm_reserve_lun,
	.release_lun		= gennvm_release_lun,
	.lun_info_print		= gennvm_lun_info_print,

	.get_area		= gennvm_get_area,
	.put_area		= gennvm_put_area,

};

static int __init gennvm_module_init(void)
{
	return nvm_register_mgr(&gennvm);
}

static void gennvm_module_exit(void)
{
	nvm_unregister_mgr(&gennvm);
}

module_init(gennvm_module_init);
module_exit(gennvm_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic media manager for Open-Channel SSDs");