/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a physical block-device target for Open-channel SSDs.
 *
 * pblk-init.c - pblk's initialization.
 */

#include "pblk.h"

static struct kmem_cache *pblk_blk_ws_cache, *pblk_rec_cache, *pblk_r_rq_cache,
				*pblk_w_rq_cache, *pblk_line_meta_cache;
static DECLARE_RWSEM(pblk_lock);
struct bio_set *pblk_bio_set;

static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
			  struct bio *bio)
{
	int ret;

	/* Read requests must be <= 256KB due to NVMe's 64-bit completion bitmap
	 * constraint. Writes can be of arbitrary size.
	 */
	if (bio_data_dir(bio) == READ) {
		blk_queue_split(q, &bio);
		ret = pblk_submit_read(pblk, bio);
		if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
			bio_put(bio);

		return ret;
	}

	/* Prevent deadlock in the case of a modest LUN configuration and large
	 * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
	 * available for user I/O.
	 */
	if (unlikely(pblk_get_secs(bio) >= pblk_rl_sysfs_rate_show(&pblk->rl)))
		blk_queue_split(q, &bio);

	return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}

static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
	struct pblk *pblk = q->queuedata;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		pblk_discard(pblk, bio);
		if (!(bio->bi_opf & REQ_PREFLUSH)) {
			bio_endio(bio);
			return BLK_QC_T_NONE;
		}
	}

	switch (pblk_rw_io(q, pblk, bio)) {
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	}

	return BLK_QC_T_NONE;
}

static void pblk_l2p_free(struct pblk *pblk)
{
	vfree(pblk->trans_map);
}

static int pblk_l2p_init(struct pblk *pblk)
{
	sector_t i;
	struct ppa_addr ppa;
	int entry_size = 8;

	if (pblk->ppaf_bitsize < 32)
		entry_size = 4;

	pblk->trans_map = vmalloc(entry_size * pblk->rl.nr_secs);
	if (!pblk->trans_map)
		return -ENOMEM;

	pblk_ppa_set_empty(&ppa);

	for (i = 0; i < pblk->rl.nr_secs; i++)
		pblk_trans_map_set(pblk, i, ppa);

	return 0;
}

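/*
 * Editor's sketch (not part of the original source): how the L2P footprint
 * above scales. The helper name is hypothetical; pblk_l2p_init() inlines
 * this arithmetic.
 */
static inline size_t pblk_trans_map_bytes_example(struct pblk *pblk)
{
	/* Compressed 32-bit entries fit whenever the ppa format does */
	int entry_size = (pblk->ppaf_bitsize < 32) ? 4 : 8;

	/* e.g. 16M sectors (64GB at 4KB) take 64MB at 4B/entry, 128MB at 8B */
	return (size_t)entry_size * pblk->rl.nr_secs;
}
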
static void pblk_rwb_free(struct pblk *pblk)
{
	if (pblk_rb_tear_down_check(&pblk->rwb))
		pr_err("pblk: write buffer error on tear down\n");

	pblk_rb_data_free(&pblk->rwb);
	vfree(pblk_rb_entries_ref(&pblk->rwb));
}

static int pblk_rwb_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_rb_entry *entries;
	unsigned long nr_entries;
	unsigned int power_size, power_seg_sz;

	nr_entries = pblk_rb_calculate_size(pblk->pgs_in_buffer);

	entries = vzalloc(nr_entries * sizeof(struct pblk_rb_entry));
	if (!entries)
		return -ENOMEM;

	power_size = get_count_order(nr_entries);
	power_seg_sz = get_count_order(geo->sec_size);

	return pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz);
}

/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64

static int pblk_set_ppaf(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_addr_format ppaf = geo->ppaf;
	int power_len;

	/* Re-calculate channel and lun format to adapt to configuration */
	power_len = get_count_order(geo->nr_chnls);
	if (1 << power_len != geo->nr_chnls) {
		pr_err("pblk: supports only power-of-two channel config.\n");
		return -EINVAL;
	}
	ppaf.ch_len = power_len;

	power_len = get_count_order(geo->luns_per_chnl);
	if (1 << power_len != geo->luns_per_chnl) {
		pr_err("pblk: supports only power-of-two LUN config.\n");
		return -EINVAL;
	}
	ppaf.lun_len = power_len;

	pblk->ppaf.sec_offset = 0;
	pblk->ppaf.pln_offset = ppaf.sect_len;
	pblk->ppaf.ch_offset = pblk->ppaf.pln_offset + ppaf.pln_len;
	pblk->ppaf.lun_offset = pblk->ppaf.ch_offset + ppaf.ch_len;
	pblk->ppaf.pg_offset = pblk->ppaf.lun_offset + ppaf.lun_len;
	pblk->ppaf.blk_offset = pblk->ppaf.pg_offset + ppaf.pg_len;
	pblk->ppaf.sec_mask = (1ULL << ppaf.sect_len) - 1;
	pblk->ppaf.pln_mask = ((1ULL << ppaf.pln_len) - 1) <<
							pblk->ppaf.pln_offset;
	pblk->ppaf.ch_mask = ((1ULL << ppaf.ch_len) - 1) <<
							pblk->ppaf.ch_offset;
	pblk->ppaf.lun_mask = ((1ULL << ppaf.lun_len) - 1) <<
							pblk->ppaf.lun_offset;
	pblk->ppaf.pg_mask = ((1ULL << ppaf.pg_len) - 1) <<
							pblk->ppaf.pg_offset;
	pblk->ppaf.blk_mask = ((1ULL << ppaf.blk_len) - 1) <<
							pblk->ppaf.blk_offset;

	pblk->ppaf_bitsize = pblk->ppaf.blk_offset + ppaf.blk_len;

	return 0;
}

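/*
 * Editor's sketch (not part of the original source): how the offsets and
 * masks computed above compose a device address. The helper name is
 * hypothetical; the real conversion helpers live in pblk.h.
 */
static inline struct ppa_addr pblk_compose_ppa_example(struct pblk *pblk,
			u64 blk, u64 pg, u64 lun, u64 ch, u64 pln, u64 sec)
{
	struct ppa_addr ppa;

	ppa.ppa = (sec << pblk->ppaf.sec_offset) |	/* sec_offset is 0 */
		  (pln << pblk->ppaf.pln_offset) |
		  (ch << pblk->ppaf.ch_offset) |
		  (lun << pblk->ppaf.lun_offset) |
		  (pg << pblk->ppaf.pg_offset) |
		  (blk << pblk->ppaf.blk_offset);

	return ppa;
}
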
static int pblk_init_global_caches(struct pblk *pblk)
{
	char cache_name[PBLK_CACHE_NAME_LEN];

	down_write(&pblk_lock);
	pblk_blk_ws_cache = kmem_cache_create("pblk_blk_ws",
				sizeof(struct pblk_line_ws), 0, 0, NULL);
	if (!pblk_blk_ws_cache) {
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_rec_cache = kmem_cache_create("pblk_rec",
				sizeof(struct pblk_rec_ctx), 0, 0, NULL);
	if (!pblk_rec_cache) {
		kmem_cache_destroy(pblk_blk_ws_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_r_rq_cache = kmem_cache_create("pblk_r_rq", pblk_r_rq_size,
				0, 0, NULL);
	if (!pblk_r_rq_cache) {
		kmem_cache_destroy(pblk_blk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
				0, 0, NULL);
	if (!pblk_w_rq_cache) {
		kmem_cache_destroy(pblk_blk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		kmem_cache_destroy(pblk_r_rq_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	snprintf(cache_name, sizeof(cache_name), "pblk_line_m_%s",
							pblk->disk->disk_name);
	pblk_line_meta_cache = kmem_cache_create(cache_name,
				pblk->lm.sec_bitmap_len, 0, 0, NULL);
	if (!pblk_line_meta_cache) {
		kmem_cache_destroy(pblk_blk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		kmem_cache_destroy(pblk_r_rq_cache);
		kmem_cache_destroy(pblk_w_rq_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}
	up_write(&pblk_lock);

	return 0;
}

static int pblk_core_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int max_write_ppas;
	int mod;

	pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
	max_write_ppas = pblk->min_write_pgs * geo->nr_luns;
	pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
				max_write_ppas : nvm_max_phys_sects(dev);
	pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
						geo->nr_planes * geo->nr_luns;

	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);

	if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
		pr_err("pblk: cannot support device max_phys_sect\n");
		return -EINVAL;
	}

	div_u64_rem(geo->sec_per_blk, pblk->min_write_pgs, &mod);
	if (mod) {
		pr_err("pblk: bad configuration of sectors/pages\n");
		return -EINVAL;
	}

	if (pblk_init_global_caches(pblk))
		return -ENOMEM;

	pblk->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
	if (!pblk->page_pool)
		return -ENOMEM;

	pblk->line_ws_pool = mempool_create_slab_pool(geo->nr_luns,
							pblk_blk_ws_cache);
	if (!pblk->line_ws_pool)
		goto free_page_pool;

	pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache);
	if (!pblk->rec_pool)
		goto free_blk_ws_pool;

	pblk->r_rq_pool = mempool_create_slab_pool(64, pblk_r_rq_cache);
	if (!pblk->r_rq_pool)
		goto free_rec_pool;

	pblk->w_rq_pool = mempool_create_slab_pool(64, pblk_w_rq_cache);
	if (!pblk->w_rq_pool)
		goto free_r_rq_pool;

	pblk->line_meta_pool =
			mempool_create_slab_pool(16, pblk_line_meta_cache);
	if (!pblk->line_meta_pool)
		goto free_w_rq_pool;

	pblk->kw_wq = alloc_workqueue("pblk-aux-wq",
					WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!pblk->kw_wq)
		goto free_line_meta_pool;

	if (pblk_set_ppaf(pblk))
		goto free_kw_wq;

	if (pblk_rwb_init(pblk))
		goto free_kw_wq;

	INIT_LIST_HEAD(&pblk->compl_list);
	return 0;

free_kw_wq:
	destroy_workqueue(pblk->kw_wq);
free_line_meta_pool:
	mempool_destroy(pblk->line_meta_pool);
free_w_rq_pool:
	mempool_destroy(pblk->w_rq_pool);
free_r_rq_pool:
	mempool_destroy(pblk->r_rq_pool);
free_rec_pool:
	mempool_destroy(pblk->rec_pool);
free_blk_ws_pool:
	mempool_destroy(pblk->line_ws_pool);
free_page_pool:
	mempool_destroy(pblk->page_pool);
	return -ENOMEM;
}

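/*
 * Worked example (editor's addition): with sec_per_pl = 8 and 4KB device
 * sectors on a 4KB PAGE_SIZE host, min_write_pgs = 8. With 16 LUNs that
 * gives max_write_ppas = 128, which is then capped by what the device
 * accepts per command (nvm_max_phys_sects()) and must not exceed
 * PBLK_MAX_REQ_ADDRS.
 */
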
static void pblk_core_free(struct pblk *pblk)
{
	if (pblk->kw_wq)
		destroy_workqueue(pblk->kw_wq);

	mempool_destroy(pblk->page_pool);
	mempool_destroy(pblk->line_ws_pool);
	mempool_destroy(pblk->rec_pool);
	mempool_destroy(pblk->r_rq_pool);
	mempool_destroy(pblk->w_rq_pool);
	mempool_destroy(pblk->line_meta_pool);

	kmem_cache_destroy(pblk_blk_ws_cache);
	kmem_cache_destroy(pblk_rec_cache);
	kmem_cache_destroy(pblk_r_rq_cache);
	kmem_cache_destroy(pblk_w_rq_cache);
	kmem_cache_destroy(pblk_line_meta_cache);
}

static void pblk_luns_free(struct pblk *pblk)
{
	kfree(pblk->luns);
}

static void pblk_lines_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	int i;

	spin_lock(&l_mg->free_lock);
	for (i = 0; i < l_mg->nr_lines; i++) {
		line = &pblk->lines[i];

		pblk_line_free(pblk, line);
		kfree(line->blk_bitmap);
		kfree(line->erase_bitmap);
	}
	spin_unlock(&l_mg->free_lock);
}

static void pblk_line_meta_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int i;

	kfree(l_mg->bb_template);
	kfree(l_mg->bb_aux);

	for (i = 0; i < PBLK_DATA_LINES; i++) {
		pblk_mfree(l_mg->sline_meta[i].meta, l_mg->smeta_alloc_type);
		pblk_mfree(l_mg->eline_meta[i].meta, l_mg->emeta_alloc_type);
	}

	kfree(pblk->lines);
}

static int pblk_bb_discovery(struct nvm_tgt_dev *dev, struct pblk_lun *rlun)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;
	u8 *blks;
	int nr_blks, ret;

	nr_blks = geo->blks_per_lun * geo->plane_mode;
	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	ppa.ppa = 0;
	ppa.g.ch = rlun->bppa.g.ch;
	ppa.g.lun = rlun->bppa.g.lun;

	ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
	if (ret)
		goto out;

	nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
	if (nr_blks < 0) {
		ret = nr_blks;
		goto out;
	}

	rlun->bb_list = blks;

	return 0;
out:
	kfree(blks);
	return ret;
}

static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_lun *rlun;
	int bb_cnt = 0;
	int i;

	line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->blk_bitmap)
		return -ENOMEM;

	line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->erase_bitmap) {
		kfree(line->blk_bitmap);
		return -ENOMEM;
	}

	for (i = 0; i < lm->blk_per_line; i++) {
		rlun = &pblk->luns[i];
		if (rlun->bb_list[line->id] == NVM_BLK_T_FREE)
			continue;

		set_bit(i, line->blk_bitmap);
		bb_cnt++;
	}

	return bb_cnt;
}

static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int i, ret;

	/* TODO: Implement unbalanced LUN support */
	if (geo->luns_per_chnl < 0) {
		pr_err("pblk: unbalanced LUN config.\n");
		return -EINVAL;
	}

	pblk->luns = kcalloc(geo->nr_luns, sizeof(struct pblk_lun), GFP_KERNEL);
	if (!pblk->luns)
		return -ENOMEM;

	for (i = 0; i < geo->nr_luns; i++) {
		/* Stripe across channels */
		int ch = i % geo->nr_chnls;
		int lun_raw = i / geo->nr_chnls;
		int lunid = lun_raw + ch * geo->luns_per_chnl;

		rlun = &pblk->luns[i];
		rlun->bppa = luns[lunid];

		sema_init(&rlun->wr_sem, 1);

		ret = pblk_bb_discovery(dev, rlun);
		if (ret) {
			while (--i >= 0)
				kfree(pblk->luns[i].bb_list);
			return ret;
		}
	}

	return 0;
}

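/*
 * Worked example (editor's addition): with nr_chnls = 4 and
 * luns_per_chnl = 2, the stripe mapping above visits the device's LUNs
 * channel-first. For i = 5: ch = 5 % 4 = 1, lun_raw = 5 / 4 = 1, so
 * lunid = 1 + 1 * 2 = 3, i.e. the second LUN on channel 1.
 */
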
static int pblk_lines_configure(struct pblk *pblk, int flags)
{
	struct pblk_line *line = NULL;
	int ret = 0;

	if (!(flags & NVM_TARGET_FACTORY)) {
		line = pblk_recov_l2p(pblk);
		if (IS_ERR(line)) {
			pr_err("pblk: could not recover l2p table\n");
			ret = -EFAULT;
		}
	}

	if (!line) {
		/* Configure next line for user data */
		line = pblk_line_get_first_data(pblk);
		if (!line) {
			pr_err("pblk: line list corrupted\n");
			ret = -EFAULT;
		}
	}

	return ret;
}

/* See comment over struct line_emeta definition */
static unsigned int calc_emeta_len(struct pblk *pblk, struct pblk_line_meta *lm)
{
	return (sizeof(struct line_emeta) +
			((lm->sec_per_line - lm->emeta_sec) * sizeof(u64)) +
			(pblk->l_mg.nr_lines * sizeof(u32)) +
			lm->blk_bitmap_len);
}

static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	sector_t provisioned;

	pblk->over_pct = 20;

	provisioned = nr_free_blks;
	provisioned *= (100 - pblk->over_pct);
	sector_div(provisioned, 100);

	/* Internally pblk manages all free blocks, but all calculations based
	 * on user capacity consider only provisioned blocks
	 */
	pblk->rl.total_blocks = nr_free_blks;
	pblk->rl.nr_secs = nr_free_blks * geo->sec_per_blk;
	pblk->capacity = provisioned * geo->sec_per_blk;
	atomic_set(&pblk->rl.free_blocks, nr_free_blks);
}

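/*
 * Worked example (editor's addition): with the fixed 20% over-provisioning
 * and nr_free_blks = 1000, provisioned = 1000 * 80 / 100 = 800 blocks. The
 * exposed capacity is then 800 * geo->sec_per_blk sectors, while the rate
 * limiter keeps tracking all 1000 blocks internally.
 */
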
static int pblk_lines_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;
	unsigned int smeta_len, emeta_len;
	long nr_bad_blks, nr_meta_blks, nr_free_blks;
	int bb_distance;
	int i;
	int ret;

	lm->sec_per_line = geo->sec_per_blk * geo->nr_luns;
	lm->blk_per_line = geo->nr_luns;
	lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
	lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
	lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
	lm->high_thrs = lm->sec_per_line / 2;
	lm->mid_thrs = lm->sec_per_line / 4;

	/* Calculate necessary pages for smeta. See comment over struct
	 * line_smeta definition
	 */
	lm->smeta_len = sizeof(struct line_smeta) +
				PBLK_LINE_NR_LUN_BITMAP * lm->lun_bitmap_len;

	i = 1;
add_smeta_page:
	lm->smeta_sec = i * geo->sec_per_pl;
	lm->smeta_len = lm->smeta_sec * geo->sec_size;

	smeta_len = sizeof(struct line_smeta) +
				PBLK_LINE_NR_LUN_BITMAP * lm->lun_bitmap_len;
	if (smeta_len > lm->smeta_len) {
		i++;
		goto add_smeta_page;
	}

	/* Calculate necessary pages for emeta. See comment over struct
	 * line_emeta definition
	 */
	i = 1;
add_emeta_page:
	lm->emeta_sec = i * geo->sec_per_pl;
	lm->emeta_len = lm->emeta_sec * geo->sec_size;

	emeta_len = calc_emeta_len(pblk, lm);
	if (emeta_len > lm->emeta_len) {
		i++;
		goto add_emeta_page;
	}
	lm->emeta_bb = geo->nr_luns - i;

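	/*
	 * Editor's note on the two sizing loops above: smeta and emeta are
	 * written as whole plane-pages, so each loop grows the reservation
	 * one sec_per_pl unit at a time until the computed metadata fits.
	 * For example, with sec_per_pl = 8 and sec_size = 4096, one
	 * iteration already reserves 32768 bytes; a 5000-byte smeta
	 * therefore stops the loop at i = 1.
	 */
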
	nr_meta_blks = (lm->smeta_sec + lm->emeta_sec +
				(geo->sec_per_blk / 2)) / geo->sec_per_blk;
	lm->min_blk_line = nr_meta_blks + 1;

	l_mg->nr_lines = geo->blks_per_lun;
	l_mg->log_line = l_mg->data_line = NULL;
	l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
	l_mg->nr_free_lines = 0;
	bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);

	/* smeta is always small enough to fit on a kmalloc memory allocation,
	 * emeta depends on the number of LUNs allocated to the pblk instance
	 */
	l_mg->smeta_alloc_type = PBLK_KMALLOC_META;
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		l_mg->sline_meta[i].meta = kmalloc(lm->smeta_len, GFP_KERNEL);
		if (!l_mg->sline_meta[i].meta) {
			/* pblk is kzalloc'd, so unset meta pointers are NULL
			 * and safe to pass to pblk_mfree() on the error path
			 */
			ret = -ENOMEM;
			goto fail_free_meta;
		}
	}

	if (lm->emeta_len > KMALLOC_MAX_CACHE_SIZE) {
		l_mg->emeta_alloc_type = PBLK_VMALLOC_META;

		for (i = 0; i < PBLK_DATA_LINES; i++) {
			l_mg->eline_meta[i].meta = vmalloc(lm->emeta_len);
			if (!l_mg->eline_meta[i].meta) {
				ret = -ENOMEM;
				goto fail_free_meta;
			}
		}
	} else {
		l_mg->emeta_alloc_type = PBLK_KMALLOC_META;

		for (i = 0; i < PBLK_DATA_LINES; i++) {
			l_mg->eline_meta[i].meta =
					kmalloc(lm->emeta_len, GFP_KERNEL);
			if (!l_mg->eline_meta[i].meta) {
				ret = -ENOMEM;
				goto fail_free_meta;
			}
		}
	}

	l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_template) {
		ret = -ENOMEM;
		goto fail_free_meta;
	}

	l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_aux) {
		ret = -ENOMEM;
		goto fail_free_bb_template;
	}

	bb_distance = (geo->nr_luns) * geo->sec_per_pl;
	for (i = 0; i < lm->sec_per_line; i += bb_distance)
		bitmap_set(l_mg->bb_template, i, geo->sec_per_pl);

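	/*
	 * Editor's note (illustrative): bb_template marks the sectors that a
	 * single block contributes to a line at LUN position 0: one
	 * plane-page worth of bits every bb_distance sectors. When a line
	 * has a bad block in LUN j, this stencil can be shifted by
	 * j * sec_per_pl to mask out that block's sectors.
	 */
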
	INIT_LIST_HEAD(&l_mg->free_list);
	INIT_LIST_HEAD(&l_mg->corrupt_list);
	INIT_LIST_HEAD(&l_mg->bad_list);
	INIT_LIST_HEAD(&l_mg->gc_full_list);
	INIT_LIST_HEAD(&l_mg->gc_high_list);
	INIT_LIST_HEAD(&l_mg->gc_mid_list);
	INIT_LIST_HEAD(&l_mg->gc_low_list);
	INIT_LIST_HEAD(&l_mg->gc_empty_list);

	l_mg->gc_lists[0] = &l_mg->gc_high_list;
	l_mg->gc_lists[1] = &l_mg->gc_mid_list;
	l_mg->gc_lists[2] = &l_mg->gc_low_list;

	spin_lock_init(&l_mg->free_lock);
	spin_lock_init(&l_mg->gc_lock);

	pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
								GFP_KERNEL);
	if (!pblk->lines) {
		ret = -ENOMEM;
		goto fail_free_bb_aux;
	}

	nr_free_blks = 0;
	for (i = 0; i < l_mg->nr_lines; i++) {
		int blk_in_line;

		line = &pblk->lines[i];

		line->pblk = pblk;
		line->id = i;
		line->type = PBLK_LINETYPE_FREE;
		line->state = PBLK_LINESTATE_FREE;
		line->gc_group = PBLK_LINEGC_NONE;
		spin_lock_init(&line->lock);

		nr_bad_blks = pblk_bb_line(pblk, line);
		if (nr_bad_blks < 0 || nr_bad_blks > lm->blk_per_line) {
			ret = -EINVAL;
			goto fail_free_lines;
		}

		blk_in_line = lm->blk_per_line - nr_bad_blks;
		if (blk_in_line < lm->min_blk_line) {
			line->state = PBLK_LINESTATE_BAD;
			list_add_tail(&line->list, &l_mg->bad_list);
			continue;
		}

		nr_free_blks += blk_in_line;
		atomic_set(&line->blk_in_line, blk_in_line);

		l_mg->nr_free_lines++;
		list_add_tail(&line->list, &l_mg->free_list);
	}

	pblk_set_provision(pblk, nr_free_blks);

	sema_init(&pblk->erase_sem, 1);

	/* Cleanup per-LUN bad block lists - managed within lines on run-time */
	for (i = 0; i < geo->nr_luns; i++)
		kfree(pblk->luns[i].bb_list);

	return 0;
fail_free_lines:
	kfree(pblk->lines);
fail_free_bb_aux:
	kfree(l_mg->bb_aux);
fail_free_bb_template:
	kfree(l_mg->bb_template);
fail_free_meta:
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		pblk_mfree(l_mg->sline_meta[i].meta, l_mg->smeta_alloc_type);
		pblk_mfree(l_mg->eline_meta[i].meta, l_mg->emeta_alloc_type);
	}

	for (i = 0; i < geo->nr_luns; i++)
		kfree(pblk->luns[i].bb_list);

	return ret;
}

static int pblk_writer_init(struct pblk *pblk)
{
	setup_timer(&pblk->wtimer, pblk_write_timer_fn, (unsigned long)pblk);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));

	pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
	if (IS_ERR(pblk->writer_ts)) {
		pr_err("pblk: could not allocate writer kthread\n");
		return PTR_ERR(pblk->writer_ts);
	}

	return 0;
}

static void pblk_writer_stop(struct pblk *pblk)
{
	if (pblk->writer_ts)
		kthread_stop(pblk->writer_ts);
	del_timer(&pblk->wtimer);
}

static void pblk_free(struct pblk *pblk)
{
	pblk_luns_free(pblk);
	pblk_lines_free(pblk);
	pblk_line_meta_free(pblk);
	pblk_core_free(pblk);
	pblk_l2p_free(pblk);

	kfree(pblk);
}

static void pblk_tear_down(struct pblk *pblk)
{
	pblk_flush_writer(pblk);
	pblk_writer_stop(pblk);
	pblk_rb_sync_l2p(&pblk->rwb);
	pblk_recov_pad(pblk);
	pblk_rwb_free(pblk);
	pblk_rl_free(&pblk->rl);

	pr_debug("pblk: consistent tear down\n");
}

static void pblk_exit(void *private)
{
	struct pblk *pblk = private;

	down_write(&pblk_lock);
	pblk_gc_exit(pblk);
	pblk_tear_down(pblk);
	pblk_free(pblk);
	up_write(&pblk_lock);
}

static sector_t pblk_capacity(void *private)
{
	struct pblk *pblk = private;

	return pblk->capacity * NR_PHY_IN_LOG;
}

static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
		       int flags)
{
	struct nvm_geo *geo = &dev->geo;
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct pblk *pblk;
	int ret;

	if (dev->identity.dom & NVM_RSP_L2P) {
		pr_err("pblk: device-side L2P table not supported. (%x)\n",
							dev->identity.dom);
		return ERR_PTR(-EINVAL);
	}

	pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
	if (!pblk)
		return ERR_PTR(-ENOMEM);

	pblk->dev = dev;
	pblk->disk = tdisk;

	spin_lock_init(&pblk->trans_lock);
	spin_lock_init(&pblk->lock);

	if (flags & NVM_TARGET_FACTORY)
		pblk_setup_uuid(pblk);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_set(&pblk->inflight_writes, 0);
	atomic_long_set(&pblk->padded_writes, 0);
	atomic_long_set(&pblk->padded_wb, 0);
	atomic_long_set(&pblk->nr_flush, 0);
	atomic_long_set(&pblk->req_writes, 0);
	atomic_long_set(&pblk->sub_writes, 0);
	atomic_long_set(&pblk->sync_writes, 0);
	atomic_long_set(&pblk->compl_writes, 0);
	atomic_long_set(&pblk->inflight_reads, 0);
	atomic_long_set(&pblk->cache_reads, 0);
	atomic_long_set(&pblk->sync_reads, 0);
	atomic_long_set(&pblk->recov_writes, 0);
	atomic_long_set(&pblk->recov_gc_writes, 0);
#endif

	atomic_long_set(&pblk->read_failed, 0);
	atomic_long_set(&pblk->read_empty, 0);
	atomic_long_set(&pblk->read_high_ecc, 0);
	atomic_long_set(&pblk->read_failed_gc, 0);
	atomic_long_set(&pblk->write_failed, 0);
	atomic_long_set(&pblk->erase_failed, 0);

	ret = pblk_luns_init(pblk, dev->luns);
	if (ret) {
		pr_err("pblk: could not initialize luns\n");
		goto fail;
	}

	ret = pblk_lines_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize lines\n");
		goto fail_free_luns;
	}

	ret = pblk_core_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize core\n");
		goto fail_free_line_meta;
	}

	ret = pblk_l2p_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize maps\n");
		goto fail_free_core;
	}

	ret = pblk_lines_configure(pblk, flags);
	if (ret) {
		pr_err("pblk: could not configure lines\n");
		goto fail_free_l2p;
	}

	ret = pblk_writer_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize write thread\n");
		goto fail_free_lines;
	}

	ret = pblk_gc_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize gc\n");
		goto fail_stop_writer;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	blk_queue_write_cache(tqueue, true, false);

	tqueue->limits.discard_granularity = geo->pgs_per_blk * geo->pfpg_size;
	tqueue->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue);

	pr_info("pblk init: luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
			geo->nr_luns, pblk->l_mg.nr_lines,
			(unsigned long long)pblk->rl.nr_secs,
			pblk->rwb.nr_entries);

	wake_up_process(pblk->writer_ts);
	return pblk;

fail_stop_writer:
	pblk_writer_stop(pblk);
fail_free_lines:
	pblk_lines_free(pblk);
fail_free_l2p:
	pblk_l2p_free(pblk);
fail_free_core:
	pblk_core_free(pblk);
fail_free_line_meta:
	pblk_line_meta_free(pblk);
fail_free_luns:
	pblk_luns_free(pblk);
fail:
	kfree(pblk);
	return ERR_PTR(ret);
}

/* physical block device target */
static struct nvm_tgt_type tt_pblk = {
	.name		= "pblk",
	.version	= {1, 0, 0},

	.make_rq	= pblk_make_rq,
	.capacity	= pblk_capacity,

	.init		= pblk_init,
	.exit		= pblk_exit,

	.sysfs_init	= pblk_sysfs_init,
	.sysfs_exit	= pblk_sysfs_exit,
};

static int __init pblk_module_init(void)
{
	int ret;

	pblk_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
	if (!pblk_bio_set)
		return -ENOMEM;
	ret = nvm_register_tgt_type(&tt_pblk);
	if (ret)
		bioset_free(pblk_bio_set);
	return ret;
}

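/*
 * Editor's note (illustrative usage, assuming nvme-cli's LightNVM support):
 * with the module loaded, a pblk instance is typically created on top of an
 * Open-Channel device with something like:
 *
 *   nvme lnvm create -d nvme0n1 -n pblk0 -t pblk
 *
 * which instantiates target "pblk0" and exposes it as /dev/pblk0.
 */
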
static void pblk_module_exit(void)
{
	bioset_free(pblk_bio_set);
	nvm_unregister_tgt_type(&tt_pblk);
}

module_init(pblk_module_init);
module_exit(pblk_module_exit);
MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");