/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a physical block-device target for Open-channel SSDs.
 *
 * pblk-init.c - pblk's initialization.
 */

#include "pblk.h"

static struct kmem_cache *pblk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
				*pblk_w_rq_cache, *pblk_line_meta_cache;
static DECLARE_RWSEM(pblk_lock);
struct bio_set *pblk_bio_set;

static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
			  struct bio *bio)
{
	int ret;

	/* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
	 * constraint. Writes can be of arbitrary size.
	 */
	if (bio_data_dir(bio) == READ) {
		blk_queue_split(q, &bio);
		ret = pblk_submit_read(pblk, bio);
		if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
			bio_put(bio);

		return ret;
	}

	/* Prevent deadlock in the case of a modest LUN configuration and large
	 * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
	 * available for user I/O.
	 */
	if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
		blk_queue_split(q, &bio);

	return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}

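/*
 * Entry point for bios submitted to the target. Discards are dispatched
 * immediately and completed here, unless the bio also carries a preflush,
 * in which case it falls through to the read/write path so the flush is
 * honored.
 */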
static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
	struct pblk *pblk = q->queuedata;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		pblk_discard(pblk, bio);
		if (!(bio->bi_opf & REQ_PREFLUSH)) {
			bio_endio(bio);
			return BLK_QC_T_NONE;
		}
	}

	switch (pblk_rw_io(q, pblk, bio)) {
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	}

	return BLK_QC_T_NONE;
}

static void pblk_l2p_free(struct pblk *pblk)
{
	vfree(pblk->trans_map);
}

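/*
 * The L2P (logical-to-physical) table is a flat array with one entry per
 * exposed sector. Entries are 4 bytes when the packed ppa format fits in
 * fewer than 32 bits and 8 bytes otherwise; every entry starts out empty.
 */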
static int pblk_l2p_init(struct pblk *pblk)
{
	sector_t i;
	struct ppa_addr ppa;
	int entry_size = 8;

	if (pblk->ppaf_bitsize < 32)
		entry_size = 4;

	pblk->trans_map = vmalloc(entry_size * pblk->rl.nr_secs);
	if (!pblk->trans_map)
		return -ENOMEM;

	pblk_ppa_set_empty(&ppa);

	for (i = 0; i < pblk->rl.nr_secs; i++)
		pblk_trans_map_set(pblk, i, ppa);

	return 0;
}

static void pblk_rwb_free(struct pblk *pblk)
{
	if (pblk_rb_tear_down_check(&pblk->rwb))
		pr_err("pblk: write buffer error on tear down\n");

	pblk_rb_data_free(&pblk->rwb);
	vfree(pblk_rb_entries_ref(&pblk->rwb));
}

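/*
 * The write buffer is sized from pblk->pgs_in_buffer (derived from the
 * device geometry in pblk_core_init()); entry count and segment size are
 * handed to pblk_rb_init() as powers of two.
 */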
static int pblk_rwb_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_rb_entry *entries;
	unsigned long nr_entries;
	unsigned int power_size, power_seg_sz;

	nr_entries = pblk_rb_calculate_size(pblk->pgs_in_buffer);

	entries = vzalloc(nr_entries * sizeof(struct pblk_rb_entry));
	if (!entries)
		return -ENOMEM;

	power_size = get_count_order(nr_entries);
	power_seg_sz = get_count_order(geo->sec_size);

	return pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz);
}

/* Minimum pages needed within a lun */
#define ADDR_POOL_SIZE 64

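/*
 * Re-pack the device ppa format into contiguous bit fields (sector, plane,
 * channel, LUN, page, block). Only power-of-two channel and LUN
 * configurations are supported; the resulting bit width also determines
 * whether L2P entries are 4 or 8 bytes wide (see pblk_l2p_init()).
 */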
static int pblk_set_ppaf(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_addr_format ppaf = geo->ppaf;
	int power_len;

	/* Re-calculate channel and lun format to adapt to configuration */
	power_len = get_count_order(geo->nr_chnls);
	if (1 << power_len != geo->nr_chnls) {
		pr_err("pblk: supports only power-of-two channel config.\n");
		return -EINVAL;
	}
	ppaf.ch_len = power_len;

	power_len = get_count_order(geo->luns_per_chnl);
	if (1 << power_len != geo->luns_per_chnl) {
		pr_err("pblk: supports only power-of-two LUN config.\n");
		return -EINVAL;
	}
	ppaf.lun_len = power_len;

	pblk->ppaf.sec_offset = 0;
	pblk->ppaf.pln_offset = ppaf.sect_len;
	pblk->ppaf.ch_offset = pblk->ppaf.pln_offset + ppaf.pln_len;
	pblk->ppaf.lun_offset = pblk->ppaf.ch_offset + ppaf.ch_len;
	pblk->ppaf.pg_offset = pblk->ppaf.lun_offset + ppaf.lun_len;
	pblk->ppaf.blk_offset = pblk->ppaf.pg_offset + ppaf.pg_len;
	pblk->ppaf.sec_mask = (1ULL << ppaf.sect_len) - 1;
	pblk->ppaf.pln_mask = ((1ULL << ppaf.pln_len) - 1) <<
							pblk->ppaf.pln_offset;
	pblk->ppaf.ch_mask = ((1ULL << ppaf.ch_len) - 1) <<
							pblk->ppaf.ch_offset;
	pblk->ppaf.lun_mask = ((1ULL << ppaf.lun_len) - 1) <<
							pblk->ppaf.lun_offset;
	pblk->ppaf.pg_mask = ((1ULL << ppaf.pg_len) - 1) <<
							pblk->ppaf.pg_offset;
	pblk->ppaf.blk_mask = ((1ULL << ppaf.blk_len) - 1) <<
							pblk->ppaf.blk_offset;

	pblk->ppaf_bitsize = pblk->ppaf.blk_offset + ppaf.blk_len;

	return 0;
}

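/*
 * The slab caches below are file-scope globals, so their creation is
 * serialized with pblk_lock. The per-instance mempools set up in
 * pblk_core_init() are carved out of these caches.
 */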
static int pblk_init_global_caches(struct pblk *pblk)
{
	char cache_name[PBLK_CACHE_NAME_LEN];

	down_write(&pblk_lock);
	pblk_ws_cache = kmem_cache_create("pblk_blk_ws",
				sizeof(struct pblk_line_ws), 0, 0, NULL);
	if (!pblk_ws_cache) {
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_rec_cache = kmem_cache_create("pblk_rec",
				sizeof(struct pblk_rec_ctx), 0, 0, NULL);
	if (!pblk_rec_cache) {
		kmem_cache_destroy(pblk_ws_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_g_rq_cache = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
				0, 0, NULL);
	if (!pblk_g_rq_cache) {
		kmem_cache_destroy(pblk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
				0, 0, NULL);
	if (!pblk_w_rq_cache) {
		kmem_cache_destroy(pblk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		kmem_cache_destroy(pblk_g_rq_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	snprintf(cache_name, sizeof(cache_name), "pblk_line_m_%s",
							pblk->disk->disk_name);
	pblk_line_meta_cache = kmem_cache_create(cache_name,
				pblk->lm.sec_bitmap_len, 0, 0, NULL);
	if (!pblk_line_meta_cache) {
		kmem_cache_destroy(pblk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		kmem_cache_destroy(pblk_g_rq_cache);
		kmem_cache_destroy(pblk_w_rq_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}
	up_write(&pblk_lock);

	return 0;
}

static int pblk_core_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

	pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
						geo->nr_planes * geo->nr_luns;

	if (pblk_init_global_caches(pblk))
		return -ENOMEM;

	/* Internal bios can be at most the sectors signaled by the device. */
	pblk->page_bio_pool = mempool_create_page_pool(nvm_max_phys_sects(dev),
									0);
	if (!pblk->page_bio_pool)
		return -ENOMEM;

	pblk->gen_ws_pool = mempool_create_slab_pool(PBLK_GEN_WS_POOL_SIZE,
							pblk_ws_cache);
	if (!pblk->gen_ws_pool)
		goto free_page_bio_pool;

	pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache);
	if (!pblk->rec_pool)
		goto free_gen_ws_pool;

	pblk->g_rq_pool = mempool_create_slab_pool(PBLK_READ_REQ_POOL_SIZE,
							pblk_g_rq_cache);
	if (!pblk->g_rq_pool)
		goto free_rec_pool;

	pblk->w_rq_pool = mempool_create_slab_pool(geo->nr_luns * 2,
							pblk_w_rq_cache);
	if (!pblk->w_rq_pool)
		goto free_g_rq_pool;

	pblk->line_meta_pool =
			mempool_create_slab_pool(PBLK_META_POOL_SIZE,
							pblk_line_meta_cache);
	if (!pblk->line_meta_pool)
		goto free_w_rq_pool;

	pblk->close_wq = alloc_workqueue("pblk-close-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
	if (!pblk->close_wq)
		goto free_line_meta_pool;

	pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!pblk->bb_wq)
		goto free_close_wq;

	if (pblk_set_ppaf(pblk))
		goto free_bb_wq;

	if (pblk_rwb_init(pblk))
		goto free_bb_wq;

	INIT_LIST_HEAD(&pblk->compl_list);
	return 0;

free_bb_wq:
	destroy_workqueue(pblk->bb_wq);
free_close_wq:
	destroy_workqueue(pblk->close_wq);
free_line_meta_pool:
	mempool_destroy(pblk->line_meta_pool);
free_w_rq_pool:
	mempool_destroy(pblk->w_rq_pool);
free_g_rq_pool:
	mempool_destroy(pblk->g_rq_pool);
free_rec_pool:
	mempool_destroy(pblk->rec_pool);
free_gen_ws_pool:
	mempool_destroy(pblk->gen_ws_pool);
free_page_bio_pool:
	mempool_destroy(pblk->page_bio_pool);
	return -ENOMEM;
}

static void pblk_core_free(struct pblk *pblk)
{
	if (pblk->close_wq)
		destroy_workqueue(pblk->close_wq);

	if (pblk->bb_wq)
		destroy_workqueue(pblk->bb_wq);

	mempool_destroy(pblk->page_bio_pool);
	mempool_destroy(pblk->gen_ws_pool);
	mempool_destroy(pblk->rec_pool);
	mempool_destroy(pblk->g_rq_pool);
	mempool_destroy(pblk->w_rq_pool);
	mempool_destroy(pblk->line_meta_pool);

	kmem_cache_destroy(pblk_ws_cache);
	kmem_cache_destroy(pblk_rec_cache);
	kmem_cache_destroy(pblk_g_rq_cache);
	kmem_cache_destroy(pblk_w_rq_cache);
	kmem_cache_destroy(pblk_line_meta_cache);
}

static void pblk_luns_free(struct pblk *pblk)
{
	kfree(pblk->luns);
}

static void pblk_free_line_bitmaps(struct pblk_line *line)
{
	kfree(line->blk_bitmap);
	kfree(line->erase_bitmap);
}

static void pblk_lines_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	int i;

	spin_lock(&l_mg->free_lock);
	for (i = 0; i < l_mg->nr_lines; i++) {
		line = &pblk->lines[i];

		pblk_line_free(pblk, line);
		pblk_free_line_bitmaps(line);
	}
	spin_unlock(&l_mg->free_lock);
}

static void pblk_line_meta_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int i;

	kfree(l_mg->bb_template);
	kfree(l_mg->bb_aux);
	kfree(l_mg->vsc_list);

	spin_lock(&l_mg->free_lock);
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		kfree(l_mg->sline_meta[i]);
		pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
		kfree(l_mg->eline_meta[i]);
	}
	spin_unlock(&l_mg->free_lock);

	kfree(pblk->lines);
}

static int pblk_bb_discovery(struct nvm_tgt_dev *dev, struct pblk_lun *rlun)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;
	u8 *blks;
	int nr_blks, ret;

	nr_blks = geo->blks_per_lun * geo->plane_mode;
	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	ppa.ppa = 0;
	ppa.g.ch = rlun->bppa.g.ch;
	ppa.g.lun = rlun->bppa.g.lun;

	ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
	if (ret)
		goto out;

	nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
	if (nr_blks < 0) {
		ret = nr_blks;
		goto out;
	}

	rlun->bb_list = blks;

	return 0;
out:
	kfree(blks);
	return ret;
}

static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line,
			int blk_per_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int bb_cnt = 0;
	int i;

	for (i = 0; i < blk_per_line; i++) {
		rlun = &pblk->luns[i];
		if (rlun->bb_list[line->id] == NVM_BLK_T_FREE)
			continue;

		set_bit(pblk_ppa_to_pos(geo, rlun->bppa), line->blk_bitmap);
		bb_cnt++;
	}

	return bb_cnt;
}

static int pblk_alloc_line_bitmaps(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;

	line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->blk_bitmap)
		return -ENOMEM;

	line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->erase_bitmap) {
		kfree(line->blk_bitmap);
		return -ENOMEM;
	}

	return 0;
}

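/*
 * LUNs are mapped so that consecutive pblk LUN indexes stripe across
 * channels, e.g. with 2 channels and 4 LUNs per channel: i=0 -> ch0/lun0,
 * i=1 -> ch1/lun0, i=2 -> ch0/lun1, and so on.
 */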
static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int i, ret;

	/* TODO: Implement unbalanced LUN support */
	if (geo->luns_per_chnl < 0) {
		pr_err("pblk: unbalanced LUN config.\n");
		return -EINVAL;
	}

	pblk->luns = kcalloc(geo->nr_luns, sizeof(struct pblk_lun), GFP_KERNEL);
	if (!pblk->luns)
		return -ENOMEM;

	for (i = 0; i < geo->nr_luns; i++) {
		/* Stripe across channels */
		int ch = i % geo->nr_chnls;
		int lun_raw = i / geo->nr_chnls;
		int lunid = lun_raw + ch * geo->luns_per_chnl;

		rlun = &pblk->luns[i];
		rlun->bppa = luns[lunid];

		sema_init(&rlun->wr_sem, 1);

		ret = pblk_bb_discovery(dev, rlun);
		if (ret) {
			while (--i >= 0)
				kfree(pblk->luns[i].bb_list);
			return ret;
		}
	}

	return 0;
}

static int pblk_lines_configure(struct pblk *pblk, int flags)
{
	struct pblk_line *line = NULL;
	int ret = 0;

	if (!(flags & NVM_TARGET_FACTORY)) {
		line = pblk_recov_l2p(pblk);
		if (IS_ERR(line)) {
			pr_err("pblk: could not recover l2p table\n");
			ret = -EFAULT;
		}
	}

	if (!line) {
		/* Configure next line for user data */
		line = pblk_line_get_first_data(pblk);
		if (!line) {
			pr_err("pblk: line list corrupted\n");
			ret = -EFAULT;
		}
	}

	return ret;
}

/* See comment over struct line_emeta definition */
static unsigned int calc_emeta_len(struct pblk *pblk)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

	/* Round to sector size so that lba_list starts on its own sector */
	lm->emeta_sec[1] = DIV_ROUND_UP(
			sizeof(struct line_emeta) + lm->blk_bitmap_len,
			geo->sec_size);
	lm->emeta_len[1] = lm->emeta_sec[1] * geo->sec_size;

	/* Round to sector size so that vsc_list starts on its own sector */
	lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0];
	lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64),
			geo->sec_size);
	lm->emeta_len[2] = lm->emeta_sec[2] * geo->sec_size;

	lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32),
			geo->sec_size);
	lm->emeta_len[3] = lm->emeta_sec[3] * geo->sec_size;

	lm->vsc_list_len = l_mg->nr_lines * sizeof(u32);

	return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]);
}

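/*
 * pblk over-provisions by a fixed 20%: the capacity exposed to the user is
 * 80% of the free blocks, while the rate limiter still accounts for all of
 * them.
 */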
static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	sector_t provisioned;

	pblk->over_pct = 20;

	provisioned = nr_free_blks;
	provisioned *= (100 - pblk->over_pct);
	sector_div(provisioned, 100);

	/* Internally pblk manages all free blocks, but all calculations based
	 * on user capacity consider only provisioned blocks
	 */
	pblk->rl.total_blocks = nr_free_blks;
	pblk->rl.nr_secs = nr_free_blks * geo->sec_per_blk;
	pblk->capacity = provisioned * geo->sec_per_blk;
	atomic_set(&pblk->rl.free_blocks, nr_free_blks);
}

static int pblk_lines_alloc_metadata(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	int i;

	/* smeta is always small enough to fit on a kmalloc memory allocation,
	 * emeta depends on the number of LUNs allocated to the pblk instance
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
		if (!l_mg->sline_meta[i])
			goto fail_free_smeta;
	}

	/* emeta allocates three different buffers for managing metadata with
	 * in-memory and in-media layouts
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		struct pblk_emeta *emeta;

		emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
		if (!emeta)
			goto fail_free_emeta;

		if (lm->emeta_len[0] > KMALLOC_MAX_CACHE_SIZE) {
			l_mg->emeta_alloc_type = PBLK_VMALLOC_META;

			emeta->buf = vmalloc(lm->emeta_len[0]);
			if (!emeta->buf) {
				kfree(emeta);
				goto fail_free_emeta;
			}

			emeta->nr_entries = lm->emeta_sec[0];
			l_mg->eline_meta[i] = emeta;
		} else {
			l_mg->emeta_alloc_type = PBLK_KMALLOC_META;

			emeta->buf = kmalloc(lm->emeta_len[0], GFP_KERNEL);
			if (!emeta->buf) {
				kfree(emeta);
				goto fail_free_emeta;
			}

			emeta->nr_entries = lm->emeta_sec[0];
			l_mg->eline_meta[i] = emeta;
		}
	}

	l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
	if (!l_mg->vsc_list)
		goto fail_free_emeta;

	for (i = 0; i < l_mg->nr_lines; i++)
		l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);

	return 0;

fail_free_emeta:
	while (--i >= 0) {
		if (l_mg->emeta_alloc_type == PBLK_VMALLOC_META)
			vfree(l_mg->eline_meta[i]->buf);
		else
			kfree(l_mg->eline_meta[i]->buf);
		kfree(l_mg->eline_meta[i]);
	}

fail_free_smeta:
	for (i = 0; i < PBLK_DATA_LINES; i++)
		kfree(l_mg->sline_meta[i]);

	return -ENOMEM;
}

static int pblk_lines_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;
	unsigned int smeta_len, emeta_len;
	long nr_bad_blks, nr_free_blks;
	int bb_distance, max_write_ppas, mod;
	int i, ret;

	pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
	max_write_ppas = pblk->min_write_pgs * geo->nr_luns;
	pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
				max_write_ppas : nvm_max_phys_sects(dev);
	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);

	if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
		pr_err("pblk: cannot support device max_phys_sect\n");
		return -EINVAL;
	}

	div_u64_rem(geo->sec_per_blk, pblk->min_write_pgs, &mod);
	if (mod) {
		pr_err("pblk: bad configuration of sectors/pages\n");
		return -EINVAL;
	}

	l_mg->nr_lines = geo->blks_per_lun;
	l_mg->log_line = l_mg->data_line = NULL;
	l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
	l_mg->nr_free_lines = 0;
	bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);

	lm->sec_per_line = geo->sec_per_blk * geo->nr_luns;
	lm->blk_per_line = geo->nr_luns;
	lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
	lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
	lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
	lm->high_thrs = lm->sec_per_line / 2;
	lm->mid_thrs = lm->sec_per_line / 4;
	lm->meta_distance = (geo->nr_luns / 2) * pblk->min_write_pgs;

	/* Calculate necessary pages for smeta. See comment over struct
	 * line_smeta definition
	 */
	i = 1;
add_smeta_page:
	lm->smeta_sec = i * geo->sec_per_pl;
	lm->smeta_len = lm->smeta_sec * geo->sec_size;

	smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
	if (smeta_len > lm->smeta_len) {
		i++;
		goto add_smeta_page;
	}

	/* Calculate necessary pages for emeta. See comment over struct
	 * line_emeta definition
	 */
	i = 1;
add_emeta_page:
	lm->emeta_sec[0] = i * geo->sec_per_pl;
	lm->emeta_len[0] = lm->emeta_sec[0] * geo->sec_size;

	emeta_len = calc_emeta_len(pblk);
	if (emeta_len > lm->emeta_len[0]) {
		i++;
		goto add_emeta_page;
	}

	lm->emeta_bb = geo->nr_luns - i;
	lm->min_blk_line = 1 + DIV_ROUND_UP(lm->smeta_sec + lm->emeta_sec[0],
							geo->sec_per_blk);
	if (lm->min_blk_line > lm->blk_per_line) {
		pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
							lm->blk_per_line);
		ret = -EINVAL;
		goto fail;
	}

	ret = pblk_lines_alloc_metadata(pblk);
	if (ret)
		goto fail;

	l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_template) {
		ret = -ENOMEM;
		goto fail_free_meta;
	}

	l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_aux) {
		ret = -ENOMEM;
		goto fail_free_bb_template;
	}

	bb_distance = (geo->nr_luns) * geo->sec_per_pl;
	for (i = 0; i < lm->sec_per_line; i += bb_distance)
		bitmap_set(l_mg->bb_template, i, geo->sec_per_pl);

	INIT_LIST_HEAD(&l_mg->free_list);
	INIT_LIST_HEAD(&l_mg->corrupt_list);
	INIT_LIST_HEAD(&l_mg->bad_list);
	INIT_LIST_HEAD(&l_mg->gc_full_list);
	INIT_LIST_HEAD(&l_mg->gc_high_list);
	INIT_LIST_HEAD(&l_mg->gc_mid_list);
	INIT_LIST_HEAD(&l_mg->gc_low_list);
	INIT_LIST_HEAD(&l_mg->gc_empty_list);

	INIT_LIST_HEAD(&l_mg->emeta_list);

	l_mg->gc_lists[0] = &l_mg->gc_high_list;
	l_mg->gc_lists[1] = &l_mg->gc_mid_list;
	l_mg->gc_lists[2] = &l_mg->gc_low_list;

	spin_lock_init(&l_mg->free_lock);
	spin_lock_init(&l_mg->close_lock);
	spin_lock_init(&l_mg->gc_lock);

	pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
								GFP_KERNEL);
	if (!pblk->lines) {
		ret = -ENOMEM;
		goto fail_free_bb_aux;
	}

	nr_free_blks = 0;
	for (i = 0; i < l_mg->nr_lines; i++) {
		int blk_in_line;

		line = &pblk->lines[i];

		line->pblk = pblk;
		line->id = i;
		line->type = PBLK_LINETYPE_FREE;
		line->state = PBLK_LINESTATE_FREE;
		line->gc_group = PBLK_LINEGC_NONE;
		line->vsc = &l_mg->vsc_list[i];
		spin_lock_init(&line->lock);

		ret = pblk_alloc_line_bitmaps(pblk, line);
		if (ret)
			goto fail_free_lines;

		nr_bad_blks = pblk_bb_line(pblk, line, lm->blk_per_line);
		if (nr_bad_blks < 0 || nr_bad_blks > lm->blk_per_line) {
			pblk_free_line_bitmaps(line);
			ret = -EINVAL;
			goto fail_free_lines;
		}

		blk_in_line = lm->blk_per_line - nr_bad_blks;
		if (blk_in_line < lm->min_blk_line) {
			line->state = PBLK_LINESTATE_BAD;
			list_add_tail(&line->list, &l_mg->bad_list);
			continue;
		}

		nr_free_blks += blk_in_line;
		atomic_set(&line->blk_in_line, blk_in_line);

		l_mg->nr_free_lines++;
		list_add_tail(&line->list, &l_mg->free_list);
	}

	pblk_set_provision(pblk, nr_free_blks);

	/* Cleanup per-LUN bad block lists - managed within lines on run-time */
	for (i = 0; i < geo->nr_luns; i++)
		kfree(pblk->luns[i].bb_list);

	return 0;
fail_free_lines:
	while (--i >= 0)
		pblk_free_line_bitmaps(&pblk->lines[i]);
fail_free_bb_aux:
	kfree(l_mg->bb_aux);
fail_free_bb_template:
	kfree(l_mg->bb_template);
fail_free_meta:
	pblk_line_meta_free(pblk);
fail:
	for (i = 0; i < geo->nr_luns; i++)
		kfree(pblk->luns[i].bb_list);

	return ret;
}

static int pblk_writer_init(struct pblk *pblk)
{
	setup_timer(&pblk->wtimer, pblk_write_timer_fn, (unsigned long)pblk);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));

	pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
	if (IS_ERR(pblk->writer_ts)) {
		pr_err("pblk: could not allocate writer kthread\n");
		return PTR_ERR(pblk->writer_ts);
	}

	return 0;
}

static void pblk_writer_stop(struct pblk *pblk)
{
	/* The pipeline must be stopped and the write buffer emptied before the
	 * write thread is stopped
	 */
	WARN(pblk_rb_read_count(&pblk->rwb),
			"Stopping not fully persisted write buffer\n");

	WARN(pblk_rb_sync_count(&pblk->rwb),
			"Stopping not fully synced write buffer\n");

	if (pblk->writer_ts)
		kthread_stop(pblk->writer_ts);
	del_timer(&pblk->wtimer);
}

static void pblk_free(struct pblk *pblk)
{
	pblk_luns_free(pblk);
	pblk_lines_free(pblk);
	pblk_line_meta_free(pblk);
	pblk_core_free(pblk);
	pblk_l2p_free(pblk);

	kfree(pblk);
}

static void pblk_tear_down(struct pblk *pblk)
{
	pblk_pipeline_stop(pblk);
	pblk_writer_stop(pblk);
	pblk_rb_sync_l2p(&pblk->rwb);
	pblk_rwb_free(pblk);
	pblk_rl_free(&pblk->rl);

	pr_debug("pblk: consistent tear down\n");
}

static void pblk_exit(void *private)
{
	struct pblk *pblk = private;

	down_write(&pblk_lock);
	pblk_gc_exit(pblk);
	pblk_tear_down(pblk);
	pblk_free(pblk);
	up_write(&pblk_lock);
}

static sector_t pblk_capacity(void *private)
{
	struct pblk *pblk = private;

	return pblk->capacity * NR_PHY_IN_LOG;
}

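/*
 * Target bring-up: LUNs, lines and their metadata, core resources (caches,
 * mempools, workqueues, write buffer), the L2P table, line
 * configuration/recovery, and finally the writer thread and GC. Error paths
 * unwind whatever has already been set up.
 */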
static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
		       int flags)
{
	struct nvm_geo *geo = &dev->geo;
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct pblk *pblk;
	int ret;

	if (dev->identity.dom & NVM_RSP_L2P) {
		pr_err("pblk: host-side L2P table not supported. (%x)\n",
							dev->identity.dom);
		return ERR_PTR(-EINVAL);
	}

	pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
	if (!pblk)
		return ERR_PTR(-ENOMEM);

	pblk->dev = dev;
	pblk->disk = tdisk;
	pblk->state = PBLK_STATE_RUNNING;

	spin_lock_init(&pblk->trans_lock);
	spin_lock_init(&pblk->lock);

	if (flags & NVM_TARGET_FACTORY)
		pblk_setup_uuid(pblk);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_set(&pblk->inflight_writes, 0);
	atomic_long_set(&pblk->padded_writes, 0);
	atomic_long_set(&pblk->padded_wb, 0);
	atomic_long_set(&pblk->nr_flush, 0);
	atomic_long_set(&pblk->req_writes, 0);
	atomic_long_set(&pblk->sub_writes, 0);
	atomic_long_set(&pblk->sync_writes, 0);
	atomic_long_set(&pblk->inflight_reads, 0);
	atomic_long_set(&pblk->cache_reads, 0);
	atomic_long_set(&pblk->sync_reads, 0);
	atomic_long_set(&pblk->recov_writes, 0);
	atomic_long_set(&pblk->recov_writes, 0);
	atomic_long_set(&pblk->recov_gc_writes, 0);
	atomic_long_set(&pblk->recov_gc_reads, 0);
#endif

	atomic_long_set(&pblk->read_failed, 0);
	atomic_long_set(&pblk->read_empty, 0);
	atomic_long_set(&pblk->read_high_ecc, 0);
	atomic_long_set(&pblk->read_failed_gc, 0);
	atomic_long_set(&pblk->write_failed, 0);
	atomic_long_set(&pblk->erase_failed, 0);

	ret = pblk_luns_init(pblk, dev->luns);
	if (ret) {
		pr_err("pblk: could not initialize luns\n");
		goto fail;
	}

	ret = pblk_lines_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize lines\n");
		goto fail_free_luns;
	}

	ret = pblk_core_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize core\n");
		goto fail_free_line_meta;
	}

	ret = pblk_l2p_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize maps\n");
		goto fail_free_core;
	}

	ret = pblk_lines_configure(pblk, flags);
	if (ret) {
		pr_err("pblk: could not configure lines\n");
		goto fail_free_l2p;
	}

	ret = pblk_writer_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize write thread\n");
		goto fail_free_lines;
	}

	ret = pblk_gc_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize gc\n");
		goto fail_stop_writer;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	blk_queue_write_cache(tqueue, true, false);

	tqueue->limits.discard_granularity = geo->pgs_per_blk * geo->pfpg_size;
	tqueue->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue);

	pr_info("pblk init: luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
			geo->nr_luns, pblk->l_mg.nr_lines,
			(unsigned long long)pblk->rl.nr_secs,
			pblk->rwb.nr_entries);

	wake_up_process(pblk->writer_ts);
	return pblk;

fail_stop_writer:
	pblk_writer_stop(pblk);
fail_free_lines:
	pblk_lines_free(pblk);
fail_free_l2p:
	pblk_l2p_free(pblk);
fail_free_core:
	pblk_core_free(pblk);
fail_free_line_meta:
	pblk_line_meta_free(pblk);
fail_free_luns:
	pblk_luns_free(pblk);
fail:
	kfree(pblk);
	return ERR_PTR(ret);
}

/* physical block device target */
static struct nvm_tgt_type tt_pblk = {
	.name		= "pblk",
	.version	= {1, 0, 0},

	.make_rq	= pblk_make_rq,
	.capacity	= pblk_capacity,

	.init		= pblk_init,
	.exit		= pblk_exit,

	.sysfs_init	= pblk_sysfs_init,
	.sysfs_exit	= pblk_sysfs_exit,
	.owner		= THIS_MODULE,
};

static int __init pblk_module_init(void)
{
	int ret;

	pblk_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
	if (!pblk_bio_set)
		return -ENOMEM;
	ret = nvm_register_tgt_type(&tt_pblk);
	if (ret)
		bioset_free(pblk_bio_set);
	return ret;
}

static void pblk_module_exit(void)
{
	bioset_free(pblk_bio_set);
	nvm_unregister_tgt_type(&tt_pblk);
}

module_init(pblk_module_init);
module_exit(pblk_module_exit);
MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");