]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/lightnvm/pblk-init.c
lightnvm: simplify geometry structure
[mirror_ubuntu-jammy-kernel.git] / drivers / lightnvm / pblk-init.c
CommitLineData
a4bd217b
JG
1/*
2 * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
3 * Copyright (C) 2016 CNEX Labs
4 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
5 * Matias Bjorling <matias@cnexlabs.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * Implementation of a physical block-device target for Open-channel SSDs.
17 *
18 * pblk-init.c - pblk's initialization.
19 */
20
21#include "pblk.h"
22
/* Slab caches shared by every pblk target instance on the host. */
static struct kmem_cache *pblk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
				*pblk_w_rq_cache;
/* Serializes target init/exit and global cache creation/teardown. */
static DECLARE_RWSEM(pblk_lock);
struct bio_set *pblk_bio_set;
a4bd217b
JG
27
28static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
29 struct bio *bio)
30{
31 int ret;
32
33 /* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
34 * constraint. Writes can be of arbitrary size.
35 */
36 if (bio_data_dir(bio) == READ) {
af67c31f 37 blk_queue_split(q, &bio);
a4bd217b
JG
38 ret = pblk_submit_read(pblk, bio);
39 if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
40 bio_put(bio);
41
42 return ret;
43 }
44
45 /* Prevent deadlock in the case of a modest LUN configuration and large
46 * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
47 * available for user I/O.
48 */
da67e68f 49 if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
af67c31f 50 blk_queue_split(q, &bio);
a4bd217b
JG
51
52 return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
53}
54
55static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
56{
57 struct pblk *pblk = q->queuedata;
58
59 if (bio_op(bio) == REQ_OP_DISCARD) {
60 pblk_discard(pblk, bio);
61 if (!(bio->bi_opf & REQ_PREFLUSH)) {
62 bio_endio(bio);
63 return BLK_QC_T_NONE;
64 }
65 }
66
67 switch (pblk_rw_io(q, pblk, bio)) {
68 case NVM_IO_ERR:
69 bio_io_error(bio);
70 break;
71 case NVM_IO_DONE:
72 bio_endio(bio);
73 break;
74 }
75
76 return BLK_QC_T_NONE;
77}
78
c5586192
HH
79static size_t pblk_trans_map_size(struct pblk *pblk)
80{
81 int entry_size = 8;
82
83 if (pblk->ppaf_bitsize < 32)
84 entry_size = 4;
85
86 return entry_size * pblk->rl.nr_secs;
87}
88
#ifdef CONFIG_NVM_DEBUG
/* CRC32 of the whole in-memory L2P table, used to verify that the table
 * survives a target exit/init cycle intact.
 */
static u32 pblk_l2p_crc(struct pblk *pblk)
{
	size_t map_size = pblk_trans_map_size(pblk);

	return crc32_le(~(u32)0, pblk->trans_map, map_size);
}
#endif
100
a4bd217b
JG
101static void pblk_l2p_free(struct pblk *pblk)
102{
103 vfree(pblk->trans_map);
104}
105
43d47127
JG
106static int pblk_l2p_recover(struct pblk *pblk, bool factory_init)
107{
108 struct pblk_line *line = NULL;
109
110 if (factory_init) {
111 pblk_setup_uuid(pblk);
112 } else {
113 line = pblk_recov_l2p(pblk);
114 if (IS_ERR(line)) {
115 pr_err("pblk: could not recover l2p table\n");
116 return -EFAULT;
117 }
118 }
119
120#ifdef CONFIG_NVM_DEBUG
121 pr_info("pblk init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
122#endif
123
124 /* Free full lines directly as GC has not been started yet */
125 pblk_gc_free_full_lines(pblk);
126
127 if (!line) {
128 /* Configure next line for user data */
129 line = pblk_line_get_first_data(pblk);
130 if (!line) {
131 pr_err("pblk: line list corrupted\n");
132 return -EFAULT;
133 }
134 }
135
136 return 0;
137}
138
139static int pblk_l2p_init(struct pblk *pblk, bool factory_init)
a4bd217b
JG
140{
141 sector_t i;
142 struct ppa_addr ppa;
c5586192 143 size_t map_size;
a4bd217b 144
c5586192
HH
145 map_size = pblk_trans_map_size(pblk);
146 pblk->trans_map = vmalloc(map_size);
a4bd217b
JG
147 if (!pblk->trans_map)
148 return -ENOMEM;
149
150 pblk_ppa_set_empty(&ppa);
151
152 for (i = 0; i < pblk->rl.nr_secs; i++)
153 pblk_trans_map_set(pblk, i, ppa);
154
43d47127 155 return pblk_l2p_recover(pblk, factory_init);
a4bd217b
JG
156}
157
158static void pblk_rwb_free(struct pblk *pblk)
159{
160 if (pblk_rb_tear_down_check(&pblk->rwb))
161 pr_err("pblk: write buffer error on tear down\n");
162
163 pblk_rb_data_free(&pblk->rwb);
164 vfree(pblk_rb_entries_ref(&pblk->rwb));
165}
166
167static int pblk_rwb_init(struct pblk *pblk)
168{
169 struct nvm_tgt_dev *dev = pblk->dev;
170 struct nvm_geo *geo = &dev->geo;
171 struct pblk_rb_entry *entries;
172 unsigned long nr_entries;
173 unsigned int power_size, power_seg_sz;
174
175 nr_entries = pblk_rb_calculate_size(pblk->pgs_in_buffer);
176
177 entries = vzalloc(nr_entries * sizeof(struct pblk_rb_entry));
178 if (!entries)
179 return -ENOMEM;
180
181 power_size = get_count_order(nr_entries);
e46f4e48 182 power_seg_sz = get_count_order(geo->csecs);
a4bd217b
JG
183
184 return pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz);
185}
186
187/* Minimum pages needed within a lun */
a4bd217b
JG
188#define ADDR_POOL_SIZE 64
189
e46f4e48 190static int pblk_set_addrf_12(struct nvm_geo *geo, struct nvm_addrf_12 *dst)
a4bd217b 191{
e46f4e48
JG
192 struct nvm_addrf_12 *src = (struct nvm_addrf_12 *)&geo->addrf;
193 int power_len;
a4bd217b
JG
194
195 /* Re-calculate channel and lun format to adapt to configuration */
196 power_len = get_count_order(geo->nr_chnls);
197 if (1 << power_len != geo->nr_chnls) {
198 pr_err("pblk: supports only power-of-two channel config.\n");
199 return -EINVAL;
200 }
e46f4e48 201 dst->ch_len = power_len;
a4bd217b 202
fae7fae4
MB
203 power_len = get_count_order(geo->nr_luns);
204 if (1 << power_len != geo->nr_luns) {
a4bd217b
JG
205 pr_err("pblk: supports only power-of-two LUN config.\n");
206 return -EINVAL;
207 }
e46f4e48
JG
208 dst->lun_len = power_len;
209
210 dst->blk_len = src->blk_len;
211 dst->pg_len = src->pg_len;
212 dst->pln_len = src->pln_len;
213 dst->sect_len = src->sect_len;
214
215 dst->sect_offset = 0;
216 dst->pln_offset = dst->sect_len;
217 dst->ch_offset = dst->pln_offset + dst->pln_len;
218 dst->lun_offset = dst->ch_offset + dst->ch_len;
219 dst->pg_offset = dst->lun_offset + dst->lun_len;
220 dst->blk_offset = dst->pg_offset + dst->pg_len;
221
222 dst->sec_mask = ((1ULL << dst->sect_len) - 1) << dst->sect_offset;
223 dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset;
224 dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
225 dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
226 dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset;
227 dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset;
228
229 return dst->blk_offset + src->blk_len;
230}
231
232static int pblk_set_ppaf(struct pblk *pblk)
233{
234 struct nvm_tgt_dev *dev = pblk->dev;
235 struct nvm_geo *geo = &dev->geo;
236 int mod;
237
238 div_u64_rem(geo->clba, pblk->min_write_pgs, &mod);
239 if (mod) {
240 pr_err("pblk: bad configuration of sectors/pages\n");
241 return -EINVAL;
242 }
243
244 pblk->ppaf_bitsize = pblk_set_addrf_12(geo, (void *)&pblk->ppaf);
a4bd217b
JG
245
246 return 0;
247}
248
249static int pblk_init_global_caches(struct pblk *pblk)
250{
a4bd217b 251 down_write(&pblk_lock);
b84ae4a8 252 pblk_ws_cache = kmem_cache_create("pblk_blk_ws",
a4bd217b 253 sizeof(struct pblk_line_ws), 0, 0, NULL);
b84ae4a8 254 if (!pblk_ws_cache) {
a4bd217b
JG
255 up_write(&pblk_lock);
256 return -ENOMEM;
257 }
258
259 pblk_rec_cache = kmem_cache_create("pblk_rec",
260 sizeof(struct pblk_rec_ctx), 0, 0, NULL);
261 if (!pblk_rec_cache) {
b84ae4a8 262 kmem_cache_destroy(pblk_ws_cache);
a4bd217b
JG
263 up_write(&pblk_lock);
264 return -ENOMEM;
265 }
266
084ec9ba 267 pblk_g_rq_cache = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
a4bd217b 268 0, 0, NULL);
084ec9ba 269 if (!pblk_g_rq_cache) {
b84ae4a8 270 kmem_cache_destroy(pblk_ws_cache);
a4bd217b
JG
271 kmem_cache_destroy(pblk_rec_cache);
272 up_write(&pblk_lock);
273 return -ENOMEM;
274 }
275
276 pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
277 0, 0, NULL);
278 if (!pblk_w_rq_cache) {
b84ae4a8 279 kmem_cache_destroy(pblk_ws_cache);
a4bd217b 280 kmem_cache_destroy(pblk_rec_cache);
084ec9ba 281 kmem_cache_destroy(pblk_g_rq_cache);
a4bd217b
JG
282 up_write(&pblk_lock);
283 return -ENOMEM;
284 }
a4bd217b
JG
285 up_write(&pblk_lock);
286
287 return 0;
288}
289
22a4e061
RP
290static void pblk_free_global_caches(struct pblk *pblk)
291{
292 kmem_cache_destroy(pblk_ws_cache);
293 kmem_cache_destroy(pblk_rec_cache);
294 kmem_cache_destroy(pblk_g_rq_cache);
295 kmem_cache_destroy(pblk_w_rq_cache);
296}
297
a4bd217b
JG
298static int pblk_core_init(struct pblk *pblk)
299{
300 struct nvm_tgt_dev *dev = pblk->dev;
301 struct nvm_geo *geo = &dev->geo;
43d47127
JG
302 int max_write_ppas;
303
304 atomic64_set(&pblk->user_wa, 0);
305 atomic64_set(&pblk->pad_wa, 0);
306 atomic64_set(&pblk->gc_wa, 0);
307 pblk->user_rst_wa = 0;
308 pblk->pad_rst_wa = 0;
309 pblk->gc_rst_wa = 0;
310
311 atomic64_set(&pblk->nr_flush, 0);
312 pblk->nr_flush_rst = 0;
a4bd217b 313
e46f4e48 314 pblk->pgs_in_buffer = geo->mw_cunits * geo->all_luns;
a4bd217b 315
e46f4e48 316 pblk->min_write_pgs = geo->ws_opt * (geo->csecs / PAGE_SIZE);
43d47127
JG
317 max_write_ppas = pblk->min_write_pgs * geo->all_luns;
318 pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
319 pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
320
321 if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
322 pr_err("pblk: vector list too big(%u > %u)\n",
323 pblk->max_write_pgs, PBLK_MAX_REQ_ADDRS);
324 return -EINVAL;
325 }
326
327 pblk->pad_dist = kzalloc((pblk->min_write_pgs - 1) * sizeof(atomic64_t),
328 GFP_KERNEL);
329 if (!pblk->pad_dist)
a4bd217b
JG
330 return -ENOMEM;
331
43d47127
JG
332 if (pblk_init_global_caches(pblk))
333 goto fail_free_pad_dist;
334
b84ae4a8 335 /* Internal bios can be at most the sectors signaled by the device. */
89a09c56 336 pblk->page_bio_pool = mempool_create_page_pool(NVM_MAX_VLBA, 0);
bd432417 337 if (!pblk->page_bio_pool)
22a4e061 338 goto free_global_caches;
a4bd217b 339
b84ae4a8
JG
340 pblk->gen_ws_pool = mempool_create_slab_pool(PBLK_GEN_WS_POOL_SIZE,
341 pblk_ws_cache);
342 if (!pblk->gen_ws_pool)
bd432417 343 goto free_page_bio_pool;
a4bd217b 344
fae7fae4
MB
345 pblk->rec_pool = mempool_create_slab_pool(geo->all_luns,
346 pblk_rec_cache);
a4bd217b 347 if (!pblk->rec_pool)
b84ae4a8 348 goto free_gen_ws_pool;
a4bd217b 349
fae7fae4 350 pblk->r_rq_pool = mempool_create_slab_pool(geo->all_luns,
ef576494 351 pblk_g_rq_cache);
0d880398 352 if (!pblk->r_rq_pool)
a4bd217b
JG
353 goto free_rec_pool;
354
fae7fae4 355 pblk->e_rq_pool = mempool_create_slab_pool(geo->all_luns,
0d880398
JG
356 pblk_g_rq_cache);
357 if (!pblk->e_rq_pool)
358 goto free_r_rq_pool;
359
fae7fae4 360 pblk->w_rq_pool = mempool_create_slab_pool(geo->all_luns,
ef576494 361 pblk_w_rq_cache);
a4bd217b 362 if (!pblk->w_rq_pool)
0d880398 363 goto free_e_rq_pool;
a4bd217b 364
ef576494
JG
365 pblk->close_wq = alloc_workqueue("pblk-close-wq",
366 WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
367 if (!pblk->close_wq)
e72ec1d3 368 goto free_w_rq_pool;
a4bd217b 369
ef576494
JG
370 pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
371 WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
372 if (!pblk->bb_wq)
373 goto free_close_wq;
374
7bd4d370
JG
375 pblk->r_end_wq = alloc_workqueue("pblk-read-end-wq",
376 WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
377 if (!pblk->r_end_wq)
ef576494 378 goto free_bb_wq;
a4bd217b 379
7bd4d370
JG
380 if (pblk_set_ppaf(pblk))
381 goto free_r_end_wq;
382
a4bd217b 383 INIT_LIST_HEAD(&pblk->compl_list);
43d47127 384
a4bd217b
JG
385 return 0;
386
7bd4d370
JG
387free_r_end_wq:
388 destroy_workqueue(pblk->r_end_wq);
ef576494
JG
389free_bb_wq:
390 destroy_workqueue(pblk->bb_wq);
391free_close_wq:
392 destroy_workqueue(pblk->close_wq);
a4bd217b
JG
393free_w_rq_pool:
394 mempool_destroy(pblk->w_rq_pool);
0d880398
JG
395free_e_rq_pool:
396 mempool_destroy(pblk->e_rq_pool);
397free_r_rq_pool:
398 mempool_destroy(pblk->r_rq_pool);
a4bd217b
JG
399free_rec_pool:
400 mempool_destroy(pblk->rec_pool);
b84ae4a8
JG
401free_gen_ws_pool:
402 mempool_destroy(pblk->gen_ws_pool);
bd432417
JG
403free_page_bio_pool:
404 mempool_destroy(pblk->page_bio_pool);
22a4e061
RP
405free_global_caches:
406 pblk_free_global_caches(pblk);
43d47127
JG
407fail_free_pad_dist:
408 kfree(pblk->pad_dist);
a4bd217b
JG
409 return -ENOMEM;
410}
411
412static void pblk_core_free(struct pblk *pblk)
413{
ef576494
JG
414 if (pblk->close_wq)
415 destroy_workqueue(pblk->close_wq);
416
7bd4d370
JG
417 if (pblk->r_end_wq)
418 destroy_workqueue(pblk->r_end_wq);
419
ef576494
JG
420 if (pblk->bb_wq)
421 destroy_workqueue(pblk->bb_wq);
a4bd217b 422
bd432417 423 mempool_destroy(pblk->page_bio_pool);
b84ae4a8 424 mempool_destroy(pblk->gen_ws_pool);
a4bd217b 425 mempool_destroy(pblk->rec_pool);
0d880398
JG
426 mempool_destroy(pblk->r_rq_pool);
427 mempool_destroy(pblk->e_rq_pool);
a4bd217b 428 mempool_destroy(pblk->w_rq_pool);
a4bd217b 429
22a4e061 430 pblk_free_global_caches(pblk);
43d47127 431 kfree(pblk->pad_dist);
a4bd217b
JG
432}
433
e411b331
JG
434static void pblk_line_mg_free(struct pblk *pblk)
435{
436 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
437 int i;
438
439 kfree(l_mg->bb_template);
440 kfree(l_mg->bb_aux);
441 kfree(l_mg->vsc_list);
442
443 for (i = 0; i < PBLK_DATA_LINES; i++) {
444 kfree(l_mg->sline_meta[i]);
445 pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
446 kfree(l_mg->eline_meta[i]);
447 }
e411b331
JG
448}
449
450static void pblk_line_meta_free(struct pblk_line *line)
dffdd960
JG
451{
452 kfree(line->blk_bitmap);
453 kfree(line->erase_bitmap);
454}
455
a4bd217b
JG
456static void pblk_lines_free(struct pblk *pblk)
457{
458 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
459 struct pblk_line *line;
460 int i;
461
462 spin_lock(&l_mg->free_lock);
463 for (i = 0; i < l_mg->nr_lines; i++) {
464 line = &pblk->lines[i];
465
466 pblk_line_free(pblk, line);
e411b331 467 pblk_line_meta_free(line);
a4bd217b
JG
468 }
469 spin_unlock(&l_mg->free_lock);
43d47127
JG
470
471 pblk_line_mg_free(pblk);
472
473 kfree(pblk->luns);
474 kfree(pblk->lines);
a4bd217b
JG
475}
476
e411b331
JG
477static int pblk_bb_get_tbl(struct nvm_tgt_dev *dev, struct pblk_lun *rlun,
478 u8 *blks, int nr_blks)
a4bd217b 479{
a4bd217b 480 struct ppa_addr ppa;
e411b331 481 int ret;
a4bd217b
JG
482
483 ppa.ppa = 0;
484 ppa.g.ch = rlun->bppa.g.ch;
485 ppa.g.lun = rlun->bppa.g.lun;
486
487 ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
488 if (ret)
e411b331 489 return ret;
a4bd217b
JG
490
491 nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
e411b331
JG
492 if (nr_blks < 0)
493 return -EIO;
a4bd217b 494
5136a4fd 495 return 0;
a4bd217b
JG
496}
497
e411b331 498static void *pblk_bb_get_log(struct pblk *pblk)
a4bd217b 499{
dffdd960
JG
500 struct nvm_tgt_dev *dev = pblk->dev;
501 struct nvm_geo *geo = &dev->geo;
e411b331
JG
502 u8 *log;
503 int i, nr_blks, blk_per_lun;
504 int ret;
a4bd217b 505
e411b331
JG
506 blk_per_lun = geo->nr_chks * geo->plane_mode;
507 nr_blks = blk_per_lun * geo->all_luns;
dffdd960 508
e411b331
JG
509 log = kmalloc(nr_blks, GFP_KERNEL);
510 if (!log)
511 return ERR_PTR(-ENOMEM);
512
513 for (i = 0; i < geo->all_luns; i++) {
514 struct pblk_lun *rlun = &pblk->luns[i];
515 u8 *log_pos = log + i * blk_per_lun;
516
517 ret = pblk_bb_get_tbl(dev, rlun, log_pos, blk_per_lun);
518 if (ret) {
519 kfree(log);
520 return ERR_PTR(-EIO);
521 }
dffdd960
JG
522 }
523
e411b331 524 return log;
dffdd960
JG
525}
526
e411b331
JG
527static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line,
528 u8 *bb_log, int blk_per_line)
dffdd960 529{
e411b331
JG
530 struct nvm_tgt_dev *dev = pblk->dev;
531 struct nvm_geo *geo = &dev->geo;
532 int i, bb_cnt = 0;
a38c78d8 533 int blk_per_lun = geo->nr_chks * geo->plane_mode;
dffdd960 534
e411b331
JG
535 for (i = 0; i < blk_per_line; i++) {
536 struct pblk_lun *rlun = &pblk->luns[i];
a38c78d8 537 u8 *lun_bb_log = bb_log + i * blk_per_lun;
a4bd217b 538
e411b331
JG
539 if (lun_bb_log[line->id] == NVM_BLK_T_FREE)
540 continue;
541
542 set_bit(pblk_ppa_to_pos(geo, rlun->bppa), line->blk_bitmap);
543 bb_cnt++;
a4bd217b
JG
544 }
545
e411b331 546 return bb_cnt;
a4bd217b
JG
547}
548
43d47127 549static int pblk_luns_init(struct pblk *pblk)
a4bd217b
JG
550{
551 struct nvm_tgt_dev *dev = pblk->dev;
552 struct nvm_geo *geo = &dev->geo;
553 struct pblk_lun *rlun;
e411b331 554 int i;
a4bd217b
JG
555
556 /* TODO: Implement unbalanced LUN support */
fae7fae4 557 if (geo->nr_luns < 0) {
a4bd217b
JG
558 pr_err("pblk: unbalanced LUN config.\n");
559 return -EINVAL;
560 }
561
fae7fae4
MB
562 pblk->luns = kcalloc(geo->all_luns, sizeof(struct pblk_lun),
563 GFP_KERNEL);
a4bd217b
JG
564 if (!pblk->luns)
565 return -ENOMEM;
566
fae7fae4 567 for (i = 0; i < geo->all_luns; i++) {
a4bd217b
JG
568 /* Stripe across channels */
569 int ch = i % geo->nr_chnls;
570 int lun_raw = i / geo->nr_chnls;
fae7fae4 571 int lunid = lun_raw + ch * geo->nr_luns;
a4bd217b
JG
572
573 rlun = &pblk->luns[i];
43d47127 574 rlun->bppa = dev->luns[lunid];
a4bd217b
JG
575
576 sema_init(&rlun->wr_sem, 1);
a4bd217b
JG
577 }
578
579 return 0;
580}
581
a4bd217b 582/* See comment over struct line_emeta definition */
dd2a4343 583static unsigned int calc_emeta_len(struct pblk *pblk)
a4bd217b 584{
dd2a4343
JG
585 struct pblk_line_meta *lm = &pblk->lm;
586 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
587 struct nvm_tgt_dev *dev = pblk->dev;
588 struct nvm_geo *geo = &dev->geo;
589
590 /* Round to sector size so that lba_list starts on its own sector */
591 lm->emeta_sec[1] = DIV_ROUND_UP(
76758390 592 sizeof(struct line_emeta) + lm->blk_bitmap_len +
e46f4e48
JG
593 sizeof(struct wa_counters), geo->csecs);
594 lm->emeta_len[1] = lm->emeta_sec[1] * geo->csecs;
dd2a4343
JG
595
596 /* Round to sector size so that vsc_list starts on its own sector */
597 lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0];
598 lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64),
e46f4e48
JG
599 geo->csecs);
600 lm->emeta_len[2] = lm->emeta_sec[2] * geo->csecs;
dd2a4343
JG
601
602 lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32),
e46f4e48
JG
603 geo->csecs);
604 lm->emeta_len[3] = lm->emeta_sec[3] * geo->csecs;
dd2a4343
JG
605
606 lm->vsc_list_len = l_mg->nr_lines * sizeof(u32);
607
608 return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]);
a4bd217b
JG
609}
610
611static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
612{
613 struct nvm_tgt_dev *dev = pblk->dev;
a7689938
JG
614 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
615 struct pblk_line_meta *lm = &pblk->lm;
a4bd217b
JG
616 struct nvm_geo *geo = &dev->geo;
617 sector_t provisioned;
a7689938 618 int sec_meta, blk_meta;
a4bd217b 619
e5392739
JG
620 if (geo->op == NVM_TARGET_DEFAULT_OP)
621 pblk->op = PBLK_DEFAULT_OP;
622 else
623 pblk->op = geo->op;
a4bd217b
JG
624
625 provisioned = nr_free_blks;
a7689938 626 provisioned *= (100 - pblk->op);
a4bd217b
JG
627 sector_div(provisioned, 100);
628
a7689938
JG
629 pblk->op_blks = nr_free_blks - provisioned;
630
a4bd217b
JG
631 /* Internally pblk manages all free blocks, but all calculations based
632 * on user capacity consider only provisioned blocks
633 */
634 pblk->rl.total_blocks = nr_free_blks;
e46f4e48 635 pblk->rl.nr_secs = nr_free_blks * geo->clba;
a7689938
JG
636
637 /* Consider sectors used for metadata */
638 sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
e46f4e48 639 blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);
a7689938 640
e46f4e48 641 pblk->capacity = (provisioned - blk_meta) * geo->clba;
a7689938 642
a4bd217b 643 atomic_set(&pblk->rl.free_blocks, nr_free_blks);
a7689938 644 atomic_set(&pblk->rl.free_user_blocks, nr_free_blks);
a4bd217b
JG
645}
646
43d47127
JG
647static int pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line,
648 void *chunk_log, long *nr_bad_blks)
649{
650 struct pblk_line_meta *lm = &pblk->lm;
651
652 line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
653 if (!line->blk_bitmap)
654 return -ENOMEM;
655
656 line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
657 if (!line->erase_bitmap) {
658 kfree(line->blk_bitmap);
659 return -ENOMEM;
660 }
661
662 *nr_bad_blks = pblk_bb_line(pblk, line, chunk_log, lm->blk_per_line);
663
664 return 0;
665}
666
667static int pblk_line_mg_init(struct pblk *pblk)
dd2a4343 668{
43d47127
JG
669 struct nvm_tgt_dev *dev = pblk->dev;
670 struct nvm_geo *geo = &dev->geo;
dd2a4343
JG
671 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
672 struct pblk_line_meta *lm = &pblk->lm;
43d47127
JG
673 int i, bb_distance;
674
675 l_mg->nr_lines = geo->nr_chks;
676 l_mg->log_line = l_mg->data_line = NULL;
677 l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
678 l_mg->nr_free_lines = 0;
679 bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);
680
681 INIT_LIST_HEAD(&l_mg->free_list);
682 INIT_LIST_HEAD(&l_mg->corrupt_list);
683 INIT_LIST_HEAD(&l_mg->bad_list);
684 INIT_LIST_HEAD(&l_mg->gc_full_list);
685 INIT_LIST_HEAD(&l_mg->gc_high_list);
686 INIT_LIST_HEAD(&l_mg->gc_mid_list);
687 INIT_LIST_HEAD(&l_mg->gc_low_list);
688 INIT_LIST_HEAD(&l_mg->gc_empty_list);
689
690 INIT_LIST_HEAD(&l_mg->emeta_list);
691
692 l_mg->gc_lists[0] = &l_mg->gc_high_list;
693 l_mg->gc_lists[1] = &l_mg->gc_mid_list;
694 l_mg->gc_lists[2] = &l_mg->gc_low_list;
695
696 spin_lock_init(&l_mg->free_lock);
697 spin_lock_init(&l_mg->close_lock);
698 spin_lock_init(&l_mg->gc_lock);
699
700 l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
701 if (!l_mg->vsc_list)
702 goto fail;
703
704 l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
705 if (!l_mg->bb_template)
706 goto fail_free_vsc_list;
707
708 l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
709 if (!l_mg->bb_aux)
710 goto fail_free_bb_template;
dd2a4343
JG
711
712 /* smeta is always small enough to fit on a kmalloc memory allocation,
713 * emeta depends on the number of LUNs allocated to the pblk instance
714 */
dd2a4343
JG
715 for (i = 0; i < PBLK_DATA_LINES; i++) {
716 l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
717 if (!l_mg->sline_meta[i])
718 goto fail_free_smeta;
719 }
720
721 /* emeta allocates three different buffers for managing metadata with
722 * in-memory and in-media layouts
723 */
724 for (i = 0; i < PBLK_DATA_LINES; i++) {
725 struct pblk_emeta *emeta;
726
727 emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
728 if (!emeta)
729 goto fail_free_emeta;
730
731 if (lm->emeta_len[0] > KMALLOC_MAX_CACHE_SIZE) {
732 l_mg->emeta_alloc_type = PBLK_VMALLOC_META;
733
734 emeta->buf = vmalloc(lm->emeta_len[0]);
735 if (!emeta->buf) {
736 kfree(emeta);
737 goto fail_free_emeta;
738 }
739
740 emeta->nr_entries = lm->emeta_sec[0];
741 l_mg->eline_meta[i] = emeta;
742 } else {
743 l_mg->emeta_alloc_type = PBLK_KMALLOC_META;
744
745 emeta->buf = kmalloc(lm->emeta_len[0], GFP_KERNEL);
746 if (!emeta->buf) {
747 kfree(emeta);
748 goto fail_free_emeta;
749 }
750
751 emeta->nr_entries = lm->emeta_sec[0];
752 l_mg->eline_meta[i] = emeta;
753 }
754 }
755
dd2a4343
JG
756 for (i = 0; i < l_mg->nr_lines; i++)
757 l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);
758
43d47127
JG
759 bb_distance = (geo->all_luns) * geo->ws_opt;
760 for (i = 0; i < lm->sec_per_line; i += bb_distance)
761 bitmap_set(l_mg->bb_template, i, geo->ws_opt);
762
dd2a4343
JG
763 return 0;
764
765fail_free_emeta:
766 while (--i >= 0) {
c9d84b35
RP
767 if (l_mg->emeta_alloc_type == PBLK_VMALLOC_META)
768 vfree(l_mg->eline_meta[i]->buf);
769 else
770 kfree(l_mg->eline_meta[i]->buf);
f680f19a 771 kfree(l_mg->eline_meta[i]);
dd2a4343 772 }
dd2a4343
JG
773fail_free_smeta:
774 for (i = 0; i < PBLK_DATA_LINES; i++)
f680f19a 775 kfree(l_mg->sline_meta[i]);
43d47127
JG
776 kfree(l_mg->bb_aux);
777fail_free_bb_template:
778 kfree(l_mg->bb_template);
779fail_free_vsc_list:
780 kfree(l_mg->vsc_list);
781fail:
dd2a4343
JG
782 return -ENOMEM;
783}
784
43d47127 785static int pblk_line_meta_init(struct pblk *pblk)
a4bd217b
JG
786{
787 struct nvm_tgt_dev *dev = pblk->dev;
788 struct nvm_geo *geo = &dev->geo;
a4bd217b 789 struct pblk_line_meta *lm = &pblk->lm;
a4bd217b 790 unsigned int smeta_len, emeta_len;
43d47127 791 int i;
a4bd217b 792
e46f4e48 793 lm->sec_per_line = geo->clba * geo->all_luns;
fae7fae4
MB
794 lm->blk_per_line = geo->all_luns;
795 lm->blk_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
a4bd217b 796 lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
fae7fae4 797 lm->lun_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
27b97872
RP
798 lm->mid_thrs = lm->sec_per_line / 2;
799 lm->high_thrs = lm->sec_per_line / 4;
fae7fae4 800 lm->meta_distance = (geo->all_luns / 2) * pblk->min_write_pgs;
a4bd217b
JG
801
802 /* Calculate necessary pages for smeta. See comment over struct
803 * line_smeta definition
804 */
a4bd217b
JG
805 i = 1;
806add_smeta_page:
e46f4e48
JG
807 lm->smeta_sec = i * geo->ws_opt;
808 lm->smeta_len = lm->smeta_sec * geo->csecs;
a4bd217b 809
dd2a4343 810 smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
a4bd217b
JG
811 if (smeta_len > lm->smeta_len) {
812 i++;
813 goto add_smeta_page;
814 }
815
816 /* Calculate necessary pages for emeta. See comment over struct
817 * line_emeta definition
818 */
819 i = 1;
820add_emeta_page:
e46f4e48
JG
821 lm->emeta_sec[0] = i * geo->ws_opt;
822 lm->emeta_len[0] = lm->emeta_sec[0] * geo->csecs;
a4bd217b 823
dd2a4343
JG
824 emeta_len = calc_emeta_len(pblk);
825 if (emeta_len > lm->emeta_len[0]) {
a4bd217b
JG
826 i++;
827 goto add_emeta_page;
828 }
a4bd217b 829
fae7fae4 830 lm->emeta_bb = geo->all_luns > i ? geo->all_luns - i : 0;
21d22871
JG
831
832 lm->min_blk_line = 1;
fae7fae4 833 if (geo->all_luns > 1)
21d22871 834 lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
e46f4e48 835 lm->emeta_sec[0], geo->clba);
21d22871 836
b5e063a2
JG
837 if (lm->min_blk_line > lm->blk_per_line) {
838 pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
839 lm->blk_per_line);
e411b331 840 return -EINVAL;
b5e063a2 841 }
a4bd217b 842
43d47127
JG
843 return 0;
844}
845
846static int pblk_lines_init(struct pblk *pblk)
847{
848 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
849 struct pblk_line_meta *lm = &pblk->lm;
850 struct pblk_line *line;
851 void *chunk_log;
852 long nr_bad_blks = 0, nr_free_blks = 0;
853 int i, ret;
854
855 ret = pblk_line_meta_init(pblk);
dd2a4343 856 if (ret)
e411b331 857 return ret;
a4bd217b 858
43d47127
JG
859 ret = pblk_line_mg_init(pblk);
860 if (ret)
861 return ret;
862
863 ret = pblk_luns_init(pblk);
864 if (ret)
a4bd217b
JG
865 goto fail_free_meta;
866
43d47127
JG
867 chunk_log = pblk_bb_get_log(pblk);
868 if (IS_ERR(chunk_log)) {
869 pr_err("pblk: could not get bad block log (%lu)\n",
870 PTR_ERR(chunk_log));
871 ret = PTR_ERR(chunk_log);
872 goto fail_free_luns;
1c6286f2 873 }
a4bd217b 874
a4bd217b
JG
875 pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
876 GFP_KERNEL);
1c6286f2
DC
877 if (!pblk->lines) {
878 ret = -ENOMEM;
43d47127 879 goto fail_free_chunk_log;
e411b331
JG
880 }
881
a4bd217b 882 for (i = 0; i < l_mg->nr_lines; i++) {
e411b331 883 int chk_in_line;
a44f53fa 884
a4bd217b
JG
885 line = &pblk->lines[i];
886
887 line->pblk = pblk;
888 line->id = i;
889 line->type = PBLK_LINETYPE_FREE;
890 line->state = PBLK_LINESTATE_FREE;
891 line->gc_group = PBLK_LINEGC_NONE;
dd2a4343 892 line->vsc = &l_mg->vsc_list[i];
a4bd217b
JG
893 spin_lock_init(&line->lock);
894
e411b331 895 ret = pblk_setup_line_meta(pblk, line, chunk_log, &nr_bad_blks);
dffdd960 896 if (ret)
43d47127 897 goto fail_free_lines;
dffdd960 898
e411b331
JG
899 chk_in_line = lm->blk_per_line - nr_bad_blks;
900 if (nr_bad_blks < 0 || nr_bad_blks > lm->blk_per_line ||
901 chk_in_line < lm->min_blk_line) {
a4bd217b
JG
902 line->state = PBLK_LINESTATE_BAD;
903 list_add_tail(&line->list, &l_mg->bad_list);
904 continue;
905 }
906
e411b331
JG
907 nr_free_blks += chk_in_line;
908 atomic_set(&line->blk_in_line, chk_in_line);
a4bd217b
JG
909
910 l_mg->nr_free_lines++;
911 list_add_tail(&line->list, &l_mg->free_list);
912 }
913
914 pblk_set_provision(pblk, nr_free_blks);
915
e411b331 916 kfree(chunk_log);
a4bd217b 917 return 0;
e411b331 918
43d47127 919fail_free_lines:
dffdd960 920 while (--i >= 0)
e411b331 921 pblk_line_meta_free(&pblk->lines[i]);
43d47127
JG
922 kfree(pblk->lines);
923fail_free_chunk_log:
924 kfree(chunk_log);
925fail_free_luns:
926 kfree(pblk->luns);
a4bd217b 927fail_free_meta:
e411b331 928 pblk_line_mg_free(pblk);
a4bd217b
JG
929
930 return ret;
931}
932
933static int pblk_writer_init(struct pblk *pblk)
934{
a4bd217b
JG
935 pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
936 if (IS_ERR(pblk->writer_ts)) {
cc4f5ba1
JG
937 int err = PTR_ERR(pblk->writer_ts);
938
939 if (err != -EINTR)
940 pr_err("pblk: could not allocate writer kthread (%d)\n",
941 err);
942 return err;
a4bd217b
JG
943 }
944
cc4f5ba1
JG
945 timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
946 mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));
947
a4bd217b
JG
948 return 0;
949}
950
951static void pblk_writer_stop(struct pblk *pblk)
952{
ee8d5c1a
JG
953 /* The pipeline must be stopped and the write buffer emptied before the
954 * write thread is stopped
955 */
956 WARN(pblk_rb_read_count(&pblk->rwb),
957 "Stopping not fully persisted write buffer\n");
958
959 WARN(pblk_rb_sync_count(&pblk->rwb),
960 "Stopping not fully synced write buffer\n");
961
7be970b2 962 del_timer_sync(&pblk->wtimer);
a4bd217b
JG
963 if (pblk->writer_ts)
964 kthread_stop(pblk->writer_ts);
a4bd217b
JG
965}
966
/* Release all instance state in reverse order of initialization. */
static void pblk_free(struct pblk *pblk)
{
	pblk_lines_free(pblk);
	pblk_l2p_free(pblk);
	pblk_rwb_free(pblk);
	pblk_core_free(pblk);

	kfree(pblk);
}
976
977static void pblk_tear_down(struct pblk *pblk)
978{
588726d3 979 pblk_pipeline_stop(pblk);
a4bd217b
JG
980 pblk_writer_stop(pblk);
981 pblk_rb_sync_l2p(&pblk->rwb);
a4bd217b
JG
982 pblk_rl_free(&pblk->rl);
983
984 pr_debug("pblk: consistent tear down\n");
985}
986
987static void pblk_exit(void *private)
988{
989 struct pblk *pblk = private;
990
991 down_write(&pblk_lock);
992 pblk_gc_exit(pblk);
993 pblk_tear_down(pblk);
c5586192
HH
994
995#ifdef CONFIG_NVM_DEBUG
996 pr_info("pblk exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
997#endif
998
a4bd217b
JG
999 pblk_free(pblk);
1000 up_write(&pblk_lock);
1001}
1002
1003static sector_t pblk_capacity(void *private)
1004{
1005 struct pblk *pblk = private;
1006
1007 return pblk->capacity * NR_PHY_IN_LOG;
1008}
1009
1010static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
1011 int flags)
1012{
1013 struct nvm_geo *geo = &dev->geo;
1014 struct request_queue *bqueue = dev->q;
1015 struct request_queue *tqueue = tdisk->queue;
1016 struct pblk *pblk;
1017 int ret;
1018
e46f4e48 1019 if (dev->geo.dom & NVM_RSP_L2P) {
4e76af53 1020 pr_err("pblk: host-side L2P table not supported. (%x)\n",
e46f4e48 1021 dev->geo.dom);
a4bd217b
JG
1022 return ERR_PTR(-EINVAL);
1023 }
1024
1025 pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
1026 if (!pblk)
1027 return ERR_PTR(-ENOMEM);
1028
1029 pblk->dev = dev;
1030 pblk->disk = tdisk;
588726d3 1031 pblk->state = PBLK_STATE_RUNNING;
3e3a5b8e 1032 pblk->gc.gc_enabled = 0;
a4bd217b
JG
1033
1034 spin_lock_init(&pblk->trans_lock);
1035 spin_lock_init(&pblk->lock);
1036
a4bd217b
JG
1037#ifdef CONFIG_NVM_DEBUG
1038 atomic_long_set(&pblk->inflight_writes, 0);
1039 atomic_long_set(&pblk->padded_writes, 0);
1040 atomic_long_set(&pblk->padded_wb, 0);
a4bd217b
JG
1041 atomic_long_set(&pblk->req_writes, 0);
1042 atomic_long_set(&pblk->sub_writes, 0);
1043 atomic_long_set(&pblk->sync_writes, 0);
a4bd217b 1044 atomic_long_set(&pblk->inflight_reads, 0);
db7ada33 1045 atomic_long_set(&pblk->cache_reads, 0);
a4bd217b
JG
1046 atomic_long_set(&pblk->sync_reads, 0);
1047 atomic_long_set(&pblk->recov_writes, 0);
1048 atomic_long_set(&pblk->recov_writes, 0);
1049 atomic_long_set(&pblk->recov_gc_writes, 0);
a1121176 1050 atomic_long_set(&pblk->recov_gc_reads, 0);
a4bd217b
JG
1051#endif
1052
1053 atomic_long_set(&pblk->read_failed, 0);
1054 atomic_long_set(&pblk->read_empty, 0);
1055 atomic_long_set(&pblk->read_high_ecc, 0);
1056 atomic_long_set(&pblk->read_failed_gc, 0);
1057 atomic_long_set(&pblk->write_failed, 0);
1058 atomic_long_set(&pblk->erase_failed, 0);
1059
43d47127 1060 ret = pblk_core_init(pblk);
a4bd217b 1061 if (ret) {
43d47127 1062 pr_err("pblk: could not initialize core\n");
a4bd217b
JG
1063 goto fail;
1064 }
1065
1066 ret = pblk_lines_init(pblk);
1067 if (ret) {
1068 pr_err("pblk: could not initialize lines\n");
43d47127 1069 goto fail_free_core;
5d149bfa
HH
1070 }
1071
43d47127 1072 ret = pblk_rwb_init(pblk);
a4bd217b 1073 if (ret) {
43d47127
JG
1074 pr_err("pblk: could not initialize write buffer\n");
1075 goto fail_free_lines;
a4bd217b
JG
1076 }
1077
43d47127 1078 ret = pblk_l2p_init(pblk, flags & NVM_TARGET_FACTORY);
a4bd217b
JG
1079 if (ret) {
1080 pr_err("pblk: could not initialize maps\n");
43d47127 1081 goto fail_free_rwb;
a4bd217b
JG
1082 }
1083
1084 ret = pblk_writer_init(pblk);
1085 if (ret) {
cc4f5ba1
JG
1086 if (ret != -EINTR)
1087 pr_err("pblk: could not initialize write thread\n");
43d47127 1088 goto fail_free_l2p;
a4bd217b
JG
1089 }
1090
1091 ret = pblk_gc_init(pblk);
1092 if (ret) {
1093 pr_err("pblk: could not initialize gc\n");
1094 goto fail_stop_writer;
1095 }
1096
1097 /* inherit the size from the underlying device */
1098 blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
1099 blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
1100
1101 blk_queue_write_cache(tqueue, true, false);
1102
e46f4e48 1103 tqueue->limits.discard_granularity = geo->clba * geo->csecs;
a4bd217b
JG
1104 tqueue->limits.discard_alignment = 0;
1105 blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
8b904b5b 1106 blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue);
a4bd217b 1107
30d82a86
JG
1108 pr_info("pblk(%s): luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
1109 tdisk->disk_name,
fae7fae4 1110 geo->all_luns, pblk->l_mg.nr_lines,
a4bd217b
JG
1111 (unsigned long long)pblk->rl.nr_secs,
1112 pblk->rwb.nr_entries);
1113
1114 wake_up_process(pblk->writer_ts);
03661b5f
HH
1115
1116 /* Check if we need to start GC */
1117 pblk_gc_should_kick(pblk);
1118
a4bd217b
JG
1119 return pblk;
1120
1121fail_stop_writer:
1122 pblk_writer_stop(pblk);
a4bd217b
JG
1123fail_free_l2p:
1124 pblk_l2p_free(pblk);
43d47127
JG
1125fail_free_rwb:
1126 pblk_rwb_free(pblk);
1127fail_free_lines:
1128 pblk_lines_free(pblk);
a4bd217b
JG
1129fail_free_core:
1130 pblk_core_free(pblk);
a4bd217b
JG
1131fail:
1132 kfree(pblk);
1133 return ERR_PTR(ret);
1134}
1135
1136/* physical block device target */
1137static struct nvm_tgt_type tt_pblk = {
1138 .name = "pblk",
1139 .version = {1, 0, 0},
1140
1141 .make_rq = pblk_make_rq,
1142 .capacity = pblk_capacity,
1143
1144 .init = pblk_init,
1145 .exit = pblk_exit,
1146
1147 .sysfs_init = pblk_sysfs_init,
1148 .sysfs_exit = pblk_sysfs_exit,
90014829 1149 .owner = THIS_MODULE,
a4bd217b
JG
1150};
1151
1152static int __init pblk_module_init(void)
1153{
b25d5237
N
1154 int ret;
1155
1156 pblk_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
1157 if (!pblk_bio_set)
1158 return -ENOMEM;
1159 ret = nvm_register_tgt_type(&tt_pblk);
1160 if (ret)
1161 bioset_free(pblk_bio_set);
1162 return ret;
a4bd217b
JG
1163}
1164
/*
 * Module unload: free the shared bio set and unregister the target type
 * from the LightNVM core.
 */
static void pblk_module_exit(void)
{
	bioset_free(pblk_bio_set);
	nvm_unregister_tgt_type(&tt_pblk);
}
1170
/* Module entry/exit points and metadata. */
module_init(pblk_module_init);
module_exit(pblk_module_exit);
MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");