/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#include "rrpc.h"

static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
static DECLARE_RWSEM(rrpc_lock);

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags);

#define rrpc_for_each_lun(rrpc, rlun, i) \
		for ((i) = 0, rlun = &(rrpc)->luns[0]; \
			(i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])

static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
{
	struct rrpc_block *rblk = a->rblk;
	unsigned int pg_offset;

	lockdep_assert_held(&rrpc->rev_lock);

	if (a->addr == ADDR_EMPTY || !rblk)
		return;

	spin_lock(&rblk->lock);

	div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
	WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
	rblk->nr_invalid_pages++;

	spin_unlock(&rblk->lock);

	rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
}

static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
								unsigned len)
{
	sector_t i;

	spin_lock(&rrpc->rev_lock);
	for (i = slba; i < slba + len; i++) {
		struct rrpc_addr *gp = &rrpc->trans_map[i];

		rrpc_page_invalidate(rrpc, gp);
		gp->rblk = NULL;
	}
	spin_unlock(&rrpc->rev_lock);
}

static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
					sector_t laddr, unsigned int pages)
{
	struct nvm_rq *rqd;
	struct rrpc_inflight_rq *inf;

	rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
	if (!rqd)
		return ERR_PTR(-ENOMEM);

	inf = rrpc_get_inflight_rq(rqd);
	if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
		mempool_free(rqd, rrpc->rq_pool);
		return NULL;
	}

	return rqd;
}

static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);

	rrpc_unlock_laddr(rrpc, inf);

	mempool_free(rqd, rrpc->rq_pool);
}

static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
{
	sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
	sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
	struct nvm_rq *rqd;

	do {
		rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
		schedule();
	} while (!rqd);

	if (IS_ERR(rqd)) {
		pr_err("rrpc: unable to acquire inflight IO\n");
		bio_io_error(bio);
		return;
	}

	rrpc_invalidate_range(rrpc, slba, len);
	rrpc_inflight_laddr_release(rrpc, rqd);
}

static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	return (rblk->next_page == rrpc->dev->sec_per_blk);
}

/* Calculate relative addr for the given block, considering instantiated LUNs */
static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_block *blk = rblk->parent;
	int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);

	return lun_blk * rrpc->dev->sec_per_blk;
}

/* Calculate global addr for the given block */
static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_block *blk = rblk->parent;

	return blk->id * rrpc->dev->sec_per_blk;
}
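
/*
 * Editorial worked example (assumed geometry, not from the driver): with
 * sec_per_blk = 256, blks_per_lun = 1024 and nr_luns = 2, a block with
 * global id 2500 has the global sector address 2500 * 256 = 640000 via
 * block_to_addr(), while block_to_rel_addr() first folds the id into the
 * instance-relative range, 2500 % (1024 * 2) = 452, giving the relative
 * address 452 * 256 = 115712.
 */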

static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
							struct ppa_addr r)
{
	struct ppa_addr l;
	int secs, pgs, blks, luns;
	sector_t ppa = r.ppa;

	l.ppa = 0;

	div_u64_rem(ppa, dev->sec_per_pg, &secs);
	l.g.sec = secs;

	sector_div(ppa, dev->sec_per_pg);
	div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
	l.g.pg = pgs;

	sector_div(ppa, dev->pgs_per_blk);
	div_u64_rem(ppa, dev->blks_per_lun, &blks);
	l.g.blk = blks;

	sector_div(ppa, dev->blks_per_lun);
	div_u64_rem(ppa, dev->luns_per_chnl, &luns);
	l.g.lun = luns;

	sector_div(ppa, dev->luns_per_chnl);
	l.g.ch = ppa;

	return l;
}
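
/*
 * Editorial worked example (assumed geometry): with sec_per_pg = 4,
 * pgs_per_blk = 128, blks_per_lun = 1024 and luns_per_chnl = 4, the linear
 * address 532775 decomposes least-significant field first:
 *   sec = 532775 % 4 = 3,    532775 / 4 = 133193
 *   pg  = 133193 % 128 = 73, 133193 / 128 = 1040
 *   blk = 1040 % 1024 = 16,  1040 / 1024 = 1
 *   lun = 1 % 4 = 1,         1 / 4 = 0
 *   ch  = 0
 */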

static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
{
	struct ppa_addr paddr;

	paddr.ppa = addr;
	return linear_to_generic_addr(dev, paddr);
}

/* requires lun->lock taken */
static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)
{
	struct rrpc *rrpc = rlun->rrpc;

	BUG_ON(!rblk);

	if (rlun->cur) {
		spin_lock(&rlun->cur->lock);
		WARN_ON(!block_is_full(rrpc, rlun->cur));
		spin_unlock(&rlun->cur->lock);
	}
	rlun->cur = rblk;
}

static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
							unsigned long flags)
{
	struct nvm_lun *lun = rlun->parent;
	struct nvm_block *blk;
	struct rrpc_block *rblk;

	spin_lock(&lun->lock);
	blk = nvm_get_blk_unlocked(rrpc->dev, rlun->parent, flags);
	if (!blk) {
		pr_err("nvm: rrpc: cannot get new block from media manager\n");
		spin_unlock(&lun->lock);
		return NULL;
	}

	rblk = rrpc_get_rblk(rlun, blk->id);
	list_add_tail(&rblk->list, &rlun->open_list);
	spin_unlock(&lun->lock);

	blk->priv = rblk;
	bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
	rblk->next_page = 0;
	rblk->nr_invalid_pages = 0;
	atomic_set(&rblk->data_cmnt_size, 0);

	return rblk;
}

static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_lun *lun = rlun->parent;

	spin_lock(&lun->lock);
	nvm_put_blk_unlocked(rrpc->dev, rblk->parent);
	list_del(&rblk->list);
	spin_unlock(&lun->lock);
}

static void rrpc_put_blks(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		if (rlun->cur)
			rrpc_put_blk(rrpc, rlun->cur);
		if (rlun->gc_cur)
			rrpc_put_blk(rrpc, rlun->gc_cur);
	}
}

static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
{
	int next = atomic_inc_return(&rrpc->next_lun);

	return &rrpc->luns[next % rrpc->nr_luns];
}

static void rrpc_gc_kick(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	unsigned int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		queue_work(rrpc->krqd_wq, &rlun->ws_gc);
	}
}

/*
 * Timer-driven GC: kick garbage collection on every LUN at a fixed interval.
 */
static void rrpc_gc_timer(unsigned long data)
{
	struct rrpc *rrpc = (struct rrpc *)data;

	rrpc_gc_kick(rrpc);
	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
}

static void rrpc_end_sync_bio(struct bio *bio)
{
	struct completion *waiting = bio->bi_private;

	if (bio->bi_error)
		pr_err("nvm: gc request failed (%u).\n", bio->bi_error);

	complete(waiting);
}

/*
 * rrpc_move_valid_pages -- migrate live data off the block
 * @rrpc: the 'rrpc' structure
 * @block: the block from which to migrate live pages
 *
 * Description:
 * GC algorithms may call this function to migrate remaining live
 * pages off the block prior to erasing it. This function blocks
 * further execution until the operation is complete.
 */
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct request_queue *q = rrpc->dev->q;
	struct rrpc_rev_addr *rev;
	struct nvm_rq *rqd;
	struct bio *bio;
	struct page *page;
	int slot;
	int nr_sec_per_blk = rrpc->dev->sec_per_blk;
	u64 phys_addr;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
		return 0;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		pr_err("nvm: could not alloc bio to gc\n");
		return -ENOMEM;
	}

	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
	if (!page) {
		bio_put(bio);
		return -ENOMEM;
	}

	while ((slot = find_first_zero_bit(rblk->invalid_pages,
					    nr_sec_per_blk)) < nr_sec_per_blk) {

		/* Lock laddr */
		phys_addr = rblk->parent->id * nr_sec_per_blk + slot;

try:
		spin_lock(&rrpc->rev_lock);
		/* Get logical address from physical to logical table */
		rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
		/* already updated by previous regular write */
		if (rev->addr == ADDR_EMPTY) {
			spin_unlock(&rrpc->rev_lock);
			continue;
		}

		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
		if (IS_ERR_OR_NULL(rqd)) {
			spin_unlock(&rrpc->rev_lock);
			schedule();
			goto try;
		}

		spin_unlock(&rrpc->rev_lock);

		/* Perform read to do GC */
		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio->bi_rw = READ;
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		/* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc read failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);
		if (bio->bi_error) {
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}

		bio_reset(bio);
		reinit_completion(&wait);

		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio->bi_rw = WRITE;
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		/* turn the command around and write the data back to a new
		 * address
		 */
		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc write failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);

		rrpc_inflight_laddr_release(rrpc, rqd);
		if (bio->bi_error)
			goto finished;

		bio_reset(bio);
	}

finished:
	mempool_free(page, rrpc->page_pool);
	bio_put(bio);

	if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
		pr_err("nvm: failed to garbage collect block\n");
		return -EIO;
	}

	return 0;
}
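
/*
 * Editorial note on the GC data path above: each live sector is moved by a
 * synchronous read into a borrowed page followed by a synchronous write of
 * the same logical address. The write takes the normal mapping path, so
 * rrpc_map_page() assigns a fresh physical location and invalidates the old
 * one; once every sector in the block is invalid, the caller may erase it.
 */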

static void rrpc_block_gc(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_dev *dev = rrpc->dev;

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);

	if (rrpc_move_valid_pages(rrpc, rblk))
		goto put_back;

	if (nvm_erase_blk(dev, rblk->parent))
		goto put_back;

	rrpc_put_blk(rrpc, rblk);

	return;

put_back:
	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);
}

/* the block with the highest number of invalid pages will be at the
 * beginning of the list
 */
static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
							struct rrpc_block *rb)
{
	if (ra->nr_invalid_pages == rb->nr_invalid_pages)
		return ra;

	return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
}

/* linearly find the block with the highest number of invalid pages;
 * requires lun->lock to be held
 */
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
	struct list_head *prio_list = &rlun->prio_list;
	struct rrpc_block *rblock, *max;

	BUG_ON(list_empty(prio_list));

	max = list_first_entry(prio_list, struct rrpc_block, prio);
	list_for_each_entry(rblock, prio_list, prio)
		max = rblock_max_invalid(max, rblock);

	return max;
}

static void rrpc_lun_gc(struct work_struct *work)
{
	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
	struct rrpc *rrpc = rlun->rrpc;
	struct nvm_lun *lun = rlun->parent;
	struct rrpc_block_gc *gcb;
	unsigned int nr_blocks_need;

	nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;

	if (nr_blocks_need < rrpc->nr_luns)
		nr_blocks_need = rrpc->nr_luns;

	spin_lock(&rlun->lock);
	while (nr_blocks_need > lun->nr_free_blocks &&
					!list_empty(&rlun->prio_list)) {
		struct rrpc_block *rblock = block_prio_find_max(rlun);
		struct nvm_block *block = rblock->parent;

		if (!rblock->nr_invalid_pages)
			break;

		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
		if (!gcb)
			break;

		list_del_init(&rblock->prio);

		BUG_ON(!block_is_full(rrpc, rblock));

		pr_debug("rrpc: selected block '%lu' for GC\n", block->id);

		gcb->rrpc = rrpc;
		gcb->rblk = rblock;
		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);

		queue_work(rrpc->kgc_wq, &gcb->ws_gc);

		nr_blocks_need--;
	}
	spin_unlock(&rlun->lock);

	/* TODO: Hint that request queue can be started again */
}

static void rrpc_gc_queue(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_lun *lun = rblk->parent->lun;
	struct nvm_block *blk = rblk->parent;

	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);

	spin_lock(&lun->lock);
	lun->nr_open_blocks--;
	lun->nr_closed_blocks++;
	blk->state &= ~NVM_BLK_ST_OPEN;
	blk->state |= NVM_BLK_ST_CLOSED;
	list_move_tail(&rblk->list, &rlun->closed_list);
	spin_unlock(&lun->lock);

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
							rblk->parent->id);
}

static const struct block_device_operations rrpc_fops = {
	.owner		= THIS_MODULE,
};

static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
{
	unsigned int i;
	struct rrpc_lun *rlun, *max_free;

	if (!is_gc)
		return get_next_lun(rrpc);

	/* during GC we do not use round-robin; instead we aim to keep the
	 * number of free blocks even across the LUNs
	 */
	max_free = &rrpc->luns[0];
	/* prevent the GC-ing lun from devouring pages of a lun with
	 * little free blocks. We don't take the lock as we only need an
	 * estimate.
	 */
	rrpc_for_each_lun(rrpc, rlun, i) {
		if (rlun->parent->nr_free_blocks >
					max_free->parent->nr_free_blocks)
			max_free = rlun;
	}

	return max_free;
}

static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
					struct rrpc_block *rblk, u64 paddr)
{
	struct rrpc_addr *gp;
	struct rrpc_rev_addr *rev;

	BUG_ON(laddr >= rrpc->nr_sects);

	gp = &rrpc->trans_map[laddr];
	spin_lock(&rrpc->rev_lock);
	if (gp->rblk)
		rrpc_page_invalidate(rrpc, gp);

	gp->addr = paddr;
	gp->rblk = rblk;

	rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
	rev->addr = laddr;
	spin_unlock(&rrpc->rev_lock);

	return gp;
}
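
/*
 * Editorial note: trans_map and rev_trans_map are kept as mutual inverses
 * under rev_lock. For example (assumed numbers), mapping laddr 42 to paddr
 * 9000 sets trans_map[42].addr = 9000 and rev_trans_map[9000 - poffset].addr
 * = 42; any previous physical page for laddr 42 is invalidated first, so GC
 * can later recognize it as dead via its ADDR_EMPTY reverse entry.
 */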

static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	u64 addr = ADDR_EMPTY;

	spin_lock(&rblk->lock);
	if (block_is_full(rrpc, rblk))
		goto out;

	addr = block_to_addr(rrpc, rblk) + rblk->next_page;

	rblk->next_page++;
out:
	spin_unlock(&rblk->lock);
	return addr;
}

/* Simple round-robin logical-to-physical address translation.
 *
 * Retrieve the mapping using the active append point, then update the append
 * point for the next write to the disk.
 *
 * Returns an rrpc_addr with the physical address and block.
 */
static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
								int is_gc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	struct nvm_lun *lun;
	u64 paddr;

	rlun = rrpc_get_lun_rr(rrpc, is_gc);
	lun = rlun->parent;

	if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
		return NULL;

	spin_lock(&rlun->lock);

	rblk = rlun->cur;
retry:
	paddr = rrpc_alloc_addr(rrpc, rblk);

	if (paddr == ADDR_EMPTY) {
		rblk = rrpc_get_blk(rrpc, rlun, 0);
		if (rblk) {
			rrpc_set_lun_cur(rlun, rblk);
			goto retry;
		}

		if (is_gc) {
			/* retry from emergency gc block */
			paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
			if (paddr == ADDR_EMPTY) {
				rblk = rrpc_get_blk(rrpc, rlun, 1);
				if (!rblk) {
					pr_err("rrpc: no more blocks");
					goto err;
				}

				rlun->gc_cur = rblk;
				paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
			}
			rblk = rlun->gc_cur;
		}
	}

	spin_unlock(&rlun->lock);
	return rrpc_update_map(rrpc, laddr, rblk, paddr);
err:
	spin_unlock(&rlun->lock);
	return NULL;
}

static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_block_gc *gcb;

	gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
	if (!gcb) {
		pr_err("rrpc: unable to queue block for gc.");
		return;
	}

	gcb->rrpc = rrpc;
	gcb->rblk = rblk;

	INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
	queue_work(rrpc->kgc_wq, &gcb->ws_gc);
}

static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
						sector_t laddr, uint8_t npages)
{
	struct rrpc_addr *p;
	struct rrpc_block *rblk;
	struct nvm_lun *lun;
	int cmnt_size, i;

	for (i = 0; i < npages; i++) {
		p = &rrpc->trans_map[laddr + i];
		rblk = p->rblk;
		lun = rblk->parent->lun;

		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
		if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
			rrpc_run_gc(rrpc, rblk);
	}
}

static void rrpc_end_io(struct nvm_rq *rqd)
{
	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	uint8_t npages = rqd->nr_ppas;
	sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;

	if (bio_data_dir(rqd->bio) == WRITE)
		rrpc_end_io_write(rrpc, rrqd, laddr, npages);

	bio_put(rqd->bio);

	if (rrqd->flags & NVM_IOTYPE_GC)
		return;

	rrpc_unlock_rq(rrpc, rqd);

	if (npages > 1)
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);

	mempool_free(rqd, rrpc->rq_pool);
}

static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *gp;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
		gp = &rrpc->trans_map[laddr + i];

		if (gp->rblk) {
			rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
								gp->addr);
		} else {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
							rqd->dma_ppa_list);
			return NVM_IO_DONE;
		}
	}

	rqd->opcode = NVM_OP_HBREAD;

	return NVM_IO_OK;
}

static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
							unsigned long flags)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);
	struct rrpc_addr *gp;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
	gp = &rrpc->trans_map[laddr];

	if (gp->rblk) {
		rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
	} else {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		return NVM_IO_DONE;
	}

	rqd->opcode = NVM_OP_HBREAD;
	rrqd->addr = gp;

	return NVM_IO_OK;
}

static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *p;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		p = rrpc_map_page(rrpc, laddr + i, is_gc);
		if (!p) {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
							rqd->dma_ppa_list);
			rrpc_gc_kick(rrpc);
			return NVM_IO_REQUEUE;
		}

		rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
								p->addr);
	}

	rqd->opcode = NVM_OP_HBWRITE;

	return NVM_IO_OK;
}

static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	struct rrpc_addr *p;
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	p = rrpc_map_page(rrpc, laddr, is_gc);
	if (!p) {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		rrpc_gc_kick(rrpc);
		return NVM_IO_REQUEUE;
	}

	rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
	rqd->opcode = NVM_OP_HBWRITE;
	rrqd->addr = p;

	return NVM_IO_OK;
}

static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
{
	if (npages > 1) {
		rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
							&rqd->dma_ppa_list);
		if (!rqd->ppa_list) {
			pr_err("rrpc: not able to allocate ppa list\n");
			return NVM_IO_ERR;
		}

		if (bio_rw(bio) == WRITE)
			return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
									npages);

		return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
	}

	if (bio_rw(bio) == WRITE)
		return rrpc_write_rq(rrpc, bio, rqd, flags);

	return rrpc_read_rq(rrpc, bio, rqd, flags);
}

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	int err;
	struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
	uint8_t nr_pages = rrpc_get_pages(bio);
	int bio_size = bio_sectors(bio) << 9;

	if (bio_size < rrpc->dev->sec_size)
		return NVM_IO_ERR;
	else if (bio_size > rrpc->dev->max_rq_size)
		return NVM_IO_ERR;

	err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
	if (err)
		return err;

	bio_get(bio);
	rqd->bio = bio;
	rqd->ins = &rrpc->instance;
	rqd->nr_ppas = nr_pages;
	rrq->flags = flags;

	err = nvm_submit_io(rrpc->dev, rqd);
	if (err) {
		pr_err("rrpc: I/O submission failed: %d\n", err);
		bio_put(bio);
		if (!(flags & NVM_IOTYPE_GC)) {
			rrpc_unlock_rq(rrpc, rqd);
			if (rqd->nr_ppas > 1)
				nvm_dev_dma_free(rrpc->dev,
					rqd->ppa_list, rqd->dma_ppa_list);
		}
		return NVM_IO_ERR;
	}

	return NVM_IO_OK;
}
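
/*
 * Editorial note on the request lifecycle: rrpc_make_rq() allocates an
 * nvm_rq, rrpc_submit_io() validates its size and delegates to
 * rrpc_setup_rq() to resolve logical addresses into a single ppa_addr or a
 * DMA-mapped ppa_list (multi-sector), and rrpc_end_io() later releases the
 * inflight lock, the ppa_list and the nvm_rq for non-GC requests.
 */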

static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
{
	struct rrpc *rrpc = q->queuedata;
	struct nvm_rq *rqd;
	int err;

	if (bio->bi_rw & REQ_DISCARD) {
		rrpc_discard(rrpc, bio);
		return BLK_QC_T_NONE;
	}

	rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
	if (!rqd) {
		pr_err_ratelimited("rrpc: not able to queue bio.");
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}
	memset(rqd, 0, sizeof(struct nvm_rq));

	err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
	switch (err) {
	case NVM_IO_OK:
		return BLK_QC_T_NONE;
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	case NVM_IO_REQUEUE:
		spin_lock(&rrpc->bio_lock);
		bio_list_add(&rrpc->requeue_bios, bio);
		spin_unlock(&rrpc->bio_lock);
		queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
		break;
	}

	mempool_free(rqd, rrpc->rq_pool);
	return BLK_QC_T_NONE;
}

static void rrpc_requeue(struct work_struct *work)
{
	struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock(&rrpc->bio_lock);
	bio_list_merge(&bios, &rrpc->requeue_bios);
	bio_list_init(&rrpc->requeue_bios);
	spin_unlock(&rrpc->bio_lock);

	while ((bio = bio_list_pop(&bios)))
		rrpc_make_rq(rrpc->disk->queue, bio);
}

static void rrpc_gc_free(struct rrpc *rrpc)
{
	if (rrpc->krqd_wq)
		destroy_workqueue(rrpc->krqd_wq);

	if (rrpc->kgc_wq)
		destroy_workqueue(rrpc->kgc_wq);
}

static int rrpc_gc_init(struct rrpc *rrpc)
{
	rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
								rrpc->nr_luns);
	if (!rrpc->krqd_wq)
		return -ENOMEM;

	rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
	if (!rrpc->kgc_wq)
		return -ENOMEM;

	setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);

	return 0;
}

static void rrpc_map_free(struct rrpc *rrpc)
{
	vfree(rrpc->rev_trans_map);
	vfree(rrpc->trans_map);
}

static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct rrpc *rrpc = (struct rrpc *)private;
	struct nvm_dev *dev = rrpc->dev;
	struct rrpc_addr *addr = rrpc->trans_map + slba;
	struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
	u64 elba = slba + nlb;
	u64 i;

	if (unlikely(elba > dev->total_secs)) {
		pr_err("nvm: L2P data from device is out of bounds!\n");
		return -EINVAL;
	}

	for (i = 0; i < nlb; i++) {
		u64 pba = le64_to_cpu(entries[i]);
		unsigned int mod;
		/* LNVM treats address-spaces as silos, LBA and PBA are
		 * equally large and zero-indexed.
		 */
		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
			pr_err("nvm: L2P data entry is out of bounds!\n");
			return -EINVAL;
		}

		/* Address zero is special: the first page on a disk is
		 * protected, as it often holds internal device boot
		 * information.
		 */
		if (!pba)
			continue;

		div_u64_rem(pba, rrpc->nr_sects, &mod);

		addr[i].addr = pba;
		raddr[mod].addr = slba + i;
	}

	return 0;
}
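
/*
 * Editorial example (assumed numbers): called with slba = 0 on an instance
 * spanning nr_sects = 131072 sectors, an entry pba = 131080 at i = 8 gives
 * mod = 131080 % 131072 = 8, so trans_map[8].addr becomes 131080 and
 * rev_trans_map[8].addr becomes 8, restoring both directions of the mapping
 * from the device-resident table.
 */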

static int rrpc_map_init(struct rrpc *rrpc)
{
	struct nvm_dev *dev = rrpc->dev;
	sector_t i;
	int ret;

	rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
	if (!rrpc->trans_map)
		return -ENOMEM;

	rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
							* rrpc->nr_sects);
	if (!rrpc->rev_trans_map)
		return -ENOMEM;

	for (i = 0; i < rrpc->nr_sects; i++) {
		struct rrpc_addr *p = &rrpc->trans_map[i];
		struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];

		p->addr = ADDR_EMPTY;
		r->addr = ADDR_EMPTY;
	}

	if (!dev->ops->get_l2p_tbl)
		return 0;

	/* Bring up the mapping table from device */
	ret = dev->ops->get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
					rrpc_l2p_update, rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not read L2P table.\n");
		return -EINVAL;
	}

	return 0;
}

/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64

static int rrpc_core_init(struct rrpc *rrpc)
{
	down_write(&rrpc_lock);
	if (!rrpc_gcb_cache) {
		rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
				sizeof(struct rrpc_block_gc), 0, 0, NULL);
		if (!rrpc_gcb_cache) {
			up_write(&rrpc_lock);
			return -ENOMEM;
		}

		rrpc_rq_cache = kmem_cache_create("rrpc_rq",
				sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
				0, 0, NULL);
		if (!rrpc_rq_cache) {
			kmem_cache_destroy(rrpc_gcb_cache);
			up_write(&rrpc_lock);
			return -ENOMEM;
		}
	}
	up_write(&rrpc_lock);

	rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
	if (!rrpc->page_pool)
		return -ENOMEM;

	rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns,
								rrpc_gcb_cache);
	if (!rrpc->gcb_pool)
		return -ENOMEM;

	rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
	if (!rrpc->rq_pool)
		return -ENOMEM;

	spin_lock_init(&rrpc->inflights.lock);
	INIT_LIST_HEAD(&rrpc->inflights.reqs);

	return 0;
}

static void rrpc_core_free(struct rrpc *rrpc)
{
	mempool_destroy(rrpc->page_pool);
	mempool_destroy(rrpc->gcb_pool);
	mempool_destroy(rrpc->rq_pool);
}

static void rrpc_luns_free(struct rrpc *rrpc)
{
	struct nvm_dev *dev = rrpc->dev;
	struct nvm_lun *lun;
	struct rrpc_lun *rlun;
	int i;

	if (!rrpc->luns)
		return;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		lun = rlun->parent;
		if (!lun)
			break;
		dev->mt->release_lun(dev, lun->id);
		vfree(rlun->blocks);
	}

	kfree(rrpc->luns);
}

static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
{
	struct nvm_dev *dev = rrpc->dev;
	struct rrpc_lun *rlun;
	int i, j, ret = -EINVAL;

	if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
		pr_err("rrpc: number of pages per block too high.");
		return -EINVAL;
	}

	spin_lock_init(&rrpc->rev_lock);

	rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
								GFP_KERNEL);
	if (!rrpc->luns)
		return -ENOMEM;

	/* 1:1 mapping */
	for (i = 0; i < rrpc->nr_luns; i++) {
		int lunid = lun_begin + i;
		struct nvm_lun *lun;

		if (dev->mt->reserve_lun(dev, lunid)) {
			pr_err("rrpc: lun %u is already allocated\n", lunid);
			goto err;
		}

		lun = dev->mt->get_lun(dev, lunid);
		if (!lun)
			goto err;

		rlun = &rrpc->luns[i];
		rlun->parent = lun;
		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
						rrpc->dev->blks_per_lun);
		if (!rlun->blocks) {
			ret = -ENOMEM;
			goto err;
		}

		for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
			struct rrpc_block *rblk = &rlun->blocks[j];
			struct nvm_block *blk = &lun->blocks[j];

			rblk->parent = blk;
			rblk->rlun = rlun;
			INIT_LIST_HEAD(&rblk->prio);
			spin_lock_init(&rblk->lock);
		}

		rlun->rrpc = rrpc;
		INIT_LIST_HEAD(&rlun->prio_list);
		INIT_LIST_HEAD(&rlun->open_list);
		INIT_LIST_HEAD(&rlun->closed_list);

		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
		spin_lock_init(&rlun->lock);
	}

	return 0;
err:
	return ret;
}

/* returns 0 on success and stores the beginning address in *begin */
static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
{
	struct nvm_dev *dev = rrpc->dev;
	struct nvmm_type *mt = dev->mt;
	sector_t size = rrpc->nr_sects * dev->sec_size;
	int ret;

	size >>= 9;

	ret = mt->get_area(dev, begin, size);
	if (!ret)
		*begin >>= (ilog2(dev->sec_size) - 9);

	return ret;
}

static void rrpc_area_free(struct rrpc *rrpc)
{
	struct nvm_dev *dev = rrpc->dev;
	struct nvmm_type *mt = dev->mt;
	sector_t begin = rrpc->soffset << (ilog2(dev->sec_size) - 9);

	mt->put_area(dev, begin);
}

static void rrpc_free(struct rrpc *rrpc)
{
	rrpc_gc_free(rrpc);
	rrpc_map_free(rrpc);
	rrpc_core_free(rrpc);
	rrpc_luns_free(rrpc);
	rrpc_area_free(rrpc);

	kfree(rrpc);
}

static void rrpc_exit(void *private)
{
	struct rrpc *rrpc = private;

	del_timer(&rrpc->gc_timer);

	flush_workqueue(rrpc->krqd_wq);
	flush_workqueue(rrpc->kgc_wq);

	rrpc_free(rrpc);
}

static sector_t rrpc_capacity(void *private)
{
	struct rrpc *rrpc = private;
	struct nvm_dev *dev = rrpc->dev;
	sector_t reserved, provisioned;

	/* cur, gc, and two emergency blocks for each lun */
	reserved = rrpc->nr_luns * dev->sec_per_blk * 4;
	provisioned = rrpc->nr_sects - reserved;

	if (reserved > rrpc->nr_sects) {
		pr_err("rrpc: not enough space available to expose storage.\n");
		return 0;
	}

	sector_div(provisioned, 10);
	return provisioned * 9 * NR_PHY_IN_LOG;
}
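
/*
 * Editorial example of the capacity math (assumed geometry): with 2 LUNs,
 * 256 sectors per block and nr_sects = 131072, reserved = 2 * 256 * 4 = 2048
 * and provisioned = 129024. sector_div(provisioned, 10) yields 12902, and
 * 12902 * 9 = 116118 pages are exposed (times NR_PHY_IN_LOG 512-byte sectors
 * per page), i.e. roughly 10% of the media is held back as
 * over-provisioning headroom for GC.
 */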

/*
 * Look up the logical address in the reverse translation map and check
 * whether the mapping is still valid by comparing the forward entry's
 * physical address against this block's physical address; stale pages are
 * marked invalid.
 */
static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_dev *dev = rrpc->dev;
	int offset;
	struct rrpc_addr *laddr;
	u64 bpaddr, paddr, pladdr;

	bpaddr = block_to_rel_addr(rrpc, rblk);
	for (offset = 0; offset < dev->sec_per_blk; offset++) {
		paddr = bpaddr + offset;

		pladdr = rrpc->rev_trans_map[paddr].addr;
		if (pladdr == ADDR_EMPTY)
			continue;

		laddr = &rrpc->trans_map[pladdr];

		if (paddr == laddr->addr) {
			laddr->rblk = rblk;
		} else {
			set_bit(offset, rblk->invalid_pages);
			rblk->nr_invalid_pages++;
		}
	}
}

static int rrpc_blocks_init(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int lun_iter, blk_iter;

	for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
		rlun = &rrpc->luns[lun_iter];

		for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun;
								blk_iter++) {
			rblk = &rlun->blocks[blk_iter];
			rrpc_block_map_update(rrpc, rblk);
		}
	}

	return 0;
}

static int rrpc_luns_configure(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];

		rblk = rrpc_get_blk(rrpc, rlun, 0);
		if (!rblk)
			goto err;

		rrpc_set_lun_cur(rlun, rblk);

		/* Emergency gc block */
		rblk = rrpc_get_blk(rrpc, rlun, 1);
		if (!rblk)
			goto err;
		rlun->gc_cur = rblk;
	}

	return 0;
err:
	rrpc_put_blks(rrpc);
	return -EINVAL;
}

static struct nvm_tgt_type tt_rrpc;

static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
						int lun_begin, int lun_end)
{
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct rrpc *rrpc;
	sector_t soffset;
	int ret;

	if (!(dev->identity.dom & NVM_RSP_L2P)) {
		pr_err("nvm: rrpc: device does not support l2p (%x)\n",
							dev->identity.dom);
		return ERR_PTR(-EINVAL);
	}

	rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
	if (!rrpc)
		return ERR_PTR(-ENOMEM);

	rrpc->instance.tt = &tt_rrpc;
	rrpc->dev = dev;
	rrpc->disk = tdisk;

	bio_list_init(&rrpc->requeue_bios);
	spin_lock_init(&rrpc->bio_lock);
	INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);

	rrpc->nr_luns = lun_end - lun_begin + 1;
	rrpc->total_blocks = (unsigned long)dev->blks_per_lun * rrpc->nr_luns;
	rrpc->nr_sects = (unsigned long long)dev->sec_per_lun * rrpc->nr_luns;

	/* simple round-robin strategy */
	atomic_set(&rrpc->next_lun, -1);

	ret = rrpc_area_init(rrpc, &soffset);
	if (ret < 0) {
		pr_err("nvm: rrpc: could not initialize area\n");
		return ERR_PTR(ret);
	}
	rrpc->soffset = soffset;

	ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize luns\n");
		goto err;
	}

	rrpc->poffset = dev->sec_per_lun * lun_begin;
	rrpc->lun_offset = lun_begin;

	ret = rrpc_core_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize core\n");
		goto err;
	}

	ret = rrpc_map_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize maps\n");
		goto err;
	}

	ret = rrpc_blocks_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize state for blocks\n");
		goto err;
	}

	ret = rrpc_luns_configure(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
		goto err;
	}

	ret = rrpc_gc_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize gc\n");
		goto err;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
			rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);

	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));

	return rrpc;
err:
	rrpc_free(rrpc);
	return ERR_PTR(ret);
}

/* round robin, page-based FTL, and cost-based GC */
static struct nvm_tgt_type tt_rrpc = {
	.name		= "rrpc",
	.version	= {1, 0, 0},

	.make_rq	= rrpc_make_rq,
	.capacity	= rrpc_capacity,
	.end_io		= rrpc_end_io,

	.init		= rrpc_init,
	.exit		= rrpc_exit,
};

static int __init rrpc_module_init(void)
{
	return nvm_register_tgt_type(&tt_rrpc);
}

static void rrpc_module_exit(void)
{
	nvm_unregister_tgt_type(&tt_rrpc);
}

module_init(rrpc_module_init);
module_exit(rrpc_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");