]>
Commit | Line | Data |
---|---|---|
ae1519ec MB |
1 | /* |
2 | * Copyright (C) 2015 IT University of Copenhagen | |
3 | * Initial release: Matias Bjorling <m@bjorling.me> | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or | |
6 | * modify it under the terms of the GNU General Public License version | |
7 | * 2 as published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, but | |
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
12 | * General Public License for more details. | |
13 | * | |
14 | * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs. | |
15 | */ | |
16 | ||
17 | #include "rrpc.h" | |
18 | ||
19 | static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache; | |
20 | static DECLARE_RWSEM(rrpc_lock); | |
21 | ||
22 | static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio, | |
23 | struct nvm_rq *rqd, unsigned long flags); | |
24 | ||
25 | #define rrpc_for_each_lun(rrpc, rlun, i) \ | |
26 | for ((i) = 0, rlun = &(rrpc)->luns[0]; \ | |
27 | (i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)]) | |
28 | ||
/*
 * Mark the sector backing logical mapping @a invalid on its block and
 * clear the reverse (physical->logical) mapping.
 * Caller must hold rrpc->rev_lock (asserted below).
 */
static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_block *rblk = a->rblk;
	unsigned int pg_offset;

	lockdep_assert_held(&rrpc->rev_lock);

	/* Nothing to invalidate for an unmapped entry */
	if (a->addr == ADDR_EMPTY || !rblk)
		return;

	spin_lock(&rblk->lock);

	/* Sector offset within the block = addr mod sectors-per-block */
	div_u64_rem(a->addr, dev->geo.sec_per_blk, &pg_offset);
	/* Warn if the page was already marked invalid (double invalidate) */
	WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
	rblk->nr_invalid_pages++;

	spin_unlock(&rblk->lock);

	rrpc->rev_trans_map[a->addr].addr = ADDR_EMPTY;
}
50 | ||
/*
 * Invalidate @len consecutive logical sectors starting at @slba and
 * detach them from their blocks. Takes rev_lock for the whole range.
 */
static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
						unsigned int len)
{
	sector_t i;

	spin_lock(&rrpc->rev_lock);
	for (i = slba; i < slba + len; i++) {
		struct rrpc_addr *gp = &rrpc->trans_map[i];

		rrpc_page_invalidate(rrpc, gp);
		gp->rblk = NULL;
	}
	spin_unlock(&rrpc->rev_lock);
}
65 | ||
/*
 * Allocate a request and lock the logical address range [laddr, laddr+pages).
 *
 * Returns: the request on success, NULL if the range is already locked
 * (caller may retry), or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
					sector_t laddr, unsigned int pages)
{
	struct nvm_rq *rqd;
	struct rrpc_inflight_rq *inf;

	rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
	if (!rqd)
		return ERR_PTR(-ENOMEM);

	inf = rrpc_get_inflight_rq(rqd);
	if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
		/* Range busy: release the request, signal retry with NULL */
		mempool_free(rqd, rrpc->rq_pool);
		return NULL;
	}

	return rqd;
}
84 | ||
/* Unlock the logical range held by @rqd and return it to the pool */
static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);

	rrpc_unlock_laddr(rrpc, inf);

	mempool_free(rqd, rrpc->rq_pool);
}
93 | ||
/*
 * Handle a DISCARD bio: invalidate the mapped range so the sectors can
 * be garbage collected. Busy-waits (schedule()) until the range lock is
 * acquired.
 */
static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
{
	sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
	sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
	struct nvm_rq *rqd;

	while (1) {
		rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
		if (rqd)
			break;

		/* NULL means range busy: yield and retry */
		schedule();
	}

	if (IS_ERR(rqd)) {
		pr_err("rrpc: unable to acquire inflight IO\n");
		bio_io_error(bio);
		return;
	}

	rrpc_invalidate_range(rrpc, slba, len);
	rrpc_inflight_laddr_release(rrpc, rqd);
}
117 | ||
118 | static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk) | |
119 | { | |
8e79b5cb JG |
120 | struct nvm_tgt_dev *dev = rrpc->dev; |
121 | ||
122 | return (rblk->next_page == dev->geo.sec_per_blk); | |
ae1519ec MB |
123 | } |
124 | ||
/* Calculate relative addr for the given block, considering instantiated LUNs */
static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_lun *rlun = rblk->rlun;

	/* Each LUN owns a contiguous range of sec_per_blk-sized slots */
	return rlun->id * dev->geo.sec_per_blk;
}
133 | ||
/*
 * Translate an rrpc linear mapping entry into a device (generic) ppa:
 * the linear address supplies page/plane/sector, while channel, lun and
 * block come from the owning rlun/rblk.
 */
static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_tgt_dev *dev,
					 struct rrpc_addr *gp)
{
	struct rrpc_block *rblk = gp->rblk;
	struct rrpc_lun *rlun = rblk->rlun;
	u64 addr = gp->addr;
	struct ppa_addr paddr;

	paddr.ppa = addr;
	paddr = rrpc_linear_to_generic_addr(&dev->geo, paddr);
	/* Overwrite position fields with the block's actual location */
	paddr.g.ch = rlun->bppa.g.ch;
	paddr.g.lun = rlun->bppa.g.lun;
	paddr.g.blk = rblk->id;

	return paddr;
}
150 | ||
/* requires lun->lock taken */
static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
			     struct rrpc_block **cur_rblk)
{
	struct rrpc *rrpc = rlun->rrpc;

	/* The block being replaced must have been fully written */
	if (*cur_rblk) {
		spin_lock(&(*cur_rblk)->lock);
		WARN_ON(!block_is_full(rrpc, *cur_rblk));
		spin_unlock(&(*cur_rblk)->lock);
	}
	*cur_rblk = new_rblk;
}
164 | ||
/*
 * Take the first block off the LUN's free list and move it to the used
 * list. Caller must hold rlun->lock. Returns NULL if no free blocks.
 */
static struct rrpc_block *__rrpc_get_blk(struct rrpc *rrpc,
					 struct rrpc_lun *rlun)
{
	struct rrpc_block *rblk = NULL;

	if (list_empty(&rlun->free_list))
		goto out;

	rblk = list_first_entry(&rlun->free_list, struct rrpc_block, list);

	list_move_tail(&rblk->list, &rlun->used_list);
	rblk->state = NVM_BLK_ST_TGT;
	rlun->nr_free_blocks--;

out:
	return rblk;
}
182 | ||
/*
 * Allocate a fresh block from @rlun and reset its write state.
 * Non-GC requests are refused once the LUN drops below its reserved
 * block count, so GC always has blocks to make progress with.
 */
static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
				       unsigned long flags)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_block *rblk;
	int is_gc = flags & NVM_IOTYPE_GC;

	spin_lock(&rlun->lock);
	if (!is_gc && rlun->nr_free_blocks < rlun->reserved_blocks) {
		pr_err("nvm: rrpc: cannot give block to non GC request\n");
		spin_unlock(&rlun->lock);
		return NULL;
	}

	rblk = __rrpc_get_blk(rrpc, rlun);
	if (!rblk) {
		pr_err("nvm: rrpc: cannot get new block\n");
		spin_unlock(&rlun->lock);
		return NULL;
	}
	spin_unlock(&rlun->lock);

	/* Reset per-block write bookkeeping for reuse */
	bitmap_zero(rblk->invalid_pages, dev->geo.sec_per_blk);
	rblk->next_page = 0;
	rblk->nr_invalid_pages = 0;
	atomic_set(&rblk->data_cmnt_size, 0);

	return rblk;
}
212 | ||
/*
 * Return a block to its LUN: good target blocks go back to the free
 * list, bad blocks to the bad-block list. Unknown state is treated as
 * bad (with a warning) so the block is quarantined rather than reused.
 */
static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_lun *rlun = rblk->rlun;

	spin_lock(&rlun->lock);
	if (rblk->state & NVM_BLK_ST_TGT) {
		list_move_tail(&rblk->list, &rlun->free_list);
		rlun->nr_free_blocks++;
		rblk->state = NVM_BLK_ST_FREE;
	} else if (rblk->state & NVM_BLK_ST_BAD) {
		list_move_tail(&rblk->list, &rlun->bb_list);
		rblk->state = NVM_BLK_ST_BAD;
	} else {
		WARN_ON_ONCE(1);
		pr_err("rrpc: erroneous type (ch:%d,lun:%d,blk%d-> %u)\n",
		       rlun->bppa.g.ch, rlun->bppa.g.lun,
		       rblk->id, rblk->state);
		list_move_tail(&rblk->list, &rlun->bb_list);
	}
	spin_unlock(&rlun->lock);
}
234 | ||
/* Release the current write and GC blocks of every LUN (teardown path) */
static void rrpc_put_blks(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		if (rlun->cur)
			rrpc_put_blk(rrpc, rlun->cur);
		if (rlun->gc_cur)
			rrpc_put_blk(rrpc, rlun->gc_cur);
	}
}
248 | ||
ae1519ec MB |
249 | static struct rrpc_lun *get_next_lun(struct rrpc *rrpc) |
250 | { | |
251 | int next = atomic_inc_return(&rrpc->next_lun); | |
252 | ||
253 | return &rrpc->luns[next % rrpc->nr_luns]; | |
254 | } | |
255 | ||
/* Queue the per-LUN GC worker for every LUN */
static void rrpc_gc_kick(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	unsigned int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		queue_work(rrpc->krqd_wq, &rlun->ws_gc);
	}
}
266 | ||
/*
 * timed GC every interval.
 */
static void rrpc_gc_timer(unsigned long data)
{
	struct rrpc *rrpc = (struct rrpc *)data;

	rrpc_gc_kick(rrpc);
	/* re-arm: GC pass every 10ms */
	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
}
277 | ||
/* Completion callback for synchronous GC bios: wake up the waiter */
static void rrpc_end_sync_bio(struct bio *bio)
{
	struct completion *waiting = bio->bi_private;

	if (bio->bi_error)
		pr_err("nvm: gc request failed (%u).\n", bio->bi_error);

	complete(waiting);
}
287 | ||
/*
 * rrpc_move_valid_pages -- migrate live data off the block
 * @rrpc: the 'rrpc' structure
 * @rblk: the block from which to migrate live pages
 *
 * Description:
 *	GC algorithms may call this function to migrate remaining live
 *	pages off the block prior to erasing it. This function blocks
 *	further execution until the operation is complete.
 *
 * For each still-valid sector: lock its laddr, synchronously read the
 * data into a scratch page, then write it back through the normal write
 * path (which maps it to a fresh block). Returns 0 when the block holds
 * no more valid data, -EIO/-ENOMEM otherwise.
 */
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct request_queue *q = dev->q;
	struct rrpc_rev_addr *rev;
	struct nvm_rq *rqd;
	struct bio *bio;
	struct page *page;
	int slot;
	int nr_sec_per_blk = dev->geo.sec_per_blk;
	u64 phys_addr;
	DECLARE_COMPLETION_ONSTACK(wait);

	/* Fully invalid block: nothing to migrate */
	if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
		return 0;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		pr_err("nvm: could not alloc bio to gc\n");
		return -ENOMEM;
	}

	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
	if (!page) {
		bio_put(bio);
		return -ENOMEM;
	}

	/* Iterate over every sector not yet marked invalid */
	while ((slot = find_first_zero_bit(rblk->invalid_pages,
					   nr_sec_per_blk)) < nr_sec_per_blk) {

		/* Lock laddr */
		phys_addr = rrpc_blk_to_ppa(rrpc, rblk) + slot;

try:
		spin_lock(&rrpc->rev_lock);
		/* Get logical address from physical to logical table */
		rev = &rrpc->rev_trans_map[phys_addr];
		/* already updated by previous regular write */
		if (rev->addr == ADDR_EMPTY) {
			spin_unlock(&rrpc->rev_lock);
			continue;
		}

		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
		if (IS_ERR_OR_NULL(rqd)) {
			/* laddr busy or no memory: back off and retry */
			spin_unlock(&rrpc->rev_lock);
			schedule();
			goto try;
		}

		spin_unlock(&rrpc->rev_lock);

		/* Perform read to do GC */
		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		/* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc read failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);
		if (bio->bi_error) {
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}

		/* Reuse the same bio and completion for the write-back */
		bio_reset(bio);
		reinit_completion(&wait);

		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		/* turn the command around and write the data back to a new
		 * address
		 */
		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc write failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);

		rrpc_inflight_laddr_release(rrpc, rqd);
		if (bio->bi_error)
			goto finished;

		bio_reset(bio);
	}

finished:
	mempool_free(page, rrpc->page_pool);
	bio_put(bio);

	/* The write path marks migrated sectors invalid; all must be now */
	if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
		pr_err("nvm: failed to garbage collect block\n");
		return -EIO;
	}

	return 0;
}
409 | ||
/*
 * GC work item: migrate valid pages off @rblk, erase it, and return it
 * to the free pool. On any failure the block is put back on the LUN's
 * prio list so GC can retry later.
 */
static void rrpc_block_gc(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
						 ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct ppa_addr ppa;

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' being reclaimed\n",
		 rlun->bppa.g.ch, rlun->bppa.g.lun,
		 rblk->id);

	if (rrpc_move_valid_pages(rrpc, rblk))
		goto put_back;

	/* Build the device address of the block to erase */
	ppa.ppa = 0;
	ppa.g.ch = rlun->bppa.g.ch;
	ppa.g.lun = rlun->bppa.g.lun;
	ppa.g.blk = rblk->id;

	if (nvm_erase_blk(dev, &ppa, 0))
		goto put_back;

	rrpc_put_blk(rrpc, rblk);

	return;

put_back:
	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);
}
445 | ||
446 | /* the block with highest number of invalid pages, will be in the beginning | |
447 | * of the list | |
448 | */ | |
2a02e627 | 449 | static struct rrpc_block *rblk_max_invalid(struct rrpc_block *ra, |
ae1519ec MB |
450 | struct rrpc_block *rb) |
451 | { | |
452 | if (ra->nr_invalid_pages == rb->nr_invalid_pages) | |
453 | return ra; | |
454 | ||
455 | return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra; | |
456 | } | |
457 | ||
/* linearly find the block with highest number of invalid pages
 * requires lun->lock
 */
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
	struct list_head *prio_list = &rlun->prio_list;
	struct rrpc_block *rblk, *max;

	/* caller guarantees the prio list is non-empty */
	BUG_ON(list_empty(prio_list));

	max = list_first_entry(prio_list, struct rrpc_block, prio);
	list_for_each_entry(rblk, prio_list, prio)
		max = rblk_max_invalid(max, rblk);

	return max;
}
474 | ||
/*
 * Per-LUN GC worker: while the LUN is short on free blocks, pick the
 * fullest-of-invalid blocks off the prio list and queue each for
 * reclamation on the GC workqueue.
 */
static void rrpc_lun_gc(struct work_struct *work)
{
	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
	struct rrpc *rrpc = rlun->rrpc;
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_block_gc *gcb;
	unsigned int nr_blocks_need;

	/* Target free-block watermark: a fraction of the LUN's capacity */
	nr_blocks_need = dev->geo.blks_per_lun / GC_LIMIT_INVERSE;

	if (nr_blocks_need < rrpc->nr_luns)
		nr_blocks_need = rrpc->nr_luns;

	spin_lock(&rlun->lock);
	while (nr_blocks_need > rlun->nr_free_blocks &&
	       !list_empty(&rlun->prio_list)) {
		struct rrpc_block *rblk = block_prio_find_max(rlun);

		/* Best candidate has no invalid pages: GC would be futile */
		if (!rblk->nr_invalid_pages)
			break;

		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
		if (!gcb)
			break;

		list_del_init(&rblk->prio);

		/* Only full blocks are eligible for reclamation */
		WARN_ON(!block_is_full(rrpc, rblk));

		pr_debug("rrpc: selected block 'ch:%d,lun:%d,blk:%d' for GC\n",
			 rlun->bppa.g.ch, rlun->bppa.g.lun,
			 rblk->id);

		gcb->rrpc = rrpc;
		gcb->rblk = rblk;
		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);

		queue_work(rrpc->kgc_wq, &gcb->ws_gc);

		nr_blocks_need--;
	}
	spin_unlock(&rlun->lock);

	/* TODO: Hint that request queue can be started again */
}
520 | ||
/*
 * Deferred work: a block became fully written, so make it visible to GC
 * by placing it on its LUN's prio list.
 */
static void rrpc_gc_queue(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
						 ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;

	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' full, allow GC (sched)\n",
		 rlun->bppa.g.ch, rlun->bppa.g.lun,
		 rblk->id);
}
538 | ||
/* Block device ops: only module ownership, no custom open/ioctl hooks */
static const struct block_device_operations rrpc_fops = {
	.owner		= THIS_MODULE,
};
542 | ||
/*
 * Pick a LUN for the next write: round-robin for normal I/O; for GC,
 * the LUN with the most free blocks (to keep wear even across LUNs).
 */
static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
{
	unsigned int i;
	struct rrpc_lun *rlun, *max_free;

	if (!is_gc)
		return get_next_lun(rrpc);

	/* during GC, we don't care about RR, instead we want to make
	 * sure that we maintain evenness between the block luns.
	 */
	max_free = &rrpc->luns[0];
	/* prevent GC-ing lun from devouring pages of a lun with
	 * little free blocks. We don't take the lock as we only need an
	 * estimate.
	 */
	rrpc_for_each_lun(rrpc, rlun, i) {
		if (rlun->nr_free_blocks > max_free->nr_free_blocks)
			max_free = rlun;
	}

	return max_free;
}
566 | ||
/*
 * Install the mapping laddr -> (rblk, paddr) in the L2P table and the
 * corresponding reverse entry, invalidating any previous mapping of
 * @laddr first. Returns the updated L2P entry.
 */
static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
					 struct rrpc_block *rblk, u64 paddr)
{
	struct rrpc_addr *gp;
	struct rrpc_rev_addr *rev;

	BUG_ON(laddr >= rrpc->nr_sects);

	gp = &rrpc->trans_map[laddr];
	spin_lock(&rrpc->rev_lock);
	/* old physical page (if any) is now stale */
	if (gp->rblk)
		rrpc_page_invalidate(rrpc, gp);

	gp->addr = paddr;
	gp->rblk = rblk;

	rev = &rrpc->rev_trans_map[gp->addr];
	rev->addr = laddr;
	spin_unlock(&rrpc->rev_lock);

	return gp;
}
589 | ||
/*
 * Claim the next free sector in @rblk by advancing its write pointer.
 * Returns the in-block sector index, or ADDR_EMPTY if the block is full.
 */
static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	u64 addr = ADDR_EMPTY;

	spin_lock(&rblk->lock);
	if (block_is_full(rrpc, rblk))
		goto out;

	addr = rblk->next_page;

	rblk->next_page++;
out:
	spin_unlock(&rblk->lock);
	return addr;
}
605 | ||
/* Map logical address to a physical page. The mapping implements a round robin
 * approach and allocates a page from the next lun available.
 *
 * Returns the global ppa of the mapped page, or a ppa of ADDR_EMPTY if no
 * blocks in the next rlun are available.
 *
 * NOTE: the control flow hands rlun->lock back and forth across the
 * retry/new_blk/done labels — every path to 'retry' holds the lock,
 * every path out of 'done' or the failure return has released it.
 */
static struct ppa_addr rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
				     int is_gc)
{
	struct nvm_tgt_dev *tgt_dev = rrpc->dev;
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk, **cur_rblk;
	struct rrpc_addr *p;
	struct ppa_addr ppa;
	u64 paddr;
	int gc_force = 0;

	ppa.ppa = ADDR_EMPTY;
	rlun = rrpc_get_lun_rr(rrpc, is_gc);

	/* throttle normal writes when the LUN is running low on blocks */
	if (!is_gc && rlun->nr_free_blocks < rrpc->nr_luns * 4)
		return ppa;

	/*
	 * page allocation steps:
	 * 1. Try to allocate new page from current rblk
	 * 2a. If succeed, proceed to map it in and return
	 * 2b. If fail, first try to allocate a new block from media manger,
	 *     and then retry step 1. Retry until the normal block pool is
	 *     exhausted.
	 * 3. If exhausted, and garbage collector is requesting the block,
	 *    go to the reserved block and retry step 1.
	 *    In the case that this fails as well, or it is not GC
	 *    requesting, report not able to retrieve a block and let the
	 *    caller handle further processing.
	 */

	spin_lock(&rlun->lock);
	cur_rblk = &rlun->cur;
	rblk = rlun->cur;
retry:
	paddr = rrpc_alloc_addr(rrpc, rblk);

	if (paddr != ADDR_EMPTY)
		goto done;

	if (!list_empty(&rlun->wblk_list)) {
new_blk:
		rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
					prio);
		rrpc_set_lun_cur(rlun, rblk, cur_rblk);
		list_del(&rblk->prio);
		goto retry;
	}
	spin_unlock(&rlun->lock);

	rblk = rrpc_get_blk(rrpc, rlun, gc_force);
	if (rblk) {
		spin_lock(&rlun->lock);
		list_add_tail(&rblk->prio, &rlun->wblk_list);
		/*
		 * another thread might already have added a new block,
		 * Therefore, make sure that one is used, instead of the
		 * one just added.
		 */
		goto new_blk;
	}

	if (unlikely(is_gc) && !gc_force) {
		/* retry from emergency gc block */
		cur_rblk = &rlun->gc_cur;
		rblk = rlun->gc_cur;
		gc_force = 1;
		spin_lock(&rlun->lock);
		goto retry;
	}

	pr_err("rrpc: failed to allocate new block\n");
	return ppa;
done:
	spin_unlock(&rlun->lock);
	p = rrpc_update_map(rrpc, laddr, rblk, paddr);
	if (!p)
		return ppa;

	/* return global address */
	return rrpc_ppa_to_gaddr(tgt_dev, p);
}
694 | ||
/* Queue @rblk onto the GC workqueue (block just became fully written) */
static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_block_gc *gcb;

	gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
	if (!gcb) {
		pr_err("rrpc: unable to queue block for gc.");
		return;
	}

	gcb->rrpc = rrpc;
	gcb->rblk = rblk;

	INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
	queue_work(rrpc->kgc_wq, &gcb->ws_gc);
}
711 | ||
/*
 * Linear scan for the rrpc LUN whose base ppa matches @p's channel and
 * lun fields. Returns NULL if the ppa belongs to no instantiated LUN.
 */
static struct rrpc_lun *rrpc_ppa_to_lun(struct rrpc *rrpc, struct ppa_addr p)
{
	struct rrpc_lun *rlun = NULL;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		if (rrpc->luns[i].bppa.g.ch == p.g.ch &&
		    rrpc->luns[i].bppa.g.lun == p.g.lun) {
			rlun = &rrpc->luns[i];
			break;
		}
	}

	return rlun;
}
727 | ||
2a02e627 | 728 | static void __rrpc_mark_bad_block(struct rrpc *rrpc, struct ppa_addr ppa) |
a24ba464 | 729 | { |
8e79b5cb | 730 | struct nvm_tgt_dev *dev = rrpc->dev; |
2a02e627 JG |
731 | struct rrpc_lun *rlun; |
732 | struct rrpc_block *rblk; | |
733 | ||
734 | rlun = rrpc_ppa_to_lun(rrpc, ppa); | |
735 | rblk = &rlun->blocks[ppa.g.blk]; | |
736 | rblk->state = NVM_BLK_ST_BAD; | |
737 | ||
333ba053 | 738 | nvm_set_tgt_bb_tbl(dev, &ppa, 1, NVM_BLK_T_GRWN_BAD); |
2a02e627 JG |
739 | } |
740 | ||
741 | static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd) | |
742 | { | |
a24ba464 JG |
743 | void *comp_bits = &rqd->ppa_status; |
744 | struct ppa_addr ppa, prev_ppa; | |
745 | int nr_ppas = rqd->nr_ppas; | |
746 | int bit; | |
747 | ||
748 | if (rqd->nr_ppas == 1) | |
2a02e627 | 749 | __rrpc_mark_bad_block(rrpc, rqd->ppa_addr); |
a24ba464 JG |
750 | |
751 | ppa_set_empty(&prev_ppa); | |
752 | bit = -1; | |
753 | while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) { | |
754 | ppa = rqd->ppa_list[bit]; | |
755 | if (ppa_cmp_blk(ppa, prev_ppa)) | |
756 | continue; | |
757 | ||
2a02e627 | 758 | __rrpc_mark_bad_block(rrpc, ppa); |
a24ba464 JG |
759 | } |
760 | } | |
761 | ||
/*
 * Write-completion accounting: bump each target block's committed-data
 * counter; once a block is completely written, hand it to GC scheduling.
 */
static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
			      sector_t laddr, uint8_t npages)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_addr *p;
	struct rrpc_block *rblk;
	int cmnt_size, i;

	for (i = 0; i < npages; i++) {
		p = &rrpc->trans_map[laddr + i];
		rblk = p->rblk;

		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
		/* block fully committed: make it a GC candidate */
		if (unlikely(cmnt_size == dev->geo.sec_per_blk))
			rrpc_run_gc(rrpc, rblk);
	}
}
779 | ||
/*
 * Common I/O completion: handle write accounting and bad-block marking,
 * then release the bio and (for non-GC requests) the laddr lock, ppa
 * list and request itself. GC requests are freed by their submitter.
 */
static void rrpc_end_io(struct nvm_rq *rqd)
{
	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	uint8_t npages = rqd->nr_ppas;
	sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;

	if (bio_data_dir(rqd->bio) == WRITE) {
		if (rqd->error == NVM_RSP_ERR_FAILWRITE)
			rrpc_mark_bad_block(rrpc, rqd);

		rrpc_end_io_write(rrpc, rrqd, laddr, npages);
	}

	bio_put(rqd->bio);

	/* GC path owns its own request lifetime */
	if (rrqd->flags & NVM_IOTYPE_GC)
		return;

	rrpc_unlock_rq(rrpc, rqd);

	if (npages > 1)
		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);

	mempool_free(rqd, rrpc->rq_pool);
}
807 | ||
/*
 * Prepare a multi-sector read: resolve each logical sector through the
 * L2P table into rqd->ppa_list. Unmapped sectors complete the bio early
 * (NVM_IO_DONE); a busy laddr range requeues non-GC requests.
 */
static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags,
				int npages)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *gp;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
		gp = &rrpc->trans_map[laddr + i];

		if (gp->rblk) {
			rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, gp);
		} else {
			/* unmapped sector: nothing to read */
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(dev->parent, rqd->ppa_list,
					 rqd->dma_ppa_list);
			return NVM_IO_DONE;
		}
	}

	rqd->opcode = NVM_OP_HBREAD;

	return NVM_IO_OK;
}
843 | ||
/*
 * Prepare a single-sector read: resolve the logical sector into
 * rqd->ppa_addr. Unmapped sectors complete early; busy laddr requeues.
 */
static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
			unsigned long flags)
{
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);
	struct rrpc_addr *gp;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
	gp = &rrpc->trans_map[laddr];

	if (gp->rblk) {
		rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp);
	} else {
		/* unmapped sector: nothing to read */
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		return NVM_IO_DONE;
	}

	rqd->opcode = NVM_OP_HBREAD;

	return NVM_IO_OK;
}
869 | ||
/*
 * Prepare a multi-sector write: map each logical sector to a fresh
 * physical page. On map exhaustion the request is requeued and GC is
 * kicked to free up blocks.
 */
static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
				 struct nvm_rq *rqd, unsigned long flags,
				 int npages)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct ppa_addr p;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		p = rrpc_map_page(rrpc, laddr + i, is_gc);
		if (p.ppa == ADDR_EMPTY) {
			/* no free pages: back off and let GC reclaim space */
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(dev->parent, rqd->ppa_list,
					 rqd->dma_ppa_list);
			rrpc_gc_kick(rrpc);
			return NVM_IO_REQUEUE;
		}

		rqd->ppa_list[i] = p;
	}

	rqd->opcode = NVM_OP_HBWRITE;

	return NVM_IO_OK;
}
904 | ||
905 | static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio, | |
906 | struct nvm_rq *rqd, unsigned long flags) | |
907 | { | |
8e53624d | 908 | struct ppa_addr p; |
ae1519ec MB |
909 | int is_gc = flags & NVM_IOTYPE_GC; |
910 | sector_t laddr = rrpc_get_laddr(bio); | |
911 | ||
912 | if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) | |
913 | return NVM_IO_REQUEUE; | |
914 | ||
915 | p = rrpc_map_page(rrpc, laddr, is_gc); | |
8e53624d | 916 | if (p.ppa == ADDR_EMPTY) { |
ae1519ec MB |
917 | BUG_ON(is_gc); |
918 | rrpc_unlock_rq(rrpc, rqd); | |
919 | rrpc_gc_kick(rrpc); | |
920 | return NVM_IO_REQUEUE; | |
921 | } | |
922 | ||
8e53624d | 923 | rqd->ppa_addr = p; |
ae1519ec | 924 | rqd->opcode = NVM_OP_HBWRITE; |
ae1519ec MB |
925 | |
926 | return NVM_IO_OK; | |
927 | } | |
928 | ||
929 | static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio, | |
930 | struct nvm_rq *rqd, unsigned long flags, uint8_t npages) | |
931 | { | |
8e79b5cb JG |
932 | struct nvm_tgt_dev *dev = rrpc->dev; |
933 | ||
ae1519ec | 934 | if (npages > 1) { |
8e79b5cb | 935 | rqd->ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, |
ae1519ec MB |
936 | &rqd->dma_ppa_list); |
937 | if (!rqd->ppa_list) { | |
938 | pr_err("rrpc: not able to allocate ppa list\n"); | |
939 | return NVM_IO_ERR; | |
940 | } | |
941 | ||
70246286 | 942 | if (bio_op(bio) == REQ_OP_WRITE) |
ae1519ec MB |
943 | return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags, |
944 | npages); | |
945 | ||
946 | return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages); | |
947 | } | |
948 | ||
70246286 | 949 | if (bio_op(bio) == REQ_OP_WRITE) |
ae1519ec MB |
950 | return rrpc_write_rq(rrpc, bio, rqd, flags); |
951 | ||
952 | return rrpc_read_rq(rrpc, bio, rqd, flags); | |
953 | } | |
954 | ||
/*
 * Set up and submit one bio as a physical-page I/O request.
 *
 * Validates the bio size against the device geometry, maps the request
 * (address translation plus inflight locking via rrpc_setup_rq) and hands
 * it to the lightnvm core. On submission failure everything acquired
 * during setup is released again.
 *
 * Returns NVM_IO_OK, NVM_IO_ERR, or the NVM_IO_* status propagated from
 * rrpc_setup_rq (NVM_IO_DONE / NVM_IO_REQUEUE).
 */
static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
	uint8_t nr_pages = rrpc_get_pages(bio);
	int bio_size = bio_sectors(bio) << 9;
	int err;

	/* Reject bios smaller than a device sector or larger than the
	 * biggest request the device accepts.
	 */
	if (bio_size < dev->geo.sec_size)
		return NVM_IO_ERR;
	else if (bio_size > dev->geo.max_rq_size)
		return NVM_IO_ERR;

	err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
	if (err)
		return err;

	bio_get(bio);	/* extra ref, dropped on completion or error below */
	rqd->bio = bio;
	rqd->ins = &rrpc->instance;
	rqd->nr_ppas = nr_pages;
	rrq->flags = flags;

	err = nvm_submit_io(dev, rqd);
	if (err) {
		pr_err("rrpc: I/O submission failed: %d\n", err);
		bio_put(bio);
		if (!(flags & NVM_IOTYPE_GC)) {
			/* Undo rrpc_setup_rq: release the inflight lock and,
			 * for multi-page requests, the DMA ppa list.
			 */
			rrpc_unlock_rq(rrpc, rqd);
			if (rqd->nr_ppas > 1)
				nvm_dev_dma_free(dev->parent, rqd->ppa_list,
							rqd->dma_ppa_list);
		}
		return NVM_IO_ERR;
	}

	return NVM_IO_OK;
}
994 | ||
/*
 * Target entry point for incoming bios.
 *
 * Discards are handled directly; everything else is wrapped in an nvm_rq
 * and submitted. Requests that cannot make progress right now (locked
 * region, no free pages) are parked on the requeue list and retried by
 * the requeue worker.
 */
static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
{
	struct rrpc *rrpc = q->queuedata;
	struct nvm_rq *rqd;
	int err;

	blk_queue_split(q, &bio, q->bio_split);

	if (bio_op(bio) == REQ_OP_DISCARD) {
		rrpc_discard(rrpc, bio);
		return BLK_QC_T_NONE;
	}

	rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
	if (!rqd) {
		pr_err_ratelimited("rrpc: not able to queue bio.");
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}
	memset(rqd, 0, sizeof(struct nvm_rq));

	err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
	switch (err) {
	case NVM_IO_OK:
		/* rqd is intentionally not freed here; the completion
		 * path owns it now.
		 */
		return BLK_QC_T_NONE;
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	case NVM_IO_REQUEUE:
		spin_lock(&rrpc->bio_lock);
		bio_list_add(&rrpc->requeue_bios, bio);
		spin_unlock(&rrpc->bio_lock);
		queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
		break;
	}

	mempool_free(rqd, rrpc->rq_pool);
	return BLK_QC_T_NONE;
}
1037 | ||
1038 | static void rrpc_requeue(struct work_struct *work) | |
1039 | { | |
1040 | struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue); | |
1041 | struct bio_list bios; | |
1042 | struct bio *bio; | |
1043 | ||
1044 | bio_list_init(&bios); | |
1045 | ||
1046 | spin_lock(&rrpc->bio_lock); | |
1047 | bio_list_merge(&bios, &rrpc->requeue_bios); | |
1048 | bio_list_init(&rrpc->requeue_bios); | |
1049 | spin_unlock(&rrpc->bio_lock); | |
1050 | ||
1051 | while ((bio = bio_list_pop(&bios))) | |
1052 | rrpc_make_rq(rrpc->disk->queue, bio); | |
1053 | } | |
1054 | ||
1055 | static void rrpc_gc_free(struct rrpc *rrpc) | |
1056 | { | |
ae1519ec MB |
1057 | if (rrpc->krqd_wq) |
1058 | destroy_workqueue(rrpc->krqd_wq); | |
1059 | ||
1060 | if (rrpc->kgc_wq) | |
1061 | destroy_workqueue(rrpc->kgc_wq); | |
ae1519ec MB |
1062 | } |
1063 | ||
/*
 * Allocate the GC workqueues and set up (but do not arm) the GC timer.
 * Partially created workqueues are cleaned up by rrpc_gc_free() on the
 * caller's error path.
 */
static int rrpc_gc_init(struct rrpc *rrpc)
{
	/* One concurrent work item per lun; unbound so GC may run anywhere */
	rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
								rrpc->nr_luns);
	if (!rrpc->krqd_wq)
		return -ENOMEM;

	/* Single-threaded background queue for timer-driven GC kicks */
	rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
	if (!rrpc->kgc_wq)
		return -ENOMEM;

	setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);

	return 0;
}
1079 | ||
/* Release the forward and reverse translation tables (vfree is NULL-safe,
 * so partial initialization is handled too).
 */
static void rrpc_map_free(struct rrpc *rrpc)
{
	vfree(rrpc->rev_trans_map);
	vfree(rrpc->trans_map);
}
1085 | ||
/*
 * Callback for nvm_get_l2p_tbl(): install a chunk of the device-provided
 * L2P table into the in-memory forward (trans_map) and reverse
 * (rev_trans_map) translation tables, starting at logical sector @slba.
 *
 * Also re-establishes coarse block state: a block owning a mapped sector
 * is moved to its lun's used list and accounted as in use.
 *
 * Returns 0 on success, -EINVAL on an out-of-bounds or corrupt entry.
 */
static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct rrpc *rrpc = (struct rrpc *)private;
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_addr *addr = rrpc->trans_map + slba;
	struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	u64 i;

	for (i = 0; i < nlb; i++) {
		struct ppa_addr gaddr;
		u64 pba = le64_to_cpu(entries[i]);
		unsigned int mod;

		/* LNVM treats address-spaces as silos, LBA and PBA are
		 * equally large and zero-indexed.
		 */
		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
			pr_err("nvm: L2P data entry is out of bounds!\n");
			pr_err("nvm: Maybe loaded an old target L2P\n");
			return -EINVAL;
		}

		/* Address zero is a special one. The first page on a disk is
		 * protected. As it often holds internal device boot
		 * information.
		 */
		if (!pba)
			continue;

		/* Offset of the pba within the target's own address space;
		 * used to index the reverse map below.
		 */
		div_u64_rem(pba, rrpc->nr_sects, &mod);

		gaddr = rrpc_recov_addr(dev, pba);
		rlun = rrpc_ppa_to_lun(rrpc, gaddr);
		if (!rlun) {
			pr_err("rrpc: l2p corruption on lba %llu\n",
							slba + i);
			return -EINVAL;
		}

		rblk = &rlun->blocks[gaddr.g.blk];
		if (!rblk->state) {
			/* at this point, we don't know anything about the
			 * block. It's up to the FTL on top to re-etablish the
			 * block state. The block is assumed to be open.
			 */
			list_move_tail(&rblk->list, &rlun->used_list);
			rblk->state = NVM_BLK_ST_TGT;
			rlun->nr_free_blocks--;
		}

		addr[i].addr = pba;
		addr[i].rblk = rblk;
		raddr[mod].addr = slba + i;
	}

	return 0;
}
1145 | ||
1146 | static int rrpc_map_init(struct rrpc *rrpc) | |
1147 | { | |
8e79b5cb | 1148 | struct nvm_tgt_dev *dev = rrpc->dev; |
ae1519ec MB |
1149 | sector_t i; |
1150 | int ret; | |
1151 | ||
4ece44af | 1152 | rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects); |
ae1519ec MB |
1153 | if (!rrpc->trans_map) |
1154 | return -ENOMEM; | |
1155 | ||
1156 | rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr) | |
4ece44af | 1157 | * rrpc->nr_sects); |
ae1519ec MB |
1158 | if (!rrpc->rev_trans_map) |
1159 | return -ENOMEM; | |
1160 | ||
4ece44af | 1161 | for (i = 0; i < rrpc->nr_sects; i++) { |
ae1519ec MB |
1162 | struct rrpc_addr *p = &rrpc->trans_map[i]; |
1163 | struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i]; | |
1164 | ||
1165 | p->addr = ADDR_EMPTY; | |
1166 | r->addr = ADDR_EMPTY; | |
1167 | } | |
1168 | ||
ae1519ec | 1169 | /* Bring up the mapping table from device */ |
da2d7cb8 JG |
1170 | ret = nvm_get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects, |
1171 | rrpc_l2p_update, rrpc); | |
ae1519ec MB |
1172 | if (ret) { |
1173 | pr_err("nvm: rrpc: could not read L2P table.\n"); | |
1174 | return -EINVAL; | |
1175 | } | |
1176 | ||
1177 | return 0; | |
1178 | } | |
1179 | ||
ae1519ec MB |
1180 | /* Minimum pages needed within a lun */ |
1181 | #define PAGE_POOL_SIZE 16 | |
1182 | #define ADDR_POOL_SIZE 64 | |
1183 | ||
1184 | static int rrpc_core_init(struct rrpc *rrpc) | |
1185 | { | |
1186 | down_write(&rrpc_lock); | |
1187 | if (!rrpc_gcb_cache) { | |
1188 | rrpc_gcb_cache = kmem_cache_create("rrpc_gcb", | |
1189 | sizeof(struct rrpc_block_gc), 0, 0, NULL); | |
1190 | if (!rrpc_gcb_cache) { | |
1191 | up_write(&rrpc_lock); | |
1192 | return -ENOMEM; | |
1193 | } | |
1194 | ||
1195 | rrpc_rq_cache = kmem_cache_create("rrpc_rq", | |
1196 | sizeof(struct nvm_rq) + sizeof(struct rrpc_rq), | |
1197 | 0, 0, NULL); | |
1198 | if (!rrpc_rq_cache) { | |
1199 | kmem_cache_destroy(rrpc_gcb_cache); | |
1200 | up_write(&rrpc_lock); | |
1201 | return -ENOMEM; | |
1202 | } | |
1203 | } | |
1204 | up_write(&rrpc_lock); | |
1205 | ||
1206 | rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0); | |
1207 | if (!rrpc->page_pool) | |
1208 | return -ENOMEM; | |
1209 | ||
8e79b5cb | 1210 | rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->geo.nr_luns, |
ae1519ec MB |
1211 | rrpc_gcb_cache); |
1212 | if (!rrpc->gcb_pool) | |
1213 | return -ENOMEM; | |
1214 | ||
1215 | rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache); | |
1216 | if (!rrpc->rq_pool) | |
1217 | return -ENOMEM; | |
1218 | ||
1219 | spin_lock_init(&rrpc->inflights.lock); | |
1220 | INIT_LIST_HEAD(&rrpc->inflights.reqs); | |
1221 | ||
1222 | return 0; | |
1223 | } | |
1224 | ||
/* Release the mempools created by rrpc_core_init(); mempool_destroy()
 * accepts NULL, so partial initialization is fine.
 */
static void rrpc_core_free(struct rrpc *rrpc)
{
	mempool_destroy(rrpc->page_pool);
	mempool_destroy(rrpc->gcb_pool);
	mempool_destroy(rrpc->rq_pool);
}
1231 | ||
1232 | static void rrpc_luns_free(struct rrpc *rrpc) | |
1233 | { | |
da1e2849 WT |
1234 | struct rrpc_lun *rlun; |
1235 | int i; | |
1236 | ||
1237 | if (!rrpc->luns) | |
1238 | return; | |
1239 | ||
1240 | for (i = 0; i < rrpc->nr_luns; i++) { | |
1241 | rlun = &rrpc->luns[i]; | |
da1e2849 WT |
1242 | vfree(rlun->blocks); |
1243 | } | |
1244 | ||
ae1519ec MB |
1245 | kfree(rrpc->luns); |
1246 | } | |
1247 | ||
2a02e627 JG |
1248 | static int rrpc_bb_discovery(struct nvm_tgt_dev *dev, struct rrpc_lun *rlun) |
1249 | { | |
1250 | struct nvm_geo *geo = &dev->geo; | |
1251 | struct rrpc_block *rblk; | |
1252 | struct ppa_addr ppa; | |
1253 | u8 *blks; | |
1254 | int nr_blks; | |
1255 | int i; | |
1256 | int ret; | |
1257 | ||
959e911b JG |
1258 | if (!dev->parent->ops->get_bb_tbl) |
1259 | return 0; | |
1260 | ||
2a02e627 JG |
1261 | nr_blks = geo->blks_per_lun * geo->plane_mode; |
1262 | blks = kmalloc(nr_blks, GFP_KERNEL); | |
1263 | if (!blks) | |
1264 | return -ENOMEM; | |
1265 | ||
1266 | ppa.ppa = 0; | |
8e53624d JG |
1267 | ppa.g.ch = rlun->bppa.g.ch; |
1268 | ppa.g.lun = rlun->bppa.g.lun; | |
2a02e627 | 1269 | |
333ba053 | 1270 | ret = nvm_get_tgt_bb_tbl(dev, ppa, blks); |
2a02e627 JG |
1271 | if (ret) { |
1272 | pr_err("rrpc: could not get BB table\n"); | |
1273 | goto out; | |
1274 | } | |
1275 | ||
1276 | nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks); | |
1277 | if (nr_blks < 0) | |
1278 | return nr_blks; | |
1279 | ||
2a02e627 JG |
1280 | for (i = 0; i < nr_blks; i++) { |
1281 | if (blks[i] == NVM_BLK_T_FREE) | |
1282 | continue; | |
1283 | ||
1284 | rblk = &rlun->blocks[i]; | |
1285 | list_move_tail(&rblk->list, &rlun->bb_list); | |
1286 | rblk->state = NVM_BLK_ST_BAD; | |
1287 | rlun->nr_free_blocks--; | |
1288 | } | |
1289 | ||
1290 | out: | |
1291 | kfree(blks); | |
1292 | return ret; | |
1293 | } | |
1294 | ||
8e53624d JG |
1295 | static void rrpc_set_lun_ppa(struct rrpc_lun *rlun, struct ppa_addr ppa) |
1296 | { | |
1297 | rlun->bppa.ppa = 0; | |
1298 | rlun->bppa.g.ch = ppa.g.ch; | |
1299 | rlun->bppa.g.lun = ppa.g.lun; | |
1300 | } | |
1301 | ||
/*
 * Allocate and initialize the per-lun state: one rrpc_lun per device lun
 * (1:1 mapping), each with its block array, lists, GC work item and
 * bad-block discovery.
 *
 * On failure, partially-allocated rlun->blocks arrays are released by
 * rrpc_luns_free() on the caller's error path.
 */
static int rrpc_luns_init(struct rrpc *rrpc, struct ppa_addr *luns)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct nvm_geo *geo = &dev->geo;
	struct rrpc_lun *rlun;
	int i, j, ret = -EINVAL;

	/* The per-block invalid-page bitmap has a fixed maximum capacity */
	if (geo->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
		pr_err("rrpc: number of pages per block too high.");
		return -EINVAL;
	}

	spin_lock_init(&rrpc->rev_lock);

	rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
								GFP_KERNEL);
	if (!rrpc->luns)
		return -ENOMEM;

	/* 1:1 mapping */
	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		rlun->id = i;
		rrpc_set_lun_ppa(rlun, luns[i]);
		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
							geo->blks_per_lun);
		if (!rlun->blocks) {
			ret = -ENOMEM;
			goto err;
		}

		INIT_LIST_HEAD(&rlun->free_list);
		INIT_LIST_HEAD(&rlun->used_list);
		INIT_LIST_HEAD(&rlun->bb_list);

		/* Every block starts out free; bad-block discovery below
		 * moves bad ones to bb_list.
		 */
		for (j = 0; j < geo->blks_per_lun; j++) {
			struct rrpc_block *rblk = &rlun->blocks[j];

			rblk->id = j;
			rblk->rlun = rlun;
			rblk->state = NVM_BLK_T_FREE;
			INIT_LIST_HEAD(&rblk->prio);
			INIT_LIST_HEAD(&rblk->list);
			spin_lock_init(&rblk->lock);

			list_add_tail(&rblk->list, &rlun->free_list);
		}

		rlun->rrpc = rrpc;
		rlun->nr_free_blocks = geo->blks_per_lun;
		rlun->reserved_blocks = 2; /* for GC only */

		INIT_LIST_HEAD(&rlun->prio_list);
		INIT_LIST_HEAD(&rlun->wblk_list);

		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
		spin_lock_init(&rlun->lock);

		if (rrpc_bb_discovery(dev, rlun))
			goto err;

	}

	return 0;
err:
	return ret;
}
1368 | } |
1369 | ||
4c9dacb8 WT |
1370 | /* returns 0 on success and stores the beginning address in *begin */ |
1371 | static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin) | |
1372 | { | |
8e79b5cb | 1373 | struct nvm_tgt_dev *dev = rrpc->dev; |
8e79b5cb | 1374 | sector_t size = rrpc->nr_sects * dev->geo.sec_size; |
909049a7 | 1375 | int ret; |
4c9dacb8 WT |
1376 | |
1377 | size >>= 9; | |
1378 | ||
da2d7cb8 | 1379 | ret = nvm_get_area(dev, begin, size); |
909049a7 | 1380 | if (!ret) |
8e79b5cb | 1381 | *begin >>= (ilog2(dev->geo.sec_size) - 9); |
909049a7 WT |
1382 | |
1383 | return ret; | |
4c9dacb8 WT |
1384 | } |
1385 | ||
1386 | static void rrpc_area_free(struct rrpc *rrpc) | |
1387 | { | |
8e79b5cb | 1388 | struct nvm_tgt_dev *dev = rrpc->dev; |
8e79b5cb | 1389 | sector_t begin = rrpc->soffset << (ilog2(dev->geo.sec_size) - 9); |
4c9dacb8 | 1390 | |
da2d7cb8 | 1391 | nvm_put_area(dev, begin); |
4c9dacb8 WT |
1392 | } |
1393 | ||
ae1519ec MB |
/*
 * Tear down all rrpc state and free the instance. Called both from the
 * normal exit path and from rrpc_init()'s error path, so the helpers are
 * expected to cope with partially-initialized state.
 */
static void rrpc_free(struct rrpc *rrpc)
{
	rrpc_gc_free(rrpc);
	rrpc_map_free(rrpc);
	rrpc_core_free(rrpc);
	rrpc_luns_free(rrpc);
	rrpc_area_free(rrpc);

	kfree(rrpc);
}
1404 | ||
/* Target exit hook: quiesce background activity, then free everything. */
static void rrpc_exit(void *private)
{
	struct rrpc *rrpc = private;

	/* Stop the GC timer before draining the work it may have queued */
	del_timer(&rrpc->gc_timer);

	flush_workqueue(rrpc->krqd_wq);
	flush_workqueue(rrpc->kgc_wq);

	rrpc_free(rrpc);
}
1416 | ||
1417 | static sector_t rrpc_capacity(void *private) | |
1418 | { | |
1419 | struct rrpc *rrpc = private; | |
8e79b5cb | 1420 | struct nvm_tgt_dev *dev = rrpc->dev; |
ae1519ec MB |
1421 | sector_t reserved, provisioned; |
1422 | ||
1423 | /* cur, gc, and two emergency blocks for each lun */ | |
8e79b5cb | 1424 | reserved = rrpc->nr_luns * dev->geo.sec_per_blk * 4; |
4ece44af | 1425 | provisioned = rrpc->nr_sects - reserved; |
ae1519ec | 1426 | |
4ece44af | 1427 | if (reserved > rrpc->nr_sects) { |
ae1519ec MB |
1428 | pr_err("rrpc: not enough space available to expose storage.\n"); |
1429 | return 0; | |
1430 | } | |
1431 | ||
1432 | sector_div(provisioned, 10); | |
1433 | return provisioned * 9 * NR_PHY_IN_LOG; | |
1434 | } | |
1435 | ||
/*
 * Rebuild per-page validity state for one block after the L2P table has
 * been loaded: for each written page, look up its logical address in the
 * reverse map and check whether the forward map still points back at this
 * physical page. If it does, the page is valid and the forward entry is
 * bound to this block; otherwise the page was superseded by a newer write
 * and is marked invalid.
 */
static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	int offset;
	struct rrpc_addr *laddr;
	u64 bpaddr, paddr, pladdr;

	bpaddr = block_to_rel_addr(rrpc, rblk);
	for (offset = 0; offset < dev->geo.sec_per_blk; offset++) {
		paddr = bpaddr + offset;

		pladdr = rrpc->rev_trans_map[paddr].addr;
		if (pladdr == ADDR_EMPTY)
			continue;

		laddr = &rrpc->trans_map[pladdr];

		if (paddr == laddr->addr) {
			/* forward map agrees: page holds live data */
			laddr->rblk = rblk;
		} else {
			/* stale copy: a newer write owns this lba */
			set_bit(offset, rblk->invalid_pages);
			rblk->nr_invalid_pages++;
		}
	}
}
1466 | ||
1467 | static int rrpc_blocks_init(struct rrpc *rrpc) | |
1468 | { | |
8e79b5cb | 1469 | struct nvm_tgt_dev *dev = rrpc->dev; |
ae1519ec MB |
1470 | struct rrpc_lun *rlun; |
1471 | struct rrpc_block *rblk; | |
1472 | int lun_iter, blk_iter; | |
1473 | ||
1474 | for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) { | |
1475 | rlun = &rrpc->luns[lun_iter]; | |
1476 | ||
8e79b5cb | 1477 | for (blk_iter = 0; blk_iter < dev->geo.blks_per_lun; |
ae1519ec MB |
1478 | blk_iter++) { |
1479 | rblk = &rlun->blocks[blk_iter]; | |
1480 | rrpc_block_map_update(rrpc, rblk); | |
1481 | } | |
1482 | } | |
1483 | ||
1484 | return 0; | |
1485 | } | |
1486 | ||
1487 | static int rrpc_luns_configure(struct rrpc *rrpc) | |
1488 | { | |
1489 | struct rrpc_lun *rlun; | |
1490 | struct rrpc_block *rblk; | |
1491 | int i; | |
1492 | ||
1493 | for (i = 0; i < rrpc->nr_luns; i++) { | |
1494 | rlun = &rrpc->luns[i]; | |
1495 | ||
1496 | rblk = rrpc_get_blk(rrpc, rlun, 0); | |
1497 | if (!rblk) | |
d3d1a438 | 1498 | goto err; |
855cdd2c | 1499 | rrpc_set_lun_cur(rlun, rblk, &rlun->cur); |
ae1519ec MB |
1500 | |
1501 | /* Emergency gc block */ | |
1502 | rblk = rrpc_get_blk(rrpc, rlun, 1); | |
1503 | if (!rblk) | |
d3d1a438 | 1504 | goto err; |
855cdd2c | 1505 | rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur); |
ae1519ec MB |
1506 | } |
1507 | ||
1508 | return 0; | |
d3d1a438 WT |
1509 | err: |
1510 | rrpc_put_blks(rrpc); | |
1511 | return -EINVAL; | |
ae1519ec MB |
1512 | } |
1513 | ||
1514 | static struct nvm_tgt_type tt_rrpc; | |
1515 | ||
8e53624d | 1516 | static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk) |
ae1519ec MB |
1517 | { |
1518 | struct request_queue *bqueue = dev->q; | |
1519 | struct request_queue *tqueue = tdisk->queue; | |
8e79b5cb | 1520 | struct nvm_geo *geo = &dev->geo; |
ae1519ec | 1521 | struct rrpc *rrpc; |
4c9dacb8 | 1522 | sector_t soffset; |
ae1519ec MB |
1523 | int ret; |
1524 | ||
1525 | if (!(dev->identity.dom & NVM_RSP_L2P)) { | |
1526 | pr_err("nvm: rrpc: device does not support l2p (%x)\n", | |
1527 | dev->identity.dom); | |
1528 | return ERR_PTR(-EINVAL); | |
1529 | } | |
1530 | ||
1531 | rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL); | |
1532 | if (!rrpc) | |
1533 | return ERR_PTR(-ENOMEM); | |
1534 | ||
1535 | rrpc->instance.tt = &tt_rrpc; | |
1536 | rrpc->dev = dev; | |
1537 | rrpc->disk = tdisk; | |
1538 | ||
1539 | bio_list_init(&rrpc->requeue_bios); | |
1540 | spin_lock_init(&rrpc->bio_lock); | |
1541 | INIT_WORK(&rrpc->ws_requeue, rrpc_requeue); | |
1542 | ||
8e79b5cb JG |
1543 | rrpc->nr_luns = geo->nr_luns; |
1544 | rrpc->nr_sects = (unsigned long long)geo->sec_per_lun * rrpc->nr_luns; | |
ae1519ec MB |
1545 | |
1546 | /* simple round-robin strategy */ | |
1547 | atomic_set(&rrpc->next_lun, -1); | |
1548 | ||
4c9dacb8 WT |
1549 | ret = rrpc_area_init(rrpc, &soffset); |
1550 | if (ret < 0) { | |
1551 | pr_err("nvm: rrpc: could not initialize area\n"); | |
1552 | return ERR_PTR(ret); | |
1553 | } | |
1554 | rrpc->soffset = soffset; | |
1555 | ||
8e53624d | 1556 | ret = rrpc_luns_init(rrpc, dev->luns); |
ae1519ec MB |
1557 | if (ret) { |
1558 | pr_err("nvm: rrpc: could not initialize luns\n"); | |
1559 | goto err; | |
1560 | } | |
1561 | ||
ae1519ec MB |
1562 | ret = rrpc_core_init(rrpc); |
1563 | if (ret) { | |
1564 | pr_err("nvm: rrpc: could not initialize core\n"); | |
1565 | goto err; | |
1566 | } | |
1567 | ||
1568 | ret = rrpc_map_init(rrpc); | |
1569 | if (ret) { | |
1570 | pr_err("nvm: rrpc: could not initialize maps\n"); | |
1571 | goto err; | |
1572 | } | |
1573 | ||
1574 | ret = rrpc_blocks_init(rrpc); | |
1575 | if (ret) { | |
1576 | pr_err("nvm: rrpc: could not initialize state for blocks\n"); | |
1577 | goto err; | |
1578 | } | |
1579 | ||
1580 | ret = rrpc_luns_configure(rrpc); | |
1581 | if (ret) { | |
1582 | pr_err("nvm: rrpc: not enough blocks available in LUNs.\n"); | |
1583 | goto err; | |
1584 | } | |
1585 | ||
1586 | ret = rrpc_gc_init(rrpc); | |
1587 | if (ret) { | |
1588 | pr_err("nvm: rrpc: could not initialize gc\n"); | |
1589 | goto err; | |
1590 | } | |
1591 | ||
1592 | /* inherit the size from the underlying device */ | |
1593 | blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue)); | |
1594 | blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue)); | |
1595 | ||
1596 | pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n", | |
4ece44af | 1597 | rrpc->nr_luns, (unsigned long long)rrpc->nr_sects); |
ae1519ec MB |
1598 | |
1599 | mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10)); | |
1600 | ||
1601 | return rrpc; | |
1602 | err: | |
1603 | rrpc_free(rrpc); | |
1604 | return ERR_PTR(ret); | |
1605 | } | |
1606 | ||
/* round robin, page-based FTL, and cost-based GC */
/* Target type descriptor registered with the lightnvm core at module load. */
static struct nvm_tgt_type tt_rrpc = {
	.name		= "rrpc",
	.version	= {1, 0, 0},

	.make_rq	= rrpc_make_rq,
	.capacity	= rrpc_capacity,
	.end_io		= rrpc_end_io,

	.init		= rrpc_init,
	.exit		= rrpc_exit,
};
1619 | ||
/* Module entry: register the rrpc target type with the lightnvm core. */
static int __init rrpc_module_init(void)
{
	return nvm_register_tgt_type(&tt_rrpc);
}
1624 | ||
/* Module exit: unregister the rrpc target type. */
static void rrpc_module_exit(void)
{
	nvm_unregister_tgt_type(&tt_rrpc);
}
1629 | ||
1630 | module_init(rrpc_module_init); | |
1631 | module_exit(rrpc_module_exit); | |
1632 | MODULE_LICENSE("GPL v2"); | |
1633 | MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs"); |