/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */
#ifndef RRPC_H_
#define RRPC_H_

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>

#include <linux/lightnvm.h>
/* Run GC only if less than 1/GC_LIMIT_INVERSE of the blocks are free */
#define GC_LIMIT_INVERSE 10
#define GC_TIME_SECS 100

#define RRPC_SECTOR (512)
#define RRPC_EXPOSED_PAGE_SIZE (4096)

#define NR_PHY_IN_LOG (RRPC_EXPOSED_PAGE_SIZE / RRPC_SECTOR)
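
/*
 * Illustrative compile-time check (an editorial addition, not required
 * by the interface): each 4096-byte exposed page must span a whole
 * number of 512-byte device sectors, here NR_PHY_IN_LOG = 4096 / 512 = 8.
 */
#if (RRPC_EXPOSED_PAGE_SIZE % RRPC_SECTOR) != 0
#error "RRPC_EXPOSED_PAGE_SIZE must be a multiple of RRPC_SECTOR"
#endif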
struct rrpc_inflight {
	struct list_head reqs;
	spinlock_t lock;
};
struct rrpc_inflight_rq {
	struct list_head list;
	sector_t l_start;
	sector_t l_end;
};

struct rrpc_rq {
	struct rrpc_inflight_rq inflight_rq;
};
struct rrpc_block {
	int id;				/* id inside of LUN */
	struct rrpc_lun *rlun;

	struct list_head prio;		/* LUN CG list */
	struct list_head list;		/* LUN free, used, bb list */

#define MAX_INVALID_PAGES_STORAGE 8
	/* Bitmap for invalid page entries */
	unsigned long invalid_pages[MAX_INVALID_PAGES_STORAGE];
	/* points to the next writable page within a block */
	unsigned int next_page;
	/* number of pages that are invalid, wrt host page size */
	unsigned int nr_invalid_pages;

	atomic_t data_cmnt_size;	/* data pages committed to stable storage */
};
struct rrpc_lun {
	struct rrpc *rrpc;
	int id;

	struct rrpc_block *cur, *gc_cur;
	struct rrpc_block *blocks;	/* Reference to block allocation */

	struct list_head prio_list;	/* Blocks that may be GC'ed */
	struct list_head wblk_list;	/* Queued blocks to be written to */

	struct list_head used_list;	/* In-use blocks */
	struct list_head free_list;	/* Not used blocks, i.e. released
					 * and ready for use
					 */
	struct list_head bb_list;	/* Bad blocks. Mutually exclusive with
					 * free_list and used_list
					 */
	unsigned int nr_free_blocks;	/* Number of unused blocks */

	struct work_struct ws_gc;
};
struct rrpc {
	struct nvm_tgt_dev *dev;
	struct gendisk *disk;

	sector_t soffset;		/* logical sector offset */

	struct rrpc_lun *luns;

	/* calculated values */
	unsigned long long nr_sects;

	/* Write strategy variables. Move these into a per-strategy
	 * structure if more strategies are added.
	 */
	atomic_t next_lun;		/* Whenever a page is written, this is
					 * updated to point to the next write
					 * lun
					 */

	struct bio_list requeue_bios;
	struct work_struct ws_requeue;

	/* Simple translation map of logical addresses to physical addresses.
	 * The logical addresses are known by the host system, while the
	 * physical addresses are used when writing to the disk block device.
	 */
	struct rrpc_addr *trans_map;
	/* also store a reverse map for garbage collection */
	struct rrpc_rev_addr *rev_trans_map;

	struct rrpc_inflight inflights;

	mempool_t *addr_pool;
	mempool_t *page_pool;

	struct timer_list gc_timer;
	struct workqueue_struct *krqd_wq;
	struct workqueue_struct *kgc_wq;
};
struct rrpc_block_gc {
	struct rrpc *rrpc;
	struct rrpc_block *rblk;
	struct work_struct ws_gc;
};
/* Logical to physical mapping */
struct rrpc_addr {
	u64 addr;
	struct rrpc_block *rblk;
};

/* Physical to logical mapping */
struct rrpc_rev_addr {
	u64 addr;
};
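
/*
 * Illustrative helper (hypothetical, for exposition only): the forward
 * map is indexed by logical page address, so resolving a logical page
 * to its current physical address is a single array lookup; GC walks
 * rev_trans_map for the opposite direction.
 */
static inline u64 rrpc_example_gaddr(struct rrpc *rrpc, sector_t laddr)
{
	/* trans_map[laddr] holds the physical address that currently
	 * backs logical page laddr.
	 */
	return rrpc->trans_map[laddr].addr;
}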
static inline struct ppa_addr rrpc_linear_to_generic_addr(struct nvm_geo *geo,
							  struct ppa_addr r)
{
	struct ppa_addr l;
	u32 secs, pgs;
	sector_t ppa = r.ppa;

	l.ppa = 0;

	div_u64_rem(ppa, geo->sec_per_pg, &secs);
	l.g.sec = secs;

	sector_div(ppa, geo->sec_per_pg);
	div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
	l.g.pg = pgs;

	return l;
}
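
/*
 * Worked example (geometry values assumed for illustration): with
 * geo->sec_per_pg = 4 and geo->pgs_per_blk = 256, a linear address
 * ppa = 1034 yields sec = 1034 % 4 = 2 and
 * pg = (1034 / 4) % 256 = 258 % 256 = 2.
 */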
static inline struct ppa_addr rrpc_recov_addr(struct nvm_tgt_dev *dev, u64 pba)
{
	struct ppa_addr r;

	r.ppa = pba;

	return rrpc_linear_to_generic_addr(&dev->geo, r);
}
static inline u64 rrpc_blk_to_ppa(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct nvm_geo *geo = &dev->geo;
	struct rrpc_lun *rlun = rblk->rlun;

	return (rlun->id * geo->sec_per_lun) + (rblk->id * geo->sec_per_blk);
}
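
/*
 * Worked example (geometry values assumed for illustration): with
 * geo->sec_per_lun = 65536 and geo->sec_per_blk = 1024, block 3 of
 * LUN 2 starts at (2 * 65536) + (3 * 1024) = 134144.
 */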
static inline sector_t rrpc_get_laddr(struct bio *bio)
{
	return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
}

static inline unsigned int rrpc_get_pages(struct bio *bio)
{
	return bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
}

static inline sector_t rrpc_get_sector(sector_t laddr)
{
	return laddr * NR_PHY_IN_LOG;
}
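
/*
 * Worked example (illustrative): a bio with bi_sector = 80 and
 * bi_size = 16384 maps to laddr = 80 / 8 = 10 and
 * pages = 16384 / 4096 = 4; rrpc_get_sector(10) = 10 * 8 = 80
 * recovers the starting 512-byte sector.
 */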
static inline int request_intersects(struct rrpc_inflight_rq *r,
				     sector_t laddr_start, sector_t laddr_end)
{
	return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
}
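
/*
 * Example (illustrative): an in-flight request covering [8, 15]
 * intersects a new request covering [12, 19], since 19 >= 8 and
 * 12 <= 15; disjoint ranges such as [0, 7] and [8, 15] do not.
 */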
static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
			     unsigned int pages, struct rrpc_inflight_rq *r)
{
	sector_t laddr_end = laddr + pages - 1;
	struct rrpc_inflight_rq *rtmp;

	WARN_ON(irqs_disabled());

	spin_lock_irq(&rrpc->inflights.lock);
	list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
		if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
			/* existing, overlapping request, come back later */
			spin_unlock_irq(&rrpc->inflights.lock);
			return 1;
		}
	}

	r->l_start = laddr;
	r->l_end = laddr_end;

	list_add_tail(&r->list, &rrpc->inflights.reqs);
	spin_unlock_irq(&rrpc->inflights.lock);

	return 0;
}
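
/*
 * Editorial note: __rrpc_lock_laddr() is non-blocking; a non-zero
 * return means an overlapping request is already in flight and the
 * caller is expected to back off and retry, e.g. by requeueing the
 * bio on rrpc->requeue_bios.
 */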
static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
				  unsigned int pages,
				  struct rrpc_inflight_rq *r)
{
	BUG_ON((laddr + pages) > rrpc->nr_sects);

	return __rrpc_lock_laddr(rrpc, laddr, pages, r);
}
static inline struct rrpc_inflight_rq *rrpc_get_inflight_rq(struct nvm_rq *rqd)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);

	return &rrqd->inflight_rq;
}
static inline int rrpc_lock_rq(struct rrpc *rrpc, struct bio *bio,
			       struct nvm_rq *rqd)
{
	sector_t laddr = rrpc_get_laddr(bio);
	unsigned int pages = rrpc_get_pages(bio);
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);

	return rrpc_lock_laddr(rrpc, laddr, pages, r);
}
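
/*
 * Typical pairing (illustrative sketch; error handling elided): lock
 * the bio's logical range before issuing the request, then release it
 * on completion with rrpc_unlock_rq(), defined below:
 *
 *	if (rrpc_lock_rq(rrpc, bio, rqd))
 *		return NVM_IO_REQUEUE;
 *	...submit rqd; on completion...
 *	rrpc_unlock_rq(rrpc, rqd);
 */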
static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
				     struct rrpc_inflight_rq *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rrpc->inflights.lock, flags);
	list_del_init(&r->list);
	spin_unlock_irqrestore(&rrpc->inflights.lock, flags);
}
static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	uint8_t pages = rqd->nr_ppas;

	BUG_ON((r->l_start + pages) > rrpc->nr_sects);

	rrpc_unlock_laddr(rrpc, r);
}

#endif /* RRPC_H_ */