/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#ifndef RRPC_H_
#define RRPC_H_
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>

#include <linux/lightnvm.h>
/* Only run GC if fewer than 1/X of the blocks are free */
#define GC_LIMIT_INVERSE 10
#define GC_TIME_SECS 100

#define RRPC_SECTOR (512)
#define RRPC_EXPOSED_PAGE_SIZE (4096)

#define NR_PHY_IN_LOG (RRPC_EXPOSED_PAGE_SIZE / RRPC_SECTOR)
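
/*
 * Worked example: with RRPC_EXPOSED_PAGE_SIZE = 4096 and RRPC_SECTOR = 512,
 * NR_PHY_IN_LOG = 4096 / 512 = 8, i.e. each 4 KiB logical page exposed to
 * the host spans eight 512 B device sectors. All laddr <-> sector
 * conversions below (rrpc_get_laddr(), rrpc_get_sector()) scale by this
 * factor.
 */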
struct rrpc_inflight {
	struct list_head reqs;
	spinlock_t lock;
};
struct rrpc_inflight_rq {
	struct list_head list;
	sector_t l_start;
	sector_t l_end;
};
/* Per-request private data, resolved from an nvm_rq via nvm_rq_to_pdu() */
struct rrpc_rq {
	struct rrpc_inflight_rq inflight_rq;
};
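
/*
 * The three structures above implement a simple range lock: every
 * in-flight request registers the closed logical interval
 * [l_start, l_end] it touches on rrpc->inflights.reqs. A new request
 * whose interval intersects a registered one must back off and retry;
 * see __rrpc_lock_laddr() below.
 */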
struct rrpc_block {
	int id;				/* id inside of LUN */
	struct rrpc_lun *rlun;

	struct list_head prio;		/* LUN CG list */
	struct list_head list;		/* LUN free, used, bb list */

#define MAX_INVALID_PAGES_STORAGE 8
	/* Bitmap for invalid page entries */
	unsigned long invalid_pages[MAX_INVALID_PAGES_STORAGE];
	/* points to the next writable page within a block */
	unsigned int next_page;
	/* number of pages that are invalid, wrt host page size */
	unsigned int nr_invalid_pages;

	atomic_t data_cmnt_size;	/* data pages committed to stable storage */
};
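
/*
 * Sizing note (an inference, not stated in the original): invalid_pages
 * holds MAX_INVALID_PAGES_STORAGE longs, i.e. 8 * BITS_PER_LONG bits.
 * On a 64-bit kernel that is 512 bits, so the bitmap can track page
 * validity for blocks of up to 512 host pages.
 */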
struct rrpc_lun {
	int id;				/* id of this LUN within the target */

	struct rrpc_block *cur, *gc_cur;
	struct rrpc_block *blocks;	/* Reference to block allocation */

	struct list_head prio_list;	/* Blocks that may be GC'ed */
	struct list_head wblk_list;	/* Queued blocks to be written to */

	struct list_head used_list;	/* In-use blocks */
	struct list_head free_list;	/* Unused blocks, i.e. released and
					 * ready for use
					 */
	struct list_head bb_list;	/* Bad blocks. Mutually exclusive with
					 * free_list and used_list
					 */
	unsigned int nr_free_blocks;	/* Number of unused blocks */

	struct work_struct ws_gc;
};
struct rrpc {
	/* instance must be kept at the top to resolve rrpc in unprep */
	struct nvm_tgt_instance instance;

	struct nvm_tgt_dev *dev;
	struct gendisk *disk;

	sector_t soffset;		/* logical sector offset */

	struct rrpc_lun *luns;

	/* calculated values */
	unsigned long long nr_sects;

	/* Write strategy variables. Move these into a separate structure
	 * per write strategy.
	 */
	atomic_t next_lun;		/* Whenever a page is written, this is
					 * updated to point to the next write
					 * LUN
					 */

	struct bio_list requeue_bios;
	struct work_struct ws_requeue;

	/* Simple translation map of logical addresses to physical addresses.
	 * The logical addresses are known by the host system, while the
	 * physical addresses are used when writing to the disk block device.
	 */
	struct rrpc_addr *trans_map;
	/* also store a reverse map for garbage collection */
	struct rrpc_rev_addr *rev_trans_map;

	struct rrpc_inflight inflights;

	mempool_t *addr_pool;
	mempool_t *page_pool;

	struct timer_list gc_timer;
	struct workqueue_struct *krqd_wq;
	struct workqueue_struct *kgc_wq;
};
struct rrpc_block_gc {
	struct rrpc *rrpc;
	struct rrpc_block *rblk;
	struct work_struct ws_gc;
};
/* Logical to physical mapping */
struct rrpc_addr {
	u64 addr;
	struct rrpc_block *rblk;
};
/* Physical to logical mapping */
struct rrpc_rev_addr {
	u64 addr;
};
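
/*
 * trans_map and rev_trans_map (in struct rrpc above) are arrays of these
 * two types: trans_map is indexed by logical page and yields the physical
 * address plus its owning block, while rev_trans_map is indexed by
 * physical page and yields the logical address, which lets the garbage
 * collector look up which logical page a victim sector belongs to before
 * moving it.
 */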
static inline struct ppa_addr rrpc_linear_to_generic_addr(struct nvm_geo *geo,
							  struct ppa_addr r)
{
	struct ppa_addr l;
	int secs, pgs;
	sector_t ppa = r.ppa;

	l.ppa = 0;

	div_u64_rem(ppa, geo->sec_per_pg, &secs);
	l.g.sec = secs;

	sector_div(ppa, geo->sec_per_pg);
	div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
	l.g.pg = pgs;

	return l;
}
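
/*
 * Worked example (illustrative geometry, not from the original): with
 * geo->sec_per_pg = 4 and geo->pgs_per_blk = 256, a linear address of
 * r.ppa = 1027 decomposes as sec = 1027 % 4 = 3 and
 * pg = (1027 / 4) % 256 = 256 % 256 = 0.
 */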
static inline struct ppa_addr rrpc_recov_addr(struct nvm_tgt_dev *dev, u64 pba)
{
	struct ppa_addr r;

	r.ppa = pba;
	return rrpc_linear_to_generic_addr(&dev->geo, r);
}
static inline u64 rrpc_blk_to_ppa(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct nvm_geo *geo = &dev->geo;
	struct rrpc_lun *rlun = rblk->rlun;

	return (rlun->id * geo->sec_per_lun) + (rblk->id * geo->sec_per_blk);
}
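
/*
 * Worked example (illustrative geometry, not from the original): with
 * geo->sec_per_lun = 65536 and geo->sec_per_blk = 256, block id 3 in
 * LUN id 2 starts at linear sector 2 * 65536 + 3 * 256 = 131840. This
 * assumes sec_per_lun = blks_per_lun * sec_per_blk, so per-LUN ranges
 * do not overlap.
 */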
static inline sector_t rrpc_get_laddr(struct bio *bio)
{
	return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
}
static inline unsigned int rrpc_get_pages(struct bio *bio)
{
	return bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
}
static inline sector_t rrpc_get_sector(sector_t laddr)
{
	return laddr * NR_PHY_IN_LOG;
}
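
/*
 * Example: a bio with bi_iter.bi_sector = 80 and bi_iter.bi_size = 16384
 * maps to laddr = 80 / 8 = 10 and rrpc_get_pages() = 16384 / 4096 = 4,
 * i.e. four exposed pages starting at logical page 10;
 * rrpc_get_sector(10) converts back to sector 80.
 */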
static inline int request_intersects(struct rrpc_inflight_rq *r,
				     sector_t laddr_start, sector_t laddr_end)
{
	return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
}
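
/*
 * This is the standard closed-interval overlap test: [a, b] and [c, d]
 * intersect iff b >= c && a <= d. Both interval ends are inclusive,
 * matching laddr_end = laddr + pages - 1 below.
 */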
static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
			     unsigned int pages, struct rrpc_inflight_rq *r)
{
	sector_t laddr_end = laddr + pages - 1;
	struct rrpc_inflight_rq *rtmp;

	WARN_ON(irqs_disabled());

	spin_lock_irq(&rrpc->inflights.lock);
	list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
		if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
			/* existing, overlapping request, come back later */
			spin_unlock_irq(&rrpc->inflights.lock);
			return 1;
		}
	}

	r->l_start = laddr;
	r->l_end = laddr_end;

	list_add_tail(&r->list, &rrpc->inflights.reqs);
	spin_unlock_irq(&rrpc->inflights.lock);
	return 0;
}
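
/*
 * Return convention (inferred from the locking code above): 1 means an
 * overlapping request is already in flight and the caller should requeue
 * the bio; 0 means the range [laddr, laddr_end] is now registered and
 * the request may proceed.
 */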
static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
				  unsigned int pages,
				  struct rrpc_inflight_rq *r)
{
	BUG_ON((laddr + pages) > rrpc->nr_sects);

	return __rrpc_lock_laddr(rrpc, laddr, pages, r);
}
static inline struct rrpc_inflight_rq *rrpc_get_inflight_rq(struct nvm_rq *rqd)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);

	return &rrqd->inflight_rq;
}
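
/*
 * nvm_rq_to_pdu() returns the target-private area allocated together
 * with the nvm_rq, so the inflight descriptor lives in per-request
 * storage and needs no separate allocation or freeing.
 */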
static inline int rrpc_lock_rq(struct rrpc *rrpc, struct bio *bio,
			       struct nvm_rq *rqd)
{
	sector_t laddr = rrpc_get_laddr(bio);
	unsigned int pages = rrpc_get_pages(bio);
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);

	return rrpc_lock_laddr(rrpc, laddr, pages, r);
}
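
/*
 * A minimal usage sketch (hypothetical caller, not taken from rrpc.c):
 * the submission path takes the range lock before mapping the request
 * and releases it from the completion path:
 *
 *	if (rrpc_lock_rq(rrpc, bio, rqd))
 *		return NVM_IO_REQUEUE;	// overlapping I/O, try again later
 *	// ... map and submit rqd ...
 *	// on completion:
 *	rrpc_unlock_rq(rrpc, rqd);
 */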
static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
				     struct rrpc_inflight_rq *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rrpc->inflights.lock, flags);
	list_del_init(&r->list);
	spin_unlock_irqrestore(&rrpc->inflights.lock, flags);
}
static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	uint8_t pages = rqd->nr_ppas;

	BUG_ON((r->l_start + pages) > rrpc->nr_sects);

	rrpc_unlock_laddr(rrpc, r);
}

#endif /* RRPC_H_ */