/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#ifndef RRPC_H_
#define RRPC_H_

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>

#include <linux/lightnvm.h>

/* Run GC only if less than 1/X of the blocks are free */
#define GC_LIMIT_INVERSE 10
#define GC_TIME_SECS 100

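/*
 * Illustrative note (not from the original source): with GC_LIMIT_INVERSE
 * set to 10, garbage collection is expected to start once fewer than about
 * one tenth of the blocks remain free, e.g. below ~102 free blocks for a
 * LUN with 1024 blocks. The actual check is assumed to live in the target
 * implementation (rrpc.c), not in this header.
 */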
#define RRPC_SECTOR (512)
#define RRPC_EXPOSED_PAGE_SIZE (4096)

#define NR_PHY_IN_LOG (RRPC_EXPOSED_PAGE_SIZE / RRPC_SECTOR)

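/*
 * Illustrative note (not from the original source): NR_PHY_IN_LOG evaluates
 * to 4096 / 512 = 8, i.e. each exposed 4 KB logical page covers eight
 * 512-byte device sectors. This is the factor used by rrpc_get_laddr() and
 * rrpc_get_sector() below to convert between bio sectors and logical pages.
 */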
/* Tracks logical address ranges that currently have requests in flight */
struct rrpc_inflight {
	struct list_head reqs;
	spinlock_t lock;
};

/* One in-flight logical range [l_start, l_end], linked into rrpc_inflight.reqs */
struct rrpc_inflight_rq {
	struct list_head list;
	sector_t l_start;
	sector_t l_end;
};

/* Per-request private data (nvm_rq PDU) */
struct rrpc_rq {
	struct rrpc_inflight_rq inflight_rq;
	unsigned long flags;
};

struct rrpc_block {
	unsigned long id;
	struct nvm_block *parent;
	struct rrpc_lun *rlun;

	struct list_head prio;		/* LUN CG list */
	struct list_head list;		/* LUN free, used, bb list */

#define MAX_INVALID_PAGES_STORAGE 8
	/* Bitmap for invalid page entries */
	unsigned long invalid_pages[MAX_INVALID_PAGES_STORAGE];
	/* points to the next writable page within a block */
	unsigned int next_page;
	/* number of pages that are invalid, wrt host page size */
	unsigned int nr_invalid_pages;

	int state;

	spinlock_t lock;
	atomic_t data_cmnt_size;	/* data pages committed to stable storage */
};

struct rrpc_lun {
	struct rrpc *rrpc;
	struct nvm_lun *parent;

	struct rrpc_block *cur, *gc_cur;
	struct rrpc_block *blocks;	/* Reference to block allocation */

	struct list_head prio_list;	/* Blocks that may be GC'ed */
	struct list_head wblk_list;	/* Queued blocks to be written to */

	struct work_struct ws_gc;

	int reserved_blocks;

	spinlock_t lock;
};

struct rrpc {
	/* instance must be kept at the top to resolve rrpc in unprep */
	struct nvm_tgt_instance instance;

	struct nvm_tgt_dev *dev;
	struct gendisk *disk;

	sector_t soffset;	/* logical sector offset */
	u64 poffset;		/* physical page offset */

	int nr_luns;
	struct rrpc_lun *luns;

	/* calculated values */
	unsigned long long nr_sects;

	/* Write strategy variables. Move these into a separate structure for
	 * each strategy.
	 */
	atomic_t next_lun;	/* Whenever a page is written, this is updated
				 * to point to the next write lun
				 */

	spinlock_t bio_lock;
	struct bio_list requeue_bios;
	struct work_struct ws_requeue;

	/* Simple translation map of logical addresses to physical addresses.
	 * The logical addresses are known by the host system, while the
	 * physical addresses are used when writing to the disk block device.
	 */
	struct rrpc_addr *trans_map;
	/* also store a reverse map for garbage collection */
	struct rrpc_rev_addr *rev_trans_map;
	spinlock_t rev_lock;

	struct rrpc_inflight inflights;

	mempool_t *addr_pool;
	mempool_t *page_pool;
	mempool_t *gcb_pool;
	mempool_t *rq_pool;

	struct timer_list gc_timer;
	struct workqueue_struct *krqd_wq;
	struct workqueue_struct *kgc_wq;
};

/* GC work item pairing an rrpc instance with the block to collect */
struct rrpc_block_gc {
	struct rrpc *rrpc;
	struct rrpc_block *rblk;
	struct work_struct ws_gc;
};

/* Logical to physical mapping */
struct rrpc_addr {
	u64 addr;
	struct rrpc_block *rblk;
};

/* Physical to logical mapping */
struct rrpc_rev_addr {
	u64 addr;
};

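/*
 * Illustrative sketch, not part of the original header: it only shows how
 * the forward map above is meant to be indexed. trans_map is assumed to be
 * a flat array with one rrpc_addr per logical 4 KB page, so a logical page
 * address selects its entry directly. rrpc_lookup_example() is a
 * hypothetical name used purely for illustration.
 */
static inline struct rrpc_addr *rrpc_lookup_example(struct rrpc *rrpc,
						    sector_t laddr)
{
	/* one entry per logical page; the caller must ensure laddr < nr_sects */
	return &rrpc->trans_map[laddr];
}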
static inline struct rrpc_block *rrpc_get_rblk(struct rrpc_lun *rlun,
							int blk_id)
{
	struct rrpc *rrpc = rlun->rrpc;
	struct nvm_tgt_dev *dev = rrpc->dev;
	int lun_blk = blk_id % dev->geo.blks_per_lun;

	return &rlun->blocks[lun_blk];
}

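/*
 * Illustrative note (not from the original source): the modulo by
 * dev->geo.blks_per_lun reduces a block id that may exceed the per-LUN
 * count down to an index into rlun->blocks. For example, with 1020 blocks
 * per LUN, block id 2500 maps to in-LUN index 2500 % 1020 = 460.
 */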
static inline sector_t rrpc_get_laddr(struct bio *bio)
{
	return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
}

static inline unsigned int rrpc_get_pages(struct bio *bio)
{
	return bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
}

static inline sector_t rrpc_get_sector(sector_t laddr)
{
	return laddr * NR_PHY_IN_LOG;
}

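/*
 * Illustrative example (not from the original source): a bio starting at
 * sector 80 with bi_size == 16384 bytes maps to logical page 80 / 8 = 10
 * and spans 16384 / 4096 = 4 pages, i.e. logical pages 10..13;
 * rrpc_get_sector(10) converts back to sector 80.
 */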
static inline int request_intersects(struct rrpc_inflight_rq *r,
				sector_t laddr_start, sector_t laddr_end)
{
	return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
}

static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
			     unsigned int pages, struct rrpc_inflight_rq *r)
{
	sector_t laddr_end = laddr + pages - 1;
	struct rrpc_inflight_rq *rtmp;

	WARN_ON(irqs_disabled());

	spin_lock_irq(&rrpc->inflights.lock);
	list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
		if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
			/* existing, overlapping request, come back later */
			spin_unlock_irq(&rrpc->inflights.lock);
			return 1;
		}
	}

	r->l_start = laddr;
	r->l_end = laddr_end;

	list_add_tail(&r->list, &rrpc->inflights.reqs);
	spin_unlock_irq(&rrpc->inflights.lock);
	return 0;
}

static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
				  unsigned int pages,
				  struct rrpc_inflight_rq *r)
{
	BUG_ON((laddr + pages) > rrpc->nr_sects);

	return __rrpc_lock_laddr(rrpc, laddr, pages, r);
}

static inline struct rrpc_inflight_rq *rrpc_get_inflight_rq(struct nvm_rq *rqd)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);

	return &rrqd->inflight_rq;
}

static inline int rrpc_lock_rq(struct rrpc *rrpc, struct bio *bio,
			       struct nvm_rq *rqd)
{
	sector_t laddr = rrpc_get_laddr(bio);
	unsigned int pages = rrpc_get_pages(bio);
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);

	return rrpc_lock_laddr(rrpc, laddr, pages, r);
}

static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
				     struct rrpc_inflight_rq *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rrpc->inflights.lock, flags);
	list_del_init(&r->list);
	spin_unlock_irqrestore(&rrpc->inflights.lock, flags);
}

static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	uint8_t pages = rqd->nr_ppas;

	BUG_ON((r->l_start + pages) > rrpc->nr_sects);

	rrpc_unlock_laddr(rrpc, r);
}

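/*
 * Illustrative usage sketch, not part of the original header: it only shows
 * how the locking helpers above are meant to pair up. A submission path
 * takes the per-range lock with rrpc_lock_rq() before issuing the request
 * and releases it with rrpc_unlock_rq() once the request has completed.
 * rrpc_submit_example() is a hypothetical function used purely for
 * illustration; the real submission and completion paths are assumed to
 * live in the target implementation (rrpc.c).
 */
static inline int rrpc_submit_example(struct rrpc *rrpc, struct bio *bio,
				      struct nvm_rq *rqd)
{
	if (rrpc_lock_rq(rrpc, bio, rqd))
		return -EBUSY;	/* overlapping request in flight, retry later */

	/* ... set up the ppa list and submit rqd to the device here ... */

	/* on completion the range lock is dropped again */
	rrpc_unlock_rq(rrpc, rqd);
	return 0;
}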
#endif /* RRPC_H_ */