/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#ifndef RRPC_H_
#define RRPC_H_

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>

#include <linux/lightnvm.h>

/* Run GC only if fewer than 1/X of the blocks are free */
#define GC_LIMIT_INVERSE 10
#define GC_TIME_SECS 100

#define RRPC_SECTOR (512)
#define RRPC_EXPOSED_PAGE_SIZE (4096)

#define NR_PHY_IN_LOG (RRPC_EXPOSED_PAGE_SIZE / RRPC_SECTOR)
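/* i.e. 4096 / 512 = 8 device sectors back one exposed 4 KiB page */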

struct rrpc_inflight {
	struct list_head reqs;
	spinlock_t lock;
};

struct rrpc_inflight_rq {
	struct list_head list;
	sector_t l_start;
	sector_t l_end;
};

struct rrpc_rq {
	struct rrpc_inflight_rq inflight_rq;
	unsigned long flags;
};

struct rrpc_block {
	int id;				/* id inside of LUN */
	struct rrpc_lun *rlun;

	struct list_head prio;		/* LUN GC list */
	struct list_head list;		/* LUN free, used, bb list */

#define MAX_INVALID_PAGES_STORAGE 8
	/* Bitmap for invalid page entries */
	unsigned long invalid_pages[MAX_INVALID_PAGES_STORAGE];
	/* points to the next writable page within a block */
	unsigned int next_page;
	/* number of pages that are invalid, wrt host page size */
	unsigned int nr_invalid_pages;

	int state;

	spinlock_t lock;
	atomic_t data_cmnt_size;	/* data pages committed to stable storage */
};

struct rrpc_lun {
	struct rrpc *rrpc;

	int id;
	struct ppa_addr bppa;

	struct rrpc_block *cur, *gc_cur;
	struct rrpc_block *blocks;	/* Reference to block allocation */

	struct list_head prio_list;	/* Blocks that may be GC'ed */
	struct list_head wblk_list;	/* Queued blocks to be written to */

	/* lun block lists */
	struct list_head used_list;	/* In-use blocks */
	struct list_head free_list;	/* Unused blocks, i.e. released
					 * and ready for use
					 */
	struct list_head bb_list;	/* Bad blocks. Mutually exclusive with
					 * free_list and used_list
					 */
	unsigned int nr_free_blocks;	/* Number of unused blocks */

	struct work_struct ws_gc;

	int reserved_blocks;

	spinlock_t lock;
};

struct rrpc {
	struct nvm_tgt_dev *dev;
	struct gendisk *disk;

	sector_t soffset;		/* logical sector offset */

	int nr_luns;
	struct rrpc_lun *luns;

	/* calculated values */
	unsigned long long nr_sects;

	/* Write strategy variables. Move these into a separate structure
	 * for each strategy.
	 */
	atomic_t next_lun;		/* Whenever a page is written, this is updated
					 * to point to the next write lun
					 */

	spinlock_t bio_lock;
	struct bio_list requeue_bios;
	struct work_struct ws_requeue;

	/* Simple translation map of logical addresses to physical addresses.
	 * The logical addresses are known by the host system, while the
	 * physical addresses are used when writing to the disk block device.
	 */
	struct rrpc_addr *trans_map;
	/* also store a reverse map for garbage collection */
	struct rrpc_rev_addr *rev_trans_map;
	spinlock_t rev_lock;

	struct rrpc_inflight inflights;

	mempool_t *addr_pool;
	mempool_t *page_pool;
	mempool_t *gcb_pool;
	mempool_t *rq_pool;

	struct timer_list gc_timer;
	struct workqueue_struct *krqd_wq;
	struct workqueue_struct *kgc_wq;
};
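
/*
 * Translation map lookup sketch (illustrative only; the real accessors
 * live in rrpc.c). trans_map is indexed by logical address:
 *
 *	struct rrpc_addr *gp = &rrpc->trans_map[laddr];
 *
 *	if (gp->rblk)
 *		use gp->addr as the device address backing laddr
 */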

struct rrpc_block_gc {
	struct rrpc *rrpc;
	struct rrpc_block *rblk;
	struct work_struct ws_gc;
};

/* Logical to physical mapping */
struct rrpc_addr {
	u64 addr;
	struct rrpc_block *rblk;
};

/* Physical to logical mapping */
struct rrpc_rev_addr {
	u64 addr;
};

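/*
 * Decompose a linear device sector into its per-page sector and
 * in-block page components by successive div/mod on the geometry.
 * For example, assuming sec_per_pg = 4 and pgs_per_blk = 256, linear
 * sector 1029 yields sec = 1029 % 4 = 1 and pg = (1029 / 4) % 256 = 1.
 */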
static inline struct ppa_addr rrpc_linear_to_generic_addr(struct nvm_geo *geo,
							  struct ppa_addr r)
{
	struct ppa_addr l;
	u32 secs, pgs;
	sector_t ppa = r.ppa;

	l.ppa = 0;

	div_u64_rem(ppa, geo->sec_per_pg, &secs);
	l.g.sec = secs;

	sector_div(ppa, geo->sec_per_pg);
	div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
	l.g.pg = pgs;

	return l;
}

static inline struct ppa_addr rrpc_recov_addr(struct nvm_tgt_dev *dev, u64 pba)
{
	struct ppa_addr gaddr;

	gaddr.ppa = pba;
	return rrpc_linear_to_generic_addr(&dev->geo, gaddr);
}

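/*
 * Linear start sector of a block: LUNs are laid out back to back,
 * sec_per_lun sectors each, with sec_per_blk-sized blocks inside.
 * E.g. assuming sec_per_lun = 1024 and sec_per_blk = 256, block 2 of
 * LUN 1 starts at 1 * 1024 + 2 * 256 = 1536.
 */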
static inline u64 rrpc_blk_to_ppa(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct nvm_geo *geo = &dev->geo;
	struct rrpc_lun *rlun = rblk->rlun;

	return (rlun->id * geo->sec_per_lun) + (rblk->id * geo->sec_per_blk);
}

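/*
 * bio helpers: convert between 512 B block-layer sectors and the
 * 4 KiB pages rrpc exposes. E.g. a bio at bi_sector 80 with
 * bi_size 16384 maps to laddr 80 / 8 = 10, spanning
 * 16384 / 4096 = 4 pages.
 */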
static inline sector_t rrpc_get_laddr(struct bio *bio)
{
	return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
}

static inline unsigned int rrpc_get_pages(struct bio *bio)
{
	return bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
}

static inline sector_t rrpc_get_sector(sector_t laddr)
{
	return laddr * NR_PHY_IN_LOG;
}

static inline int request_intersects(struct rrpc_inflight_rq *r,
				     sector_t laddr_start, sector_t laddr_end)
{
	return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
}

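/*
 * Per-target range lock: a request claims the logical range
 * [laddr, laddr + pages - 1] by linking its rrpc_inflight_rq into
 * rrpc->inflights. Returns 0 when the range was claimed, or 1 when an
 * overlapping request is already in flight and the caller must back
 * off and retry.
 */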
static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
			     unsigned int pages, struct rrpc_inflight_rq *r)
{
	sector_t laddr_end = laddr + pages - 1;
	struct rrpc_inflight_rq *rtmp;

	WARN_ON(irqs_disabled());

	spin_lock_irq(&rrpc->inflights.lock);
	list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
		if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
			/* existing, overlapping request, come back later */
			spin_unlock_irq(&rrpc->inflights.lock);
			return 1;
		}
	}

	r->l_start = laddr;
	r->l_end = laddr_end;

	list_add_tail(&r->list, &rrpc->inflights.reqs);
	spin_unlock_irq(&rrpc->inflights.lock);
	return 0;
}

static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
				  unsigned int pages,
				  struct rrpc_inflight_rq *r)
{
	BUG_ON((laddr + pages) > rrpc->nr_sects);

	return __rrpc_lock_laddr(rrpc, laddr, pages, r);
}

static inline struct rrpc_inflight_rq *rrpc_get_inflight_rq(struct nvm_rq *rqd)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);

	return &rrqd->inflight_rq;
}

static inline int rrpc_lock_rq(struct rrpc *rrpc, struct bio *bio,
			       struct nvm_rq *rqd)
{
	sector_t laddr = rrpc_get_laddr(bio);
	unsigned int pages = rrpc_get_pages(bio);
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);

	return rrpc_lock_laddr(rrpc, laddr, pages, r);
}
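
/*
 * Typical caller pattern (sketch; the real submission path in rrpc.c
 * requeues the bio when the range is busy):
 *
 *	if (rrpc_lock_rq(rrpc, bio, rqd))
 *		return NVM_IO_REQUEUE;
 *
 * and rrpc_unlock_rq(rrpc, rqd) is called once the request completes.
 */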

static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
				     struct rrpc_inflight_rq *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rrpc->inflights.lock, flags);
	list_del_init(&r->list);
	spin_unlock_irqrestore(&rrpc->inflights.lock, flags);
}

static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	uint8_t pages = rqd->nr_ppas;

	BUG_ON((r->l_start + pages) > rrpc->nr_sects);

	rrpc_unlock_laddr(rrpc, r);
}

#endif /* RRPC_H_ */