/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#ifndef RRPC_H_
#define RRPC_H_

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>

#include <linux/lightnvm.h>

/* Only run GC if less than 1/X of blocks are free */
#define GC_LIMIT_INVERSE 10
#define GC_TIME_SECS 100

#define RRPC_SECTOR (512)
#define RRPC_EXPOSED_PAGE_SIZE (4096)

#define NR_PHY_IN_LOG (RRPC_EXPOSED_PAGE_SIZE / RRPC_SECTOR)
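
/*
 * Editorial note: with 512-byte device sectors and a 4096-byte exposed
 * page, NR_PHY_IN_LOG is 4096 / 512 = 8, i.e. each logical 4K page
 * spans eight 512-byte sectors. The helpers further down divide and
 * multiply by this constant when converting between bio sectors and
 * logical page addresses.
 */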

struct rrpc_inflight {
	struct list_head reqs;
	spinlock_t lock;
};

struct rrpc_inflight_rq {
	struct list_head list;
	sector_t l_start;	/* first logical 4K address, inclusive */
	sector_t l_end;		/* last logical 4K address, inclusive */
};

struct rrpc_rq {
	struct rrpc_inflight_rq inflight_rq;
	struct rrpc_addr *addr;
	unsigned long flags;
};

struct rrpc_block {
	struct nvm_block *parent;
	struct rrpc_lun *rlun;
	struct list_head prio;

#define MAX_INVALID_PAGES_STORAGE 8
	/* Bitmap for invalid page entries */
	unsigned long invalid_pages[MAX_INVALID_PAGES_STORAGE];
	/* points to the next writable page within a block */
	unsigned int next_page;
	/* number of pages that are invalid, wrt host page size */
	unsigned int nr_invalid_pages;

	spinlock_t lock;
	atomic_t data_cmnt_size; /* data pages committed to stable storage */
};
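
/*
 * Editorial note (assumption from usage): invalid_pages holds one bit
 * per host page, so the eight unsigned longs above can track up to
 * 8 * BITS_PER_LONG pages per block (512 on a 64-bit build).
 */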

struct rrpc_lun {
	struct rrpc *rrpc;
	struct nvm_lun *parent;
	struct rrpc_block *cur, *gc_cur;
	struct rrpc_block *blocks;	/* Reference to block allocation */

	struct list_head prio_list;	/* Blocks that may be GC'ed */
	struct list_head wblk_list;	/* Queued blocks to be written to */

	struct work_struct ws_gc;

	spinlock_t lock;
};

struct rrpc {
	/* instance must be kept at the top so rrpc can be resolved in unprep */
	struct nvm_tgt_instance instance;

	struct nvm_dev *dev;
	struct gendisk *disk;

	sector_t soffset; /* logical sector offset */
	u64 poffset; /* physical page offset */
	int lun_offset;

	int nr_luns;
	struct rrpc_lun *luns;

	/* calculated values */
	unsigned long long nr_sects;
	unsigned long total_blocks;

	/* Write strategy variables. Move these into a per-strategy
	 * structure.
	 */
	atomic_t next_lun; /* Whenever a page is written, this is updated
			    * to point to the next write lun
			    */

	spinlock_t bio_lock;
	struct bio_list requeue_bios;
	struct work_struct ws_requeue;

	/* Simple translation map of logical addresses to physical addresses.
	 * The logical addresses are known by the host system, while the
	 * physical addresses are used when writing to the disk block device.
	 */
	struct rrpc_addr *trans_map;
	/* also store a reverse map for garbage collection */
	struct rrpc_rev_addr *rev_trans_map;
	spinlock_t rev_lock;

	struct rrpc_inflight inflights;

	mempool_t *addr_pool;
	mempool_t *page_pool;
	mempool_t *gcb_pool;
	mempool_t *rq_pool;

	struct timer_list gc_timer;
	struct workqueue_struct *krqd_wq;
	struct workqueue_struct *kgc_wq;
};

struct rrpc_block_gc {
	struct rrpc *rrpc;
	struct rrpc_block *rblk;
	struct work_struct ws_gc;
};

/* Logical to physical mapping */
struct rrpc_addr {
	u64 addr;
	struct rrpc_block *rblk;
};

/* Physical to logical mapping */
struct rrpc_rev_addr {
	u64 addr;
};
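
/*
 * Hypothetical sketch (editorial, not driver code): with the maps above,
 * forward translation is a direct index into trans_map, and rev_trans_map
 * inverts it for garbage collection. The caller is assumed to hold the
 * relevant laddr inflight lock so the entry cannot change underneath it.
 */
static inline u64 rrpc_example_l2p(struct rrpc *rrpc, sector_t laddr)
{
	/* one rrpc_addr per logical 4K page; .addr is the device address */
	return rrpc->trans_map[laddr].addr;
}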

static inline struct rrpc_block *rrpc_get_rblk(struct rrpc_lun *rlun,
							int blk_id)
{
	struct rrpc *rrpc = rlun->rrpc;
	int lun_blk = blk_id % rrpc->dev->blks_per_lun;

	return &rlun->blocks[lun_blk];
}

static inline sector_t rrpc_get_laddr(struct bio *bio)
{
	return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
}

static inline unsigned int rrpc_get_pages(struct bio *bio)
{
	return bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
}

static inline sector_t rrpc_get_sector(sector_t laddr)
{
	return laddr * NR_PHY_IN_LOG;
}
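
/*
 * Worked example (editorial): a bio with bi_sector == 16 and
 * bi_size == 8192 covers laddr 16 / 8 == 2 for 8192 / 4096 == 2
 * logical 4K pages; rrpc_get_sector(2) maps back to sector 16.
 */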

/* Two closed intervals [r->l_start, r->l_end] and [laddr_start, laddr_end]
 * overlap iff each starts at or before the other ends.
 */
static inline int request_intersects(struct rrpc_inflight_rq *r,
				sector_t laddr_start, sector_t laddr_end)
{
	return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
}

static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
			     unsigned int pages, struct rrpc_inflight_rq *r)
{
	sector_t laddr_end = laddr + pages - 1;
	struct rrpc_inflight_rq *rtmp;

	WARN_ON(irqs_disabled());

	spin_lock_irq(&rrpc->inflights.lock);
	list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
		if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
			/* existing, overlapping request, come back later */
			spin_unlock_irq(&rrpc->inflights.lock);
			return 1;
		}
	}

	r->l_start = laddr;
	r->l_end = laddr_end;

	list_add_tail(&r->list, &rrpc->inflights.reqs);
	spin_unlock_irq(&rrpc->inflights.lock);
	return 0;
}
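
/*
 * Usage sketch (editorial): a non-zero return from the lock helpers means
 * an overlapping request is already in flight; callers are expected to
 * back off and retry later, e.g.
 *
 *	if (rrpc_lock_rq(rrpc, bio, rqd))
 *		return NVM_IO_REQUEUE;
 */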

static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
				  unsigned int pages,
				  struct rrpc_inflight_rq *r)
{
	BUG_ON((laddr + pages) > rrpc->nr_sects);

	return __rrpc_lock_laddr(rrpc, laddr, pages, r);
}

static inline struct rrpc_inflight_rq *rrpc_get_inflight_rq(struct nvm_rq *rqd)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);

	return &rrqd->inflight_rq;
}

static inline int rrpc_lock_rq(struct rrpc *rrpc, struct bio *bio,
			       struct nvm_rq *rqd)
{
	sector_t laddr = rrpc_get_laddr(bio);
	unsigned int pages = rrpc_get_pages(bio);
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);

	return rrpc_lock_laddr(rrpc, laddr, pages, r);
}

static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
				     struct rrpc_inflight_rq *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rrpc->inflights.lock, flags);
	list_del_init(&r->list);
	spin_unlock_irqrestore(&rrpc->inflights.lock, flags);
}

static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	uint8_t pages = rqd->nr_ppas;

	BUG_ON((r->l_start + pages) > rrpc->nr_sects);

	rrpc_unlock_laddr(rrpc, r);
}

#endif /* RRPC_H_ */