]>
Commit | Line | Data |
---|---|---|
9f95a23c TL |
1 | /*- |
2 | * BSD LICENSE | |
3 | * | |
4 | * Copyright (c) Intel Corporation. | |
5 | * All rights reserved. | |
6 | * | |
7 | * Redistribution and use in source and binary forms, with or without | |
8 | * modification, are permitted provided that the following conditions | |
9 | * are met: | |
10 | * | |
11 | * * Redistributions of source code must retain the above copyright | |
12 | * notice, this list of conditions and the following disclaimer. | |
13 | * * Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in | |
15 | * the documentation and/or other materials provided with the | |
16 | * distribution. | |
17 | * * Neither the name of Intel Corporation nor the names of its | |
18 | * contributors may be used to endorse or promote products derived | |
19 | * from this software without specific prior written permission. | |
20 | * | |
21 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
22 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
23 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
24 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
25 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
26 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
27 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
28 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
29 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
31 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
32 | */ | |
33 | ||
34 | #ifndef FTL_CORE_H | |
35 | #define FTL_CORE_H | |
36 | ||
37 | #include "spdk/stdinc.h" | |
38 | #include "spdk/nvme.h" | |
39 | #include "spdk/nvme_ocssd.h" | |
40 | #include "spdk/uuid.h" | |
41 | #include "spdk/thread.h" | |
42 | #include "spdk/util.h" | |
43 | #include "spdk_internal/log.h" | |
44 | #include "spdk/queue.h" | |
45 | #include "spdk/ftl.h" | |
46 | #include "spdk/bdev.h" | |
47 | ||
48 | #include "ftl_ppa.h" | |
49 | #include "ftl_io.h" | |
50 | #include "ftl_trace.h" | |
51 | ||
/* Forward declarations of FTL-internal types referenced by pointer below. */
struct spdk_ftl_dev;
struct ftl_band;
struct ftl_chunk;
struct ftl_io;
struct ftl_restore;
struct ftl_wptr;
struct ftl_flush;
struct ftl_reloc;
struct ftl_anm_event;
61 | ||
/* Per-device I/O statistics and tracing state. */
struct ftl_stats {
	/* Number of writes scheduled directly by the user */
	uint64_t				write_user;

	/* Total number of writes (user + internal, e.g. relocation) */
	uint64_t				write_total;

	/* Traces */
	struct ftl_trace			trace;

	/* Number of times each write limit level was applied */
	uint64_t				limits[SPDK_FTL_LIMIT_MAX];
};
75 | ||
/* A single parallel unit of the underlying open-channel device. */
struct ftl_punit {
	/* Owning device */
	struct spdk_ftl_dev			*dev;

	/* First PPA belonging to this parallel unit */
	struct ftl_ppa				start_ppa;
};
81 | ||
/* Worker thread context: an SPDK thread plus the NVMe qpair and poller
 * it drives. The device keeps one for writes (core) and one for reads.
 */
struct ftl_thread {
	/* Owner */
	struct spdk_ftl_dev			*dev;
	/* I/O queue pair */
	struct spdk_nvme_qpair			*qpair;

	/* Thread on which the poller is running */
	struct spdk_thread			*thread;

	/* Poller */
	struct spdk_poller			*poller;
	/* Poller's function */
	spdk_poller_fn				poller_fn;
	/* Poller's frequency (microseconds between invocations) */
	uint64_t				period_us;
};
98 | ||
/* Device-wide metadata persisted on disk and restored at startup. */
struct ftl_global_md {
	/* Device instance UUID */
	struct spdk_uuid			uuid;
	/* Size of the l2p table (number of logical blocks) */
	uint64_t				num_lbas;
};
105 | ||
/* Non-volatile write buffer cache backed by a separate bdev. */
struct ftl_nv_cache {
	/* Write buffer cache bdev */
	struct spdk_bdev_desc			*bdev_desc;
	/* Write pointer (next block to be written on the cache bdev) */
	uint64_t				current_addr;
	/* Number of available blocks left */
	uint64_t				num_available;
	/* Cache lock protecting the fields above */
	pthread_spinlock_t			lock;
};
116 | ||
/* Main FTL device structure. Ties together the device geometry, bands,
 * L2P table, write buffer, caches and worker threads for one
 * open-channel SSD instance.
 */
struct spdk_ftl_dev {
	/* Device instance UUID */
	struct spdk_uuid			uuid;
	/* Device name */
	char					*name;
	/* Configuration */
	struct spdk_ftl_conf			conf;

	/* Indicates the device is fully initialized */
	int					initialized;
	/* Indicates the device is about to be stopped */
	int					halt;

	/* Init callback */
	spdk_ftl_init_fn			init_cb;
	/* Init callback's context */
	void					*init_arg;

	/* Halt callback */
	spdk_ftl_fn				halt_cb;
	/* Halt callback's context */
	void					*halt_arg;
	/* Halt poller, checks if the device has been halted */
	struct spdk_poller			*halt_poller;

	/* IO channel */
	struct spdk_io_channel			*ioch;

	/* NVMe controller */
	struct spdk_nvme_ctrlr			*ctrlr;
	/* NVMe namespace */
	struct spdk_nvme_ns			*ns;
	/* NVMe transport ID */
	struct spdk_nvme_transport_id		trid;

	/* Non-volatile write buffer cache */
	struct ftl_nv_cache			nv_cache;

	/* LBA map memory pool */
	struct spdk_mempool			*lba_pool;

	/* LBA map requests pool */
	struct spdk_mempool			*lba_request_pool;

	/* Statistics */
	struct ftl_stats			stats;

	/* Parallel unit range */
	struct spdk_ftl_punit_range		range;
	/* Array of parallel units */
	struct ftl_punit			*punits;

	/* Current sequence number */
	uint64_t				seq;

	/* Array of bands */
	struct ftl_band				*bands;
	/* Band currently being defragmented */
	struct ftl_band				*df_band;
	/* Number of operational bands */
	size_t					num_bands;
	/* Next write band */
	struct ftl_band				*next_band;
	/* Free band list */
	LIST_HEAD(, ftl_band)			free_bands;
	/* Closed bands list */
	LIST_HEAD(, ftl_band)			shut_bands;
	/* Number of free bands */
	size_t					num_free;

	/* List of write pointers */
	LIST_HEAD(, ftl_wptr)			wptr_list;

	/* Logical -> physical table */
	void					*l2p;
	/* Size of the l2p table (number of logical blocks) */
	uint64_t				num_lbas;

	/* PPA format */
	struct ftl_ppa_fmt			ppaf;
	/* PPA address size in bits; < 32 means packed 32-bit entries fit */
	size_t					ppa_len;
	/* Device's geometry */
	struct spdk_ocssd_geometry_data		geo;

	/* Flush list */
	LIST_HEAD(, ftl_flush)			flush_list;

	/* Device specific md buffer */
	struct ftl_global_md			global_md;

	/* Metadata size */
	size_t					md_size;

	/* Transfer unit size */
	size_t					xfer_size;
	/* Ring write buffer */
	struct ftl_rwb				*rwb;

	/* Current user write limit */
	int					limit;

	/* Inflight IO operations */
	uint32_t				num_inflight;
	/* Queue of IO awaiting retry */
	TAILQ_HEAD(, ftl_io)			retry_queue;

	/* Manages data relocation */
	struct ftl_reloc			*reloc;

	/* Threads: core drives writes, read_thread drives reads */
	struct ftl_thread			core_thread;
	struct ftl_thread			read_thread;

	/* Devices' list */
	STAILQ_ENTRY(spdk_ftl_dev)		stailq;
};
234 | ||
/* Callback invoked when metadata/device restoration completes; the int
 * argument carries the status (0 on success).
 */
typedef void (*ftl_restore_fn)(struct spdk_ftl_dev *, struct ftl_restore *, int);

/* Core FTL entry points implemented in the corresponding .c files. */
void	ftl_apply_limits(struct spdk_ftl_dev *dev);
void	ftl_io_read(struct ftl_io *io);
void	ftl_io_write(struct ftl_io *io);
int	ftl_io_erase(struct ftl_io *io);
int	ftl_io_flush(struct ftl_io *io);
int	ftl_current_limit(const struct spdk_ftl_dev *dev);
int	ftl_invalidate_addr(struct spdk_ftl_dev *dev, struct ftl_ppa ppa);
int	ftl_task_core(void *ctx);
int	ftl_task_read(void *ctx);
void	ftl_process_anm_event(struct ftl_anm_event *event);
size_t	ftl_tail_md_num_lbks(const struct spdk_ftl_dev *dev);
size_t	ftl_tail_md_hdr_num_lbks(void);
size_t	ftl_vld_map_num_lbks(const struct spdk_ftl_dev *dev);
size_t	ftl_lba_map_num_lbks(const struct spdk_ftl_dev *dev);
size_t	ftl_head_md_num_lbks(const struct spdk_ftl_dev *dev);
int	ftl_restore_md(struct spdk_ftl_dev *dev, ftl_restore_fn cb);
int	ftl_restore_device(struct ftl_restore *restore, ftl_restore_fn cb);
int	ftl_band_set_direct_access(struct ftl_band *band, bool access);
int	ftl_retrieve_chunk_info(struct spdk_ftl_dev *dev, struct ftl_ppa ppa,
				struct spdk_ocssd_chunk_information_entry *info,
				unsigned int num_entries);
258 | ||
/* Wrap a raw 64-bit address value in a struct ftl_ppa. */
#define ftl_to_ppa(addr) \
	(struct ftl_ppa) { .ppa = (uint64_t)(addr) }

/* Wrap a raw 32-bit value in the packed representation of struct ftl_ppa. */
#define ftl_to_ppa_packed(addr) \
	(struct ftl_ppa) { .pack.ppa = (uint32_t)(addr) }
264 | ||
265 | static inline struct spdk_thread * | |
266 | ftl_get_core_thread(const struct spdk_ftl_dev *dev) | |
267 | { | |
268 | return dev->core_thread.thread; | |
269 | } | |
270 | ||
271 | static inline struct spdk_nvme_qpair * | |
272 | ftl_get_write_qpair(const struct spdk_ftl_dev *dev) | |
273 | { | |
274 | return dev->core_thread.qpair; | |
275 | } | |
276 | ||
277 | static inline struct spdk_thread * | |
278 | ftl_get_read_thread(const struct spdk_ftl_dev *dev) | |
279 | { | |
280 | return dev->read_thread.thread; | |
281 | } | |
282 | ||
283 | static inline struct spdk_nvme_qpair * | |
284 | ftl_get_read_qpair(const struct spdk_ftl_dev *dev) | |
285 | { | |
286 | return dev->read_thread.qpair; | |
287 | } | |
288 | ||
289 | static inline int | |
290 | ftl_ppa_packed(const struct spdk_ftl_dev *dev) | |
291 | { | |
292 | return dev->ppa_len < 32; | |
293 | } | |
294 | ||
295 | static inline int | |
296 | ftl_ppa_invalid(struct ftl_ppa ppa) | |
297 | { | |
298 | return ppa.ppa == ftl_to_ppa(FTL_PPA_INVALID).ppa; | |
299 | } | |
300 | ||
301 | static inline int | |
302 | ftl_ppa_cached(struct ftl_ppa ppa) | |
303 | { | |
304 | return !ftl_ppa_invalid(ppa) && ppa.cached; | |
305 | } | |
306 | ||
307 | static inline uint64_t | |
308 | ftl_ppa_addr_pack(const struct spdk_ftl_dev *dev, struct ftl_ppa ppa) | |
309 | { | |
310 | return (ppa.lbk << dev->ppaf.lbk_offset) | | |
311 | (ppa.chk << dev->ppaf.chk_offset) | | |
312 | (ppa.pu << dev->ppaf.pu_offset) | | |
313 | (ppa.grp << dev->ppaf.grp_offset); | |
314 | } | |
315 | ||
316 | static inline struct ftl_ppa | |
317 | ftl_ppa_addr_unpack(const struct spdk_ftl_dev *dev, uint64_t ppa) | |
318 | { | |
319 | struct ftl_ppa res = {}; | |
320 | ||
321 | res.lbk = (ppa >> dev->ppaf.lbk_offset) & dev->ppaf.lbk_mask; | |
322 | res.chk = (ppa >> dev->ppaf.chk_offset) & dev->ppaf.chk_mask; | |
323 | res.pu = (ppa >> dev->ppaf.pu_offset) & dev->ppaf.pu_mask; | |
324 | res.grp = (ppa >> dev->ppaf.grp_offset) & dev->ppaf.grp_mask; | |
325 | ||
326 | return res; | |
327 | } | |
328 | ||
329 | static inline struct ftl_ppa | |
330 | ftl_ppa_to_packed(const struct spdk_ftl_dev *dev, struct ftl_ppa ppa) | |
331 | { | |
332 | struct ftl_ppa p = {}; | |
333 | ||
334 | if (ftl_ppa_invalid(ppa)) { | |
335 | p = ftl_to_ppa_packed(FTL_PPA_INVALID); | |
336 | } else if (ftl_ppa_cached(ppa)) { | |
337 | p.pack.cached = 1; | |
338 | p.pack.offset = (uint32_t) ppa.offset; | |
339 | } else { | |
340 | p.pack.ppa = (uint32_t) ftl_ppa_addr_pack(dev, ppa); | |
341 | } | |
342 | ||
343 | return p; | |
344 | } | |
345 | ||
346 | static inline struct ftl_ppa | |
347 | ftl_ppa_from_packed(const struct spdk_ftl_dev *dev, struct ftl_ppa p) | |
348 | { | |
349 | struct ftl_ppa ppa = {}; | |
350 | ||
351 | if (p.pack.ppa == (uint32_t)FTL_PPA_INVALID) { | |
352 | ppa = ftl_to_ppa(FTL_PPA_INVALID); | |
353 | } else if (p.pack.cached) { | |
354 | ppa.cached = 1; | |
355 | ppa.offset = p.pack.offset; | |
356 | } else { | |
357 | ppa = ftl_ppa_addr_unpack(dev, p.pack.ppa); | |
358 | } | |
359 | ||
360 | return ppa; | |
361 | } | |
362 | ||
363 | static inline unsigned int | |
364 | ftl_ppa_flatten_punit(const struct spdk_ftl_dev *dev, struct ftl_ppa ppa) | |
365 | { | |
366 | return ppa.pu * dev->geo.num_grp + ppa.grp - dev->range.begin; | |
367 | } | |
368 | ||
369 | static inline int | |
370 | ftl_ppa_in_range(const struct spdk_ftl_dev *dev, struct ftl_ppa ppa) | |
371 | { | |
372 | unsigned int punit = ftl_ppa_flatten_punit(dev, ppa) + dev->range.begin; | |
373 | ||
374 | if (punit >= dev->range.begin && punit <= dev->range.end) { | |
375 | return 1; | |
376 | } | |
377 | ||
378 | return 0; | |
379 | } | |
380 | ||
/* Atomically store a 32/64-bit L2P entry at the given offset.  SEQ_CST
 * ordering keeps the table consistent across the core and read threads.
 */
#define _ftl_l2p_set(l2p, off, val, bits) \
	__atomic_store_n(((uint##bits##_t *)(l2p)) + (off), val, __ATOMIC_SEQ_CST)

#define _ftl_l2p_set32(l2p, off, val) \
	_ftl_l2p_set(l2p, off, val, 32)

#define _ftl_l2p_set64(l2p, off, val) \
	_ftl_l2p_set(l2p, off, val, 64)

/* Atomically load a 32/64-bit L2P entry at the given offset. */
#define _ftl_l2p_get(l2p, off, bits) \
	__atomic_load_n(((uint##bits##_t *)(l2p)) + (off), __ATOMIC_SEQ_CST)

#define _ftl_l2p_get32(l2p, off) \
	_ftl_l2p_get(l2p, off, 32)

#define _ftl_l2p_get64(l2p, off) \
	_ftl_l2p_get(l2p, off, 64)

/* Compare two PPAs by their raw 64-bit representation. */
#define ftl_ppa_cmp(p1, p2) \
	((p1).ppa == (p2).ppa)
401 | ||
402 | static inline void | |
403 | ftl_l2p_set(struct spdk_ftl_dev *dev, uint64_t lba, struct ftl_ppa ppa) | |
404 | { | |
405 | assert(dev->num_lbas > lba); | |
406 | ||
407 | if (ftl_ppa_packed(dev)) { | |
408 | _ftl_l2p_set32(dev->l2p, lba, ftl_ppa_to_packed(dev, ppa).ppa); | |
409 | } else { | |
410 | _ftl_l2p_set64(dev->l2p, lba, ppa.ppa); | |
411 | } | |
412 | } | |
413 | ||
414 | static inline struct ftl_ppa | |
415 | ftl_l2p_get(struct spdk_ftl_dev *dev, uint64_t lba) | |
416 | { | |
417 | assert(dev->num_lbas > lba); | |
418 | ||
419 | if (ftl_ppa_packed(dev)) { | |
420 | return ftl_ppa_from_packed(dev, ftl_to_ppa_packed( | |
421 | _ftl_l2p_get32(dev->l2p, lba))); | |
422 | } else { | |
423 | return ftl_to_ppa(_ftl_l2p_get64(dev->l2p, lba)); | |
424 | } | |
425 | } | |
426 | static inline size_t | |
427 | ftl_dev_num_bands(const struct spdk_ftl_dev *dev) | |
428 | { | |
429 | return dev->geo.num_chk; | |
430 | } | |
431 | ||
432 | static inline size_t | |
433 | ftl_dev_lbks_in_chunk(const struct spdk_ftl_dev *dev) | |
434 | { | |
435 | return dev->geo.clba; | |
436 | } | |
437 | ||
438 | static inline size_t | |
439 | ftl_dev_num_punits(const struct spdk_ftl_dev *dev) | |
440 | { | |
441 | return dev->range.end - dev->range.begin + 1; | |
442 | } | |
443 | ||
/* Total logical blocks in one band: one chunk per managed parallel unit. */
static inline uint64_t
ftl_num_band_lbks(const struct spdk_ftl_dev *dev)
{
	uint64_t punits = ftl_dev_num_punits(dev);

	return punits * ftl_dev_lbks_in_chunk(dev);
}
449 | ||
/* Size in bytes of a band's valid-block bitmap: one bit per logical
 * block, rounded up to a whole byte.
 */
static inline size_t
ftl_vld_map_size(const struct spdk_ftl_dev *dev)
{
	uint64_t band_lbks = ftl_num_band_lbks(dev);

	return (size_t)spdk_divide_round_up(band_lbks, CHAR_BIT);
}
455 | ||
456 | #endif /* FTL_CORE_H */ |