/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef FTL_CORE_H
#define FTL_CORE_H

#include "spdk/stdinc.h"
#include "spdk/nvme.h"
#include "spdk/nvme_ocssd.h"
#include "spdk/uuid.h"
#include "spdk/thread.h"
#include "spdk/util.h"
#include "spdk_internal/log.h"
#include "spdk/queue.h"
#include "spdk/bdev.h"

#include "ftl_trace.h"
63 /* Number of writes scheduled directly by the user */
66 /* Total number of writes */
70 struct ftl_trace trace
;
72 /* Number of limits applied */
73 uint64_t limits
[SPDK_FTL_LIMIT_MAX
];
77 struct spdk_ftl_dev
*dev
;
79 struct ftl_ppa start_ppa
;
84 struct spdk_ftl_dev
*dev
;
86 struct spdk_nvme_qpair
*qpair
;
88 /* Thread on which the poller is running */
89 struct spdk_thread
*thread
;
92 struct spdk_poller
*poller
;
93 /* Poller's function */
94 spdk_poller_fn poller_fn
;
95 /* Poller's frequency */
99 struct ftl_global_md
{
100 /* Device instance */
101 struct spdk_uuid uuid
;
102 /* Size of the l2p table */
106 struct ftl_nv_cache
{
107 /* Write buffer cache bdev */
108 struct spdk_bdev_desc
*bdev_desc
;
110 uint64_t current_addr
;
111 /* Number of available blocks left */
112 uint64_t num_available
;
114 pthread_spinlock_t lock
;
117 struct spdk_ftl_dev
{
118 /* Device instance */
119 struct spdk_uuid uuid
;
123 struct spdk_ftl_conf conf
;
125 /* Indicates the device is fully initialized */
127 /* Indicates the device is about to be stopped */
131 spdk_ftl_init_fn init_cb
;
132 /* Init callback's context */
137 /* Halt callback's context */
139 /* Halt poller, checks if the device has been halted */
140 struct spdk_poller
*halt_poller
;
143 struct spdk_io_channel
*ioch
;
145 /* NVMe controller */
146 struct spdk_nvme_ctrlr
*ctrlr
;
148 struct spdk_nvme_ns
*ns
;
149 /* NVMe transport ID */
150 struct spdk_nvme_transport_id trid
;
152 /* Non-volatile write buffer cache */
153 struct ftl_nv_cache nv_cache
;
155 /* LBA map memory pool */
156 struct spdk_mempool
*lba_pool
;
158 /* LBA map requests pool */
159 struct spdk_mempool
*lba_request_pool
;
162 struct ftl_stats stats
;
164 /* Parallel unit range */
165 struct spdk_ftl_punit_range range
;
166 /* Array of parallel units */
167 struct ftl_punit
*punits
;
169 /* Current sequence number */
173 struct ftl_band
*bands
;
174 /* Band being curently defraged */
175 struct ftl_band
*df_band
;
176 /* Number of operational bands */
178 /* Next write band */
179 struct ftl_band
*next_band
;
181 LIST_HEAD(, ftl_band
) free_bands
;
182 /* Closed bands list */
183 LIST_HEAD(, ftl_band
) shut_bands
;
184 /* Number of free bands */
187 /* List of write pointers */
188 LIST_HEAD(, ftl_wptr
) wptr_list
;
190 /* Logical -> physical table */
192 /* Size of the l2p table */
196 struct ftl_ppa_fmt ppaf
;
197 /* PPA address size */
199 /* Device's geometry */
200 struct spdk_ocssd_geometry_data geo
;
203 LIST_HEAD(, ftl_flush
) flush_list
;
205 /* Device specific md buffer */
206 struct ftl_global_md global_md
;
211 /* Transfer unit size */
213 /* Ring write buffer */
216 /* Current user write limit */
219 /* Inflight IO operations */
220 uint32_t num_inflight
;
221 /* Queue of IO awaiting retry */
222 TAILQ_HEAD(, ftl_io
) retry_queue
;
224 /* Manages data relocation */
225 struct ftl_reloc
*reloc
;
228 struct ftl_thread core_thread
;
229 struct ftl_thread read_thread
;
232 STAILQ_ENTRY(spdk_ftl_dev
) stailq
;
/* Callback invoked when a restore step completes; the trailing int is
 * presumably a status code — TODO confirm against callers. */
typedef void (*ftl_restore_fn)(struct spdk_ftl_dev *, struct ftl_restore *, int);
237 void ftl_apply_limits(struct spdk_ftl_dev
*dev
);
238 void ftl_io_read(struct ftl_io
*io
);
239 void ftl_io_write(struct ftl_io
*io
);
240 int ftl_io_erase(struct ftl_io
*io
);
241 int ftl_io_flush(struct ftl_io
*io
);
242 int ftl_current_limit(const struct spdk_ftl_dev
*dev
);
243 int ftl_invalidate_addr(struct spdk_ftl_dev
*dev
, struct ftl_ppa ppa
);
244 int ftl_task_core(void *ctx
);
245 int ftl_task_read(void *ctx
);
246 void ftl_process_anm_event(struct ftl_anm_event
*event
);
247 size_t ftl_tail_md_num_lbks(const struct spdk_ftl_dev
*dev
);
248 size_t ftl_tail_md_hdr_num_lbks(void);
249 size_t ftl_vld_map_num_lbks(const struct spdk_ftl_dev
*dev
);
250 size_t ftl_lba_map_num_lbks(const struct spdk_ftl_dev
*dev
);
251 size_t ftl_head_md_num_lbks(const struct spdk_ftl_dev
*dev
);
252 int ftl_restore_md(struct spdk_ftl_dev
*dev
, ftl_restore_fn cb
);
253 int ftl_restore_device(struct ftl_restore
*restore
, ftl_restore_fn cb
);
254 int ftl_band_set_direct_access(struct ftl_band
*band
, bool access
);
255 int ftl_retrieve_chunk_info(struct spdk_ftl_dev
*dev
, struct ftl_ppa ppa
,
256 struct spdk_ocssd_chunk_information_entry
*info
,
257 unsigned int num_entries
);
/* Wrap a raw 64-bit value as a struct ftl_ppa compound literal. */
#define ftl_to_ppa(addr) \
	(struct ftl_ppa) { .ppa = (uint64_t)(addr) }

/* Wrap a raw 32-bit value as a packed struct ftl_ppa compound literal. */
#define ftl_to_ppa_packed(addr) \
	(struct ftl_ppa) { .pack.ppa = (uint32_t)(addr) }
265 static inline struct spdk_thread
*
266 ftl_get_core_thread(const struct spdk_ftl_dev
*dev
)
268 return dev
->core_thread
.thread
;
271 static inline struct spdk_nvme_qpair
*
272 ftl_get_write_qpair(const struct spdk_ftl_dev
*dev
)
274 return dev
->core_thread
.qpair
;
277 static inline struct spdk_thread
*
278 ftl_get_read_thread(const struct spdk_ftl_dev
*dev
)
280 return dev
->read_thread
.thread
;
283 static inline struct spdk_nvme_qpair
*
284 ftl_get_read_qpair(const struct spdk_ftl_dev
*dev
)
286 return dev
->read_thread
.qpair
;
290 ftl_ppa_packed(const struct spdk_ftl_dev
*dev
)
292 return dev
->ppa_len
< 32;
296 ftl_ppa_invalid(struct ftl_ppa ppa
)
298 return ppa
.ppa
== ftl_to_ppa(FTL_PPA_INVALID
).ppa
;
302 ftl_ppa_cached(struct ftl_ppa ppa
)
304 return !ftl_ppa_invalid(ppa
) && ppa
.cached
;
307 static inline uint64_t
308 ftl_ppa_addr_pack(const struct spdk_ftl_dev
*dev
, struct ftl_ppa ppa
)
310 return (ppa
.lbk
<< dev
->ppaf
.lbk_offset
) |
311 (ppa
.chk
<< dev
->ppaf
.chk_offset
) |
312 (ppa
.pu
<< dev
->ppaf
.pu_offset
) |
313 (ppa
.grp
<< dev
->ppaf
.grp_offset
);
316 static inline struct ftl_ppa
317 ftl_ppa_addr_unpack(const struct spdk_ftl_dev
*dev
, uint64_t ppa
)
319 struct ftl_ppa res
= {};
321 res
.lbk
= (ppa
>> dev
->ppaf
.lbk_offset
) & dev
->ppaf
.lbk_mask
;
322 res
.chk
= (ppa
>> dev
->ppaf
.chk_offset
) & dev
->ppaf
.chk_mask
;
323 res
.pu
= (ppa
>> dev
->ppaf
.pu_offset
) & dev
->ppaf
.pu_mask
;
324 res
.grp
= (ppa
>> dev
->ppaf
.grp_offset
) & dev
->ppaf
.grp_mask
;
329 static inline struct ftl_ppa
330 ftl_ppa_to_packed(const struct spdk_ftl_dev
*dev
, struct ftl_ppa ppa
)
332 struct ftl_ppa p
= {};
334 if (ftl_ppa_invalid(ppa
)) {
335 p
= ftl_to_ppa_packed(FTL_PPA_INVALID
);
336 } else if (ftl_ppa_cached(ppa
)) {
338 p
.pack
.offset
= (uint32_t) ppa
.offset
;
340 p
.pack
.ppa
= (uint32_t) ftl_ppa_addr_pack(dev
, ppa
);
346 static inline struct ftl_ppa
347 ftl_ppa_from_packed(const struct spdk_ftl_dev
*dev
, struct ftl_ppa p
)
349 struct ftl_ppa ppa
= {};
351 if (p
.pack
.ppa
== (uint32_t)FTL_PPA_INVALID
) {
352 ppa
= ftl_to_ppa(FTL_PPA_INVALID
);
353 } else if (p
.pack
.cached
) {
355 ppa
.offset
= p
.pack
.offset
;
357 ppa
= ftl_ppa_addr_unpack(dev
, p
.pack
.ppa
);
363 static inline unsigned int
364 ftl_ppa_flatten_punit(const struct spdk_ftl_dev
*dev
, struct ftl_ppa ppa
)
366 return ppa
.pu
* dev
->geo
.num_grp
+ ppa
.grp
- dev
->range
.begin
;
370 ftl_ppa_in_range(const struct spdk_ftl_dev
*dev
, struct ftl_ppa ppa
)
372 unsigned int punit
= ftl_ppa_flatten_punit(dev
, ppa
) + dev
->range
.begin
;
374 if (punit
>= dev
->range
.begin
&& punit
<= dev
->range
.end
) {
/* Atomically store/load L2P entries. The table is accessed as either a
 * uint32_t or uint64_t array depending on whether PPAs are packed
 * (see ftl_ppa_packed); seq-cst atomics make concurrent reader/writer
 * access safe. */
#define _ftl_l2p_set(l2p, off, val, bits) \
	__atomic_store_n(((uint##bits##_t *)(l2p)) + (off), val, __ATOMIC_SEQ_CST)

#define _ftl_l2p_set32(l2p, off, val) \
	_ftl_l2p_set(l2p, off, val, 32)

#define _ftl_l2p_set64(l2p, off, val) \
	_ftl_l2p_set(l2p, off, val, 64)

#define _ftl_l2p_get(l2p, off, bits) \
	__atomic_load_n(((uint##bits##_t *)(l2p)) + (off), __ATOMIC_SEQ_CST)

#define _ftl_l2p_get32(l2p, off) \
	_ftl_l2p_get(l2p, off, 32)

#define _ftl_l2p_get64(l2p, off) \
	_ftl_l2p_get(l2p, off, 64)

/* Two PPAs are equal iff their raw 64-bit representations match. */
#define ftl_ppa_cmp(p1, p2) \
	((p1).ppa == (p2).ppa)
403 ftl_l2p_set(struct spdk_ftl_dev
*dev
, uint64_t lba
, struct ftl_ppa ppa
)
405 assert(dev
->num_lbas
> lba
);
407 if (ftl_ppa_packed(dev
)) {
408 _ftl_l2p_set32(dev
->l2p
, lba
, ftl_ppa_to_packed(dev
, ppa
).ppa
);
410 _ftl_l2p_set64(dev
->l2p
, lba
, ppa
.ppa
);
414 static inline struct ftl_ppa
415 ftl_l2p_get(struct spdk_ftl_dev
*dev
, uint64_t lba
)
417 assert(dev
->num_lbas
> lba
);
419 if (ftl_ppa_packed(dev
)) {
420 return ftl_ppa_from_packed(dev
, ftl_to_ppa_packed(
421 _ftl_l2p_get32(dev
->l2p
, lba
)));
423 return ftl_to_ppa(_ftl_l2p_get64(dev
->l2p
, lba
));
427 ftl_dev_num_bands(const struct spdk_ftl_dev
*dev
)
429 return dev
->geo
.num_chk
;
433 ftl_dev_lbks_in_chunk(const struct spdk_ftl_dev
*dev
)
435 return dev
->geo
.clba
;
439 ftl_dev_num_punits(const struct spdk_ftl_dev
*dev
)
441 return dev
->range
.end
- dev
->range
.begin
+ 1;
/* Logical blocks per band: one chunk per punit, clba blocks per chunk. */
static inline uint64_t
ftl_num_band_lbks(const struct spdk_ftl_dev *dev)
{
	return ftl_dev_num_punits(dev) * ftl_dev_lbks_in_chunk(dev);
}
/* Size in bytes of a band's validity bitmap: one bit per logical block,
 * rounded up to whole bytes.
 * NOTE(review): return-type line restored as `static inline size_t` from
 * upstream SPDK — verify. */
static inline size_t
ftl_vld_map_size(const struct spdk_ftl_dev *dev)
{
	return (size_t)spdk_divide_round_up(ftl_num_band_lbks(dev), CHAR_BIT);
}
456 #endif /* FTL_CORE_H */