/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.h)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Matias Bjorling <matias@cnexlabs.com>
 * Write buffering: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Physical Block-device target for Open-channel SSDs.
 */

#ifndef PBLK_H_
#define PBLK_H_
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/uuid.h>

#include <linux/lightnvm.h>
/* Run GC only if less than 1/X of the blocks are free */
#define GC_LIMIT_INVERSE 5
#define GC_TIME_MSECS 1000

#define PBLK_SECTOR (512)
#define PBLK_EXPOSED_PAGE_SIZE (4096)
#define PBLK_MAX_REQ_ADDRS (64)
#define PBLK_MAX_REQ_ADDRS_PW (6)

#define PBLK_WS_POOL_SIZE (128)
#define PBLK_META_POOL_SIZE (128)
#define PBLK_READ_REQ_POOL_SIZE (1024)

#define PBLK_NR_CLOSE_JOBS (4)

#define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)

#define PBLK_COMMAND_TIMEOUT_MS 30000

/* Max 512 LUNs per device */
#define PBLK_MAX_LUNS_BITMAP (4)

#define NR_PHY_IN_LOG (PBLK_EXPOSED_PAGE_SIZE / PBLK_SECTOR)
#define pblk_for_each_lun(pblk, rlun, i) \
		for ((i) = 0, rlun = &(pblk)->luns[0]; \
			(i) < (pblk)->nr_luns; (i)++, rlun = &(pblk)->luns[(i)])
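
/*
 * Illustrative usage sketch, not part of the original header: the iterator
 * above walks the pblk->luns array by index, e.g. (loop body hypothetical):
 *
 *	struct pblk_lun *rlun;
 *	int i;
 *
 *	pblk_for_each_lun(pblk, rlun, i)
 *		up(&rlun->wr_sem);
 */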
#define ERASE 2 /* READ = 0, WRITE = 1 */
enum {
	/* IO Types */
	PBLK_IOTYPE_USER	= 1 << 0,
	PBLK_IOTYPE_GC		= 1 << 1,

	/* Write buffer flags */
	PBLK_FLUSH_ENTRY	= 1 << 2,
	PBLK_WRITTEN_DATA	= 1 << 3,
	PBLK_SUBMITTED_ENTRY	= 1 << 4,
	PBLK_WRITABLE_ENTRY	= 1 << 5,
};
enum {
	PBLK_BLK_ST_OPEN =	0x1,
	PBLK_BLK_ST_CLOSED =	0x2,
};
struct pblk_sec_meta {
	u64 reserved;
	__le64 lba;
};
/* The number of GC lists and the rate-limiter states go together. This way the
 * rate-limiter can dictate how much GC is needed based on resource utilization.
 */
#define PBLK_GC_NR_LISTS 3
#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * PBLK_MAX_REQ_ADDRS)
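
/*
 * Worked example, not part of the original header: assuming the two 8-byte
 * fields in struct pblk_sec_meta above, each sector carries 16 bytes of
 * out-of-band metadata, so a maximally-sized request (PBLK_MAX_REQ_ADDRS
 * = 64 sectors) needs a 16 * 64 = 1024 byte DMA metadata area.
 */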
/* write buffer completion context */
struct pblk_c_ctx {
	struct list_head list;		/* Head for out-of-order completion */
	unsigned long *lun_bitmap;	/* Luns used on current request */

	unsigned int sentry;
	unsigned int nr_valid;
	unsigned int nr_padded;
};
/* generic context */
struct pblk_g_ctx {
	void *private;
};

/* Pad context */
struct pblk_pad_rq {
	struct pblk *pblk;
	struct completion wait;
	struct kref ref;
};
/* Recovery context */
struct pblk_rec_ctx {
	struct pblk *pblk;
	struct nvm_rq *rqd;
	struct list_head failed;
	struct work_struct ws_rec;
};
/* Write context */
struct pblk_w_ctx {
	struct bio_list bios;		/* Original bios - used for completion
					 * in REQ_FUA, REQ_FLUSH case
					 */
	u64 lba;			/* Logical addr. associated with entry */
	struct ppa_addr ppa;		/* Physical addr. associated with entry */
	int flags;			/* Write context flags */
};
struct pblk_rb_entry {
	struct ppa_addr cacheline;	/* Cacheline for this entry */
	void *data;			/* Pointer to data on this entry */
	struct pblk_w_ctx w_ctx;	/* Context for this entry */
	struct list_head index;		/* List head to enable indexes */
};
#define EMPTY_ENTRY (~0U)
struct pblk_rb_pages {
	struct page *pages;
	int order;
	struct list_head list;
};
struct pblk_rb {
	struct pblk_rb_entry *entries;	/* Ring buffer entries */
	unsigned int mem;		/* Write offset - points to next
					 * writable entry in memory
					 */
	unsigned int subm;		/* Read offset - points to last entry
					 * that has been submitted to the media
					 * to be persisted
					 */
	unsigned int sync;		/* Synced - backpointer that signals
					 * the last submitted entry that has
					 * been successfully persisted to media
					 */
	unsigned int sync_point;	/* Sync point - last entry that must be
					 * flushed to the media. Used with
					 * REQ_FLUSH and REQ_FUA
					 */
	unsigned int l2p_update;	/* l2p update point - next entry for
					 * which the l2p mapping will be updated
					 * to contain a device ppa address
					 * (instead of a cacheline)
					 */
	unsigned int nr_entries;	/* Number of entries in write buffer -
					 * must be a power of two
					 */
	unsigned int seg_size;		/* Size of the data segments being
					 * stored on each entry. Typically this
					 * will be 4KB
					 */

	struct list_head pages;		/* List of data pages */

	spinlock_t w_lock;		/* Write lock */
	spinlock_t s_lock;		/* Sync lock */

#ifdef CONFIG_NVM_DEBUG
	atomic_t inflight_sync_point;	/* Not served REQ_FLUSH | REQ_FUA */
#endif
};
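
/*
 * Illustrative sketch, not part of the original header: because nr_entries
 * is a power of two, the distance between any two of the ring buffer
 * pointers above (mem, subm, sync, ...) can be taken with a mask instead
 * of a modulo. The helper name below is hypothetical; pblk-rb.c implements
 * the real accounting.
 */
static inline unsigned int pblk_rb_example_dist(unsigned int head,
						unsigned int tail,
						unsigned int nr_entries)
{
	/* number of entries in [tail, head), with wrap-around */
	return (head - tail) & (nr_entries - 1);
}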
#define PBLK_RECOVERY_SECTORS 16
struct pblk_lun {
	struct ppa_addr bppa;

	u8 *bb_list;			/* Bad block list for LUN. Only used on
					 * bring up. Bad blocks are managed
					 * within lines on run-time.
					 */

	struct semaphore wr_sem;
};
struct pblk_gc_rq {
	struct pblk_line *line;
	void *data;
	u64 lba_list[PBLK_MAX_REQ_ADDRS];
	int nr_secs;
	int secs_to_gc;
	struct list_head list;
};
struct pblk_gc {
	/* These states are not protected by a lock since (i) they are in the
	 * fast path, and (ii) they are not critical.
	 */
	int gc_active;
	int gc_enabled;
	int gc_forced;

	struct task_struct *gc_ts;
	struct task_struct *gc_writer_ts;
	struct task_struct *gc_reader_ts;

	struct workqueue_struct *gc_line_reader_wq;
	struct workqueue_struct *gc_reader_wq;

	struct timer_list gc_timer;

	struct semaphore gc_sem;
	atomic_t inflight_gc;
	int w_entries;

	struct list_head w_list;
	struct list_head r_list;

	spinlock_t lock;
	spinlock_t w_lock;
	spinlock_t r_lock;
};
struct pblk_rl {
	unsigned int high;	/* Upper threshold for rate limiter (free run -
				 * user I/O rate limiter
				 */
	unsigned int low;	/* Lower threshold for rate limiter (user I/O
				 * rate limiter - stall)
				 */
	unsigned int high_pw;	/* High rounded up as a power of 2 */

#define PBLK_USER_HIGH_THRS 8	/* Begin write limit at 12.5% available blks */
#define PBLK_USER_LOW_THRS 10	/* Aggressive GC at 10% available blocks */

	int rb_windows_pw;	/* Number of rate windows in the write buffer
				 * given as a power-of-2. This guarantees that
				 * when user I/O is being rate limited, there
				 * will be reserved enough space for the GC to
				 * place its payload. A window is of
				 * pblk->max_write_pgs size, which in NVMe is
				 * 64, i.e., 256kb.
				 */
	int rb_budget;		/* Total number of entries available for I/O */
	int rb_user_max;	/* Max buffer entries available for user I/O */
	int rb_gc_max;		/* Max buffer entries available for GC I/O */
	int rb_gc_rsv;		/* Reserved buffer entries for GC I/O */
	int rb_state;		/* Rate-limiter current state */

	atomic_t rb_user_cnt;	/* User I/O buffer counter */
	atomic_t rb_gc_cnt;	/* GC I/O buffer counter */
	atomic_t rb_space;	/* Space limit in case of reaching capacity */

	int rsv_blocks;		/* Reserved blocks for GC */

	int rb_user_active;
	int rb_gc_active;

	struct timer_list u_timer;

	unsigned long long nr_secs;
	unsigned long total_blocks;
	atomic_t free_blocks;
};
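
/*
 * Illustrative sketch, not part of the original header: in free run the
 * buffer budget is split so that user I/O gets everything except the GC
 * reservation, roughly:
 *
 *	rl->rb_user_max = rl->rb_budget - rl->rb_gc_rsv;
 *	rl->rb_gc_max = rl->rb_gc_rsv;
 *
 * The authoritative policy (including the rate-limiter state transitions)
 * lives in pblk-rl.c; the split above only shows how the fields relate.
 */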
#define PBLK_LINE_EMPTY (~0U)
enum {
	/* Line Types */
	PBLK_LINETYPE_FREE = 0,
	PBLK_LINETYPE_LOG = 1,
	PBLK_LINETYPE_DATA = 2,

	/* Line state */
	PBLK_LINESTATE_FREE = 10,
	PBLK_LINESTATE_OPEN = 11,
	PBLK_LINESTATE_CLOSED = 12,
	PBLK_LINESTATE_GC = 13,
	PBLK_LINESTATE_BAD = 14,
	PBLK_LINESTATE_CORRUPT = 15,

	/* GC group */
	PBLK_LINEGC_NONE = 20,
	PBLK_LINEGC_EMPTY = 21,
	PBLK_LINEGC_LOW = 22,
	PBLK_LINEGC_MID = 23,
	PBLK_LINEGC_HIGH = 24,
	PBLK_LINEGC_FULL = 25,
};
#define PBLK_MAGIC 0x70626c6b /* pblk */
struct line_header {
	__le32 crc;
	__le32 identifier;	/* pblk identifier */
	__u8 uuid[16];		/* instance uuid */
	__le16 type;		/* line type */
	__le16 version;		/* type version */
	__le32 id;		/* line id for current line */
};
struct line_smeta {
	struct line_header header;

	__le32 crc;		/* Full structure including struct crc */
	/* Previous line metadata */
	__le32 prev_id;		/* Line id for previous line */

	/* Current line metadata */
	__le64 seq_nr;		/* Sequence number for current line */

	/* Active writers */
	__le32 window_wr_lun;	/* Number of parallel LUNs to write */
};
/*
 * Metadata layout in media:
 *	First sector:
 *		1. struct line_emeta
 *		2. bad block bitmap (u64 * window_wr_lun)
 *	Mid sectors (start at lbas_sector):
 *		3. nr_lbas (u64) forming lba list
 *	Last sectors (start at vsc_sector):
 *		4. u32 valid sector count (vsc) for all lines (~0U: free line)
 */
struct line_emeta {
	struct line_header header;

	__le32 crc;		/* Full structure including struct crc */

	/* Previous line metadata */
	__le32 prev_id;		/* Line id for prev line */

	/* Current line metadata */
	__le64 seq_nr;		/* Sequence number for current line */

	/* Active writers */
	__le32 window_wr_lun;	/* Number of parallel LUNs to write */

	/* Bookkeeping for recovery */
	__le32 next_id;		/* Line id for next line */
	__le64 nr_lbas;		/* Number of lbas mapped in line */
	__le64 nr_valid_lbas;	/* Number of valid lbas mapped in line */
	__le64 bb_bitmap[];	/* Updated bad block bitmap for line */
};
struct pblk_emeta {
	struct line_emeta *buf;		/* emeta buffer in media format */
	int mem;			/* Write offset - points to next
					 * writable entry in memory
					 */
	atomic_t sync;			/* Synced - backpointer that signals the
					 * last entry that has been successfully
					 * persisted to media
					 */
	unsigned int nr_entries;	/* Number of emeta entries */
};
struct pblk_smeta {
	struct line_smeta *buf;		/* smeta buffer in persistent format */
};
struct pblk_line {
	struct pblk *pblk;
	unsigned int id;		/* Line number corresponds to the
					 * block line
					 */
	unsigned int seq_nr;		/* Unique line sequence number */

	int state;			/* PBLK_LINESTATE_X */
	int type;			/* PBLK_LINETYPE_X */
	int gc_group;			/* PBLK_LINEGC_X */
	struct list_head list;		/* Free, GC lists */

	unsigned long *lun_bitmap;	/* Bitmap for LUNs mapped in line */

	struct pblk_smeta *smeta;	/* Start metadata */
	struct pblk_emeta *emeta;	/* End metadata */

	int meta_line;			/* Metadata line id */
	int meta_distance;		/* Distance between data and metadata */

	u64 smeta_ssec;			/* Sector where smeta starts */
	u64 emeta_ssec;			/* Sector where emeta starts */

	unsigned int sec_in_line;	/* Number of usable secs in line */

	atomic_t blk_in_line;		/* Number of good blocks in line */
	unsigned long *blk_bitmap;	/* Bitmap for valid/invalid blocks */
	unsigned long *erase_bitmap;	/* Bitmap for erased blocks */

	unsigned long *map_bitmap;	/* Bitmap for mapped sectors in line */
	unsigned long *invalid_bitmap;	/* Bitmap for invalid sectors in line */

	atomic_t left_eblks;		/* Blocks left for erasing */
	atomic_t left_seblks;		/* Blocks left for sync erasing */

	int left_msecs;			/* Sectors left for mapping */
	unsigned int cur_sec;		/* Sector map pointer */
	unsigned int nr_valid_lbas;	/* Number of valid lbas in line */

	__le32 *vsc;			/* Valid sector count in line */

	struct kref ref;		/* Write buffer L2P references */

	spinlock_t lock;		/* Necessary for invalid_bitmap only */
};
#define PBLK_DATA_LINES 4
enum {
	PBLK_KMALLOC_META = 1,
	PBLK_VMALLOC_META = 2,
};
enum {
	PBLK_EMETA_TYPE_HEADER = 1,	/* struct line_emeta first sector */
	PBLK_EMETA_TYPE_LLBA = 2,	/* lba list - type: __le64 */
	PBLK_EMETA_TYPE_VSC = 3,	/* vsc list - type: __le32 */
};
struct pblk_line_mgmt {
	int nr_lines;			/* Total number of full lines */
	int nr_free_lines;		/* Number of full lines in free list */

	/* Free lists - use free_lock */
	struct list_head free_list;	/* Full lines ready to use */
	struct list_head corrupt_list;	/* Full lines corrupted */
	struct list_head bad_list;	/* Full lines bad */

	/* GC lists - use gc_lock */
	struct list_head *gc_lists[PBLK_GC_NR_LISTS];
	struct list_head gc_high_list;	/* Full lines ready to GC, high isc */
	struct list_head gc_mid_list;	/* Full lines ready to GC, mid isc */
	struct list_head gc_low_list;	/* Full lines ready to GC, low isc */

	struct list_head gc_full_list;	/* Full lines ready to GC, no valid */
	struct list_head gc_empty_list;	/* Full lines close, all valid */

	struct pblk_line *log_line;	/* Current FTL log line */
	struct pblk_line *data_line;	/* Current data line */
	struct pblk_line *log_next;	/* Next FTL log line */
	struct pblk_line *data_next;	/* Next data line */

	struct list_head emeta_list;	/* Lines queued to schedule emeta */

	__le32 *vsc_list;		/* Valid sector counts for all lines */

	/* Metadata allocation type: VMALLOC | KMALLOC */
	int emeta_alloc_type;

	/* Pre-allocated metadata for data lines */
	struct pblk_smeta *sline_meta[PBLK_DATA_LINES];
	struct pblk_emeta *eline_meta[PBLK_DATA_LINES];
	unsigned long meta_bitmap;

	/* Helpers for fast bitmap calculations */
	unsigned long *bb_template;
	unsigned long *bb_aux;

	unsigned long d_seq_nr;		/* Data line unique sequence number */
	unsigned long l_seq_nr;		/* Log line unique sequence number */

	spinlock_t free_lock;
	spinlock_t close_lock;
	spinlock_t gc_lock;
};
struct pblk_line_meta {
	unsigned int smeta_len;		/* Total length for smeta */
	unsigned int smeta_sec;		/* Sectors needed for smeta */

	unsigned int emeta_len[4];	/* Lengths for emeta:
					 * [0]: Total length
					 * [1]: struct line_emeta length
					 * [2]: L2P portion length
					 * [3]: vsc list length
					 */
	unsigned int emeta_sec[4];	/* Sectors needed for emeta. Same layout
					 * as emeta_len
					 */

	unsigned int emeta_bb;		/* Boundary for bb that affects emeta */

	unsigned int vsc_list_len;	/* Length for vsc list */
	unsigned int sec_bitmap_len;	/* Length for sector bitmap in line */
	unsigned int blk_bitmap_len;	/* Length for block bitmap in line */
	unsigned int lun_bitmap_len;	/* Length for lun bitmap in line */

	unsigned int blk_per_line;	/* Number of blocks in a full line */
	unsigned int sec_per_line;	/* Number of sectors in a line */
	unsigned int dsec_per_line;	/* Number of data sectors in a line */
	unsigned int min_blk_line;	/* Min. number of good blocks in line */

	unsigned int mid_thrs;		/* Threshold for GC mid list */
	unsigned int high_thrs;		/* Threshold for GC high list */

	unsigned int meta_distance;	/* Distance between data and metadata */
};
struct pblk_addr_format {
	u64	ch_mask;
	u64	lun_mask;
	u64	pln_mask;
	u64	blk_mask;
	u64	pg_mask;
	u64	sec_mask;
	u8	ch_offset;
	u8	lun_offset;
	u8	pln_offset;
	u8	blk_offset;
	u8	pg_offset;
	u8	sec_offset;
};
enum {
	PBLK_STATE_RUNNING = 0,
	PBLK_STATE_STOPPING = 1,
	PBLK_STATE_RECOVERING = 2,
	PBLK_STATE_STOPPED = 3,
};
struct pblk {
	struct nvm_tgt_dev *dev;
	struct gendisk *disk;

	struct kobject kobj;

	struct pblk_lun *luns;

	struct pblk_line *lines;	/* Line array */
	struct pblk_line_mgmt l_mg;	/* Line management */
	struct pblk_line_meta lm;	/* Line metadata */

	int ppaf_bitsize;
	struct pblk_addr_format ppaf;

	struct pblk_rb rwb;

	int state;			/* pblk line state */

	int min_write_pgs; /* Minimum amount of pages required by controller */
	int max_write_pgs; /* Maximum amount of pages supported by controller */
	int pgs_in_buffer; /* Number of pages that need to be held in buffer to
			    * guarantee successful reads.
			    */

	sector_t capacity; /* Device capacity when bad blocks are subtracted */
	int over_pct;      /* Percentage of device used for over-provisioning */

	/* pblk provisioning values. Used by rate limiter */
	struct pblk_rl rl;

	int sec_per_write;

	unsigned char instance_uuid[16];
#ifdef CONFIG_NVM_DEBUG
	/* All debug counters apply to 4kb sector I/Os */
	atomic_long_t inflight_writes;	/* Inflight writes (user and gc) */
	atomic_long_t padded_writes;	/* Sectors padded due to flush/fua */
	atomic_long_t padded_wb;	/* Sectors padded in write buffer */
	atomic_long_t nr_flush;		/* Number of flush/fua I/O */
	atomic_long_t req_writes;	/* Sectors stored on write buffer */
	atomic_long_t sub_writes;	/* Sectors submitted from buffer */
	atomic_long_t sync_writes;	/* Sectors synced to media */
	atomic_long_t inflight_reads;	/* Inflight sector read requests */
	atomic_long_t cache_reads;	/* Read requests that hit the cache */
	atomic_long_t sync_reads;	/* Completed sector read requests */
	atomic_long_t recov_writes;	/* Sectors submitted from recovery */
	atomic_long_t recov_gc_writes;	/* Sectors submitted from write GC */
	atomic_long_t recov_gc_reads;	/* Sectors submitted from read GC */
#endif

	spinlock_t lock;

	atomic_long_t read_failed;
	atomic_long_t read_empty;
	atomic_long_t read_high_ecc;
	atomic_long_t read_failed_gc;
	atomic_long_t write_failed;
	atomic_long_t erase_failed;

	atomic_t inflight_io;		/* General inflight I/O counter */

	struct task_struct *writer_ts;

	/* Simple translation map of logical addresses to physical addresses.
	 * The logical addresses are known by the host system, while the
	 * physical addresses are used when writing to the disk block device.
	 */
	unsigned char *trans_map;
	spinlock_t trans_lock;

	struct list_head compl_list;

	mempool_t *page_pool;
	mempool_t *line_ws_pool;
	mempool_t *rec_pool;
	mempool_t *g_rq_pool;
	mempool_t *w_rq_pool;
	mempool_t *line_meta_pool;

	struct workqueue_struct *close_wq;
	struct workqueue_struct *bb_wq;

	struct timer_list wtimer;

	struct pblk_gc gc;
};
struct pblk_line_ws {
	struct pblk *pblk;
	struct pblk_line *line;
	void *priv;
	struct work_struct ws;
};
#define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx))
#define pblk_w_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx))
/*
 * pblk ring buffer operations
 */
int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
		 unsigned int power_size, unsigned int power_seg_sz);
unsigned int pblk_rb_calculate_size(unsigned int nr_entries);
void *pblk_rb_entries_ref(struct pblk_rb *rb);
int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
			   unsigned int nr_entries, unsigned int *pos);
int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
			 unsigned int *pos);
void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
			      struct pblk_w_ctx w_ctx, unsigned int pos);
void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
			    struct pblk_w_ctx w_ctx, struct pblk_line *gc_line,
			    unsigned int pos);
struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos);
void pblk_rb_flush(struct pblk_rb *rb);

void pblk_rb_sync_l2p(struct pblk_rb *rb);
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
				 struct bio *bio, unsigned int pos,
				 unsigned int nr_entries, unsigned int count);
unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
				      struct list_head *list,
				      unsigned int max);
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
			struct ppa_addr ppa, int bio_iter, bool advanced_bio);
unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);

unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
					      struct ppa_addr *ppa);
void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
unsigned int pblk_rb_sync_point_count(struct pblk_rb *rb);

unsigned int pblk_rb_read_count(struct pblk_rb *rb);
unsigned int pblk_rb_sync_count(struct pblk_rb *rb);
unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos);

int pblk_rb_tear_down_check(struct pblk_rb *rb);
int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos);
void pblk_rb_data_free(struct pblk_rb *rb);
ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);
/*
 * pblk core
 */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw);
void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
			struct pblk_c_ctx *c_ctx);
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw);
void pblk_wait_for_meta(struct pblk *pblk);
struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba);
void pblk_discard(struct pblk *pblk, struct bio *bio);
void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
			      unsigned int nr_secs, unsigned int len,
			      int alloc_type, gfp_t gfp_mask);
struct pblk_line *pblk_line_get(struct pblk *pblk);
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
void pblk_line_replace_data(struct pblk *pblk);
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
struct pblk_line *pblk_line_get_data(struct pblk *pblk);
struct pblk_line *pblk_line_get_erase(struct pblk *pblk);
int pblk_line_erase(struct pblk *pblk, struct pblk_line *line);
int pblk_line_is_full(struct pblk_line *line);
void pblk_line_free(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close_meta_sync(struct pblk *pblk);
void pblk_line_close_ws(struct work_struct *work);
void pblk_pipeline_stop(struct pblk *pblk);
void pblk_line_mark_bb(struct work_struct *work);
void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		      void (*work)(struct work_struct *),
		      struct workqueue_struct *wq);
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line);
int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
			 void *emeta_buf);
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr erase_ppa);
void pblk_line_put(struct kref *ref);
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line);
u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line);
void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
		   unsigned long secs_to_flush);
void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		  unsigned long *lun_bitmap);
void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		unsigned long *lun_bitmap);
void pblk_end_bio_sync(struct bio *bio);
void pblk_end_io_sync(struct nvm_rq *rqd);
int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
		       int nr_pages);
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
			 int nr_pages);
void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa);
void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
			   u64 paddr);
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa);
void pblk_update_map_cache(struct pblk *pblk, sector_t lba,
			   struct ppa_addr ppa);
void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
			 struct ppa_addr ppa, struct ppa_addr entry_line);
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
		       struct pblk_line *gc_line);
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
			  u64 *lba_list, int nr_secs);
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
			 sector_t blba, int nr_secs);
/*
 * pblk user I/O write path
 */
int pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
			unsigned long flags);
int pblk_write_gc_to_cache(struct pblk *pblk, void *data, u64 *lba_list,
			   unsigned int nr_entries, unsigned int nr_rec_entries,
			   struct pblk_line *gc_line, unsigned long flags);
/*
 * pblk map
 */
void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
		       unsigned int sentry, unsigned long *lun_bitmap,
		       unsigned int valid_secs, struct ppa_addr *erase_ppa);
void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
		 unsigned long *lun_bitmap, unsigned int valid_secs,
		 unsigned int off);
/*
 * pblk write thread
 */
int pblk_write_ts(void *data);
void pblk_write_timer_fn(unsigned long data);
void pblk_write_should_kick(struct pblk *pblk);
/*
 * pblk read path
 */
extern struct bio_set *pblk_bio_set;
int pblk_submit_read(struct pblk *pblk, struct bio *bio);
int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
			unsigned int nr_secs, unsigned int *secs_to_gc,
			struct pblk_line *line);
/*
 * pblk recovery
 */
void pblk_submit_rec(struct work_struct *work);
struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
int pblk_recov_pad(struct pblk *pblk);
__le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta);
int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
			struct pblk_rec_ctx *recovery, u64 *comp_bits,
			unsigned int comp);
/*
 * pblk gc
 */
#define PBLK_GC_MAX_READERS 8	/* Max number of outstanding GC reader jobs */
#define PBLK_GC_W_QD 128	/* Queue depth for inflight GC write I/Os */
#define PBLK_GC_L_QD 4		/* Queue depth for inflight GC lines */
#define PBLK_GC_RSV_LINE 1	/* Reserved lines for GC */
int pblk_gc_init(struct pblk *pblk);
void pblk_gc_exit(struct pblk *pblk);
void pblk_gc_should_start(struct pblk *pblk);
void pblk_gc_should_stop(struct pblk *pblk);
void pblk_gc_should_kick(struct pblk *pblk);
void pblk_gc_kick(struct pblk *pblk);
void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
			      int *gc_active);
int pblk_gc_sysfs_force(struct pblk *pblk, int force);
/*
 * pblk rate limiter
 */
void pblk_rl_init(struct pblk_rl *rl, int budget);
void pblk_rl_free(struct pblk_rl *rl);
int pblk_rl_high_thrs(struct pblk_rl *rl);
int pblk_rl_low_thrs(struct pblk_rl *rl);
unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
int pblk_rl_sysfs_rate_show(struct pblk_rl *rl);
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line);
void pblk_rl_set_space_limit(struct pblk_rl *rl, int entries_left);
int pblk_rl_is_limit(struct pblk_rl *rl);
/*
 * pblk sysfs
 */
int pblk_sysfs_init(struct gendisk *tdisk);
void pblk_sysfs_exit(struct gendisk *tdisk);
static inline void *pblk_malloc(size_t size, int type, gfp_t flags)
{
	if (type == PBLK_KMALLOC_META)
		return kmalloc(size, flags);
	return vmalloc(size);
}

static inline void pblk_mfree(void *ptr, int type)
{
	if (type == PBLK_KMALLOC_META)
		kfree(ptr);
	else
		vfree(ptr);
}
static inline struct nvm_rq *nvm_rq_from_c_ctx(void *c_ctx)
{
	return c_ctx - sizeof(struct nvm_rq);
}
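
/*
 * Illustrative sketch, not part of the original header: pblk_g_rq_size and
 * pblk_w_rq_size above size the request mempools so that the completion
 * context lives directly behind the struct nvm_rq. That is why the
 * subtraction in nvm_rq_from_c_ctx() is the inverse of nvm_rq_to_pdu():
 *
 *	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
 *	struct nvm_rq *back = nvm_rq_from_c_ctx(c_ctx);    (back == rqd)
 */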
static inline void *emeta_to_bb(struct line_emeta *emeta)
{
	return emeta->bb_bitmap;
}
static inline void *emeta_to_lbas(struct pblk *pblk, struct line_emeta *emeta)
{
	return ((void *)emeta + pblk->lm.emeta_len[1]);
}

static inline void *emeta_to_vsc(struct pblk *pblk, struct line_emeta *emeta)
{
	return (emeta_to_lbas(pblk, emeta) + pblk->lm.emeta_len[2]);
}
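
/*
 * Illustrative layout sketch, not part of the original header: the two
 * helpers above treat the emeta buffer as consecutive regions sized by
 * pblk->lm.emeta_len[],
 *
 *	[ line_emeta + bb bitmap ][ lba list ][ vsc list ]
 *	  emeta_len[1] bytes       emeta_len[2] bytes
 *
 * so emeta_to_lbas() and emeta_to_vsc() reduce to plain offset additions.
 */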
static inline int pblk_line_vsc(struct pblk_line *line)
{
	int vsc;

	spin_lock(&line->lock);
	vsc = le32_to_cpu(*line->vsc);
	spin_unlock(&line->lock);

	return vsc;
}
#define NVM_MEM_PAGE_WRITE (8)
static inline int pblk_pad_distance(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

	return NVM_MEM_PAGE_WRITE * geo->nr_luns * geo->sec_per_pl;
}
static inline int pblk_dev_ppa_to_line(struct ppa_addr p)
{
	return p.g.blk;
}

static inline int pblk_tgt_ppa_to_line(struct ppa_addr p)
{
	return p.g.blk;
}
static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
{
	return p.g.lun * geo->nr_chnls + p.g.ch;
}
/* A block within a line corresponds to the lun */
static inline int pblk_dev_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
{
	return p.g.lun * geo->nr_chnls + p.g.ch;
}
static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
{
	struct ppa_addr ppa64;

	ppa64.ppa = 0;

	if (ppa32 == -1) {
		ppa64.ppa = ADDR_EMPTY;
	} else if (ppa32 & (1U << 31)) {
		ppa64.c.line = ppa32 & ((~0U) >> 1);
		ppa64.c.is_cached = 1;
	} else {
		ppa64.g.blk = (ppa32 & pblk->ppaf.blk_mask) >>
							pblk->ppaf.blk_offset;
		ppa64.g.pg = (ppa32 & pblk->ppaf.pg_mask) >>
							pblk->ppaf.pg_offset;
		ppa64.g.lun = (ppa32 & pblk->ppaf.lun_mask) >>
							pblk->ppaf.lun_offset;
		ppa64.g.ch = (ppa32 & pblk->ppaf.ch_mask) >>
							pblk->ppaf.ch_offset;
		ppa64.g.pl = (ppa32 & pblk->ppaf.pln_mask) >>
							pblk->ppaf.pln_offset;
		ppa64.g.sec = (ppa32 & pblk->ppaf.sec_mask) >>
							pblk->ppaf.sec_offset;
	}

	return ppa64;
}
static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
						 sector_t lba)
{
	struct ppa_addr ppa;

	if (pblk->ppaf_bitsize < 32) {
		u32 *map = (u32 *)pblk->trans_map;

		ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
	} else {
		struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;

		ppa = map[lba];
	}

	return ppa;
}
static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
{
	u32 ppa32 = 0;

	if (ppa64.ppa == ADDR_EMPTY) {
		ppa32 = ~0U;
	} else if (ppa64.c.is_cached) {
		ppa32 |= ppa64.c.line;
		ppa32 |= 1U << 31;
	} else {
		ppa32 |= ppa64.g.blk << pblk->ppaf.blk_offset;
		ppa32 |= ppa64.g.pg << pblk->ppaf.pg_offset;
		ppa32 |= ppa64.g.lun << pblk->ppaf.lun_offset;
		ppa32 |= ppa64.g.ch << pblk->ppaf.ch_offset;
		ppa32 |= ppa64.g.pl << pblk->ppaf.pln_offset;
		ppa32 |= ppa64.g.sec << pblk->ppaf.sec_offset;
	}

	return ppa32;
}
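
/*
 * Illustrative sketch, not part of the original header: the two packing
 * helpers are meant to round-trip for any address that fits in the 32-bit
 * format (bit 31 tags cache lines, ~0U encodes ADDR_EMPTY):
 *
 *	u32 packed = pblk_ppa64_to_ppa32(pblk, ppa64);
 *	struct ppa_addr back = pblk_ppa32_to_ppa64(pblk, packed);
 *
 * after which back.ppa equals ppa64.ppa.
 */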
static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
				      struct ppa_addr ppa)
{
	if (pblk->ppaf_bitsize < 32) {
		u32 *map = (u32 *)pblk->trans_map;

		map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
	} else {
		u64 *map = (u64 *)pblk->trans_map;

		map[lba] = ppa.ppa;
	}
}
static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
					    struct ppa_addr p)
{
	u64 paddr = 0;

	paddr |= (u64)p.g.pg << pblk->ppaf.pg_offset;
	paddr |= (u64)p.g.lun << pblk->ppaf.lun_offset;
	paddr |= (u64)p.g.ch << pblk->ppaf.ch_offset;
	paddr |= (u64)p.g.pl << pblk->ppaf.pln_offset;
	paddr |= (u64)p.g.sec << pblk->ppaf.sec_offset;

	return paddr;
}
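
/*
 * Note, not part of the original header: p.g.blk is deliberately left out
 * above - within a line, the block index is the line id itself, so a
 * line-local address only encodes (pg, lun, ch, pl, sec). The inverse,
 * addr_to_gen_ppa() below, re-inserts the line id as the block field:
 *
 *	u64 paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
 *	struct ppa_addr back = addr_to_gen_ppa(pblk, paddr, ppa.g.blk);
 */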
static inline int pblk_ppa_empty(struct ppa_addr ppa_addr)
{
	return (ppa_addr.ppa == ADDR_EMPTY);
}
static inline void pblk_ppa_set_empty(struct ppa_addr *ppa_addr)
{
	ppa_addr->ppa = ADDR_EMPTY;
}
static inline bool pblk_ppa_comp(struct ppa_addr lppa, struct ppa_addr rppa)
{
	return lppa.ppa == rppa.ppa;
}
static inline int pblk_addr_in_cache(struct ppa_addr ppa)
{
	return (ppa.ppa != ADDR_EMPTY && ppa.c.is_cached);
}
static inline int pblk_addr_to_cacheline(struct ppa_addr ppa)
{
	return ppa.c.line;
}

static inline struct ppa_addr pblk_cacheline_to_addr(int addr)
{
	struct ppa_addr p;

	p.c.line = addr;
	p.c.is_cached = 1;

	return p;
}
static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
					      u64 line_id)
{
	struct ppa_addr ppa;

	ppa.ppa = 0;
	ppa.g.blk = line_id;
	ppa.g.pg = (paddr & pblk->ppaf.pg_mask) >> pblk->ppaf.pg_offset;
	ppa.g.lun = (paddr & pblk->ppaf.lun_mask) >> pblk->ppaf.lun_offset;
	ppa.g.ch = (paddr & pblk->ppaf.ch_mask) >> pblk->ppaf.ch_offset;
	ppa.g.pl = (paddr & pblk->ppaf.pln_mask) >> pblk->ppaf.pln_offset;
	ppa.g.sec = (paddr & pblk->ppaf.sec_mask) >> pblk->ppaf.sec_offset;

	return ppa;
}
static inline struct ppa_addr addr_to_pblk_ppa(struct pblk *pblk, u64 paddr,
					       u64 line_id)
{
	struct ppa_addr ppa;

	ppa = addr_to_gen_ppa(pblk, paddr, line_id);

	return ppa;
}
pblk_calc_meta_header_crc(struct pblk
*pblk
,
1097 struct line_header
*header
)
1101 crc
= crc32_le(crc
, (unsigned char *)header
+ sizeof(crc
),
1102 sizeof(struct line_header
) - sizeof(crc
));
static inline u32 pblk_calc_smeta_crc(struct pblk *pblk,
				      struct line_smeta *smeta)
{
	struct pblk_line_meta *lm = &pblk->lm;
	u32 crc = ~(u32)0;

	crc = crc32_le(crc, (unsigned char *)smeta +
				sizeof(struct line_header) + sizeof(crc),
				lm->smeta_len -
				sizeof(struct line_header) - sizeof(crc));

	return crc;
}
static inline u32 pblk_calc_emeta_crc(struct pblk *pblk,
				      struct line_emeta *emeta)
{
	struct pblk_line_meta *lm = &pblk->lm;
	u32 crc = ~(u32)0;

	crc = crc32_le(crc, (unsigned char *)emeta +
				sizeof(struct line_header) + sizeof(crc),
				lm->emeta_len[0] -
				sizeof(struct line_header) - sizeof(crc));

	return crc;
}
static inline int pblk_set_progr_mode(struct pblk *pblk, int type)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int flags;

	flags = geo->plane_mode >> 1;

	if (type == WRITE)
		flags |= NVM_IO_SCRAMBLE_ENABLE;

	return flags;
}
enum {
	PBLK_READ_RANDOM = 0,
	PBLK_READ_SEQUENTIAL = 1,
};
static inline int pblk_set_read_mode(struct pblk *pblk, int type)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int flags;

	flags = NVM_IO_SUSPEND | NVM_IO_SCRAMBLE_ENABLE;
	if (type == PBLK_READ_SEQUENTIAL)
		flags |= geo->plane_mode >> 1;

	return flags;
}
static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
{
	return !(nr_secs % pblk->min_write_pgs);
}
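
/*
 * Worked example, not part of the original header: with min_write_pgs equal
 * to 8, pblk_io_aligned() accepts nr_secs of 8, 16, 24, ... and rejects 12,
 * so only aligned requests use the sequential (planar) read mode set up by
 * pblk_set_read_mode() above.
 */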
#ifdef CONFIG_NVM_DEBUG
static inline void print_ppa(struct ppa_addr *p, char *msg, int error)
{
	if (p->c.is_cached) {
		pr_err("ppa: (%s: %x) cache line: %llu\n",
				msg, error, (u64)p->c.line);
	} else {
		pr_err("ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n",
			msg, error,
			p->g.ch, p->g.lun, p->g.blk,
			p->g.pg, p->g.pl, p->g.sec);
	}
}
static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd,
					 int error)
{
	int bit = -1;

	if (rqd->nr_ppas == 1) {
		print_ppa(&rqd->ppa_addr, "rqd", error);
		return;
	}

	while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas,
						bit + 1)) < rqd->nr_ppas) {
		print_ppa(&rqd->ppa_list[bit], "rqd", error);
	}

	pr_err("error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
}
#endif
static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
					   struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_geo *geo = &tgt_dev->geo;
	struct ppa_addr *ppa;
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa = &ppas[i];

		if (!ppa->c.is_cached &&
				ppa->g.ch < geo->nr_chnls &&
				ppa->g.lun < geo->luns_per_chnl &&
				ppa->g.pl < geo->nr_planes &&
				ppa->g.blk < geo->blks_per_lun &&
				ppa->g.pg < geo->pgs_per_blk &&
				ppa->g.sec < geo->sec_per_pg)
			continue;

#ifdef CONFIG_NVM_DEBUG
		print_ppa(ppa, "boundary", i);
#endif
		return 1;
	}
	return 0;
}
static inline int pblk_boundary_paddr_checks(struct pblk *pblk, u64 paddr)
{
	struct pblk_line_meta *lm = &pblk->lm;

	if (paddr > lm->sec_per_line)
		return 1;

	return 0;
}
static inline unsigned int pblk_get_bi_idx(struct bio *bio)
{
	return bio->bi_iter.bi_idx;
}
static inline sector_t pblk_get_lba(struct bio *bio)
{
	return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
}
static inline unsigned int pblk_get_secs(struct bio *bio)
{
	return bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE;
}
static inline sector_t pblk_get_sector(sector_t lba)
{
	return lba * NR_PHY_IN_LOG;
}
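
/*
 * Worked example, not part of the original header: with 512B device sectors
 * and a 4KB exposed page, NR_PHY_IN_LOG is 8, so bio sector 16 maps to
 * lba 2 via pblk_get_lba() and lba 2 maps back to bio sector 16 via
 * pblk_get_sector().
 */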
static inline void pblk_setup_uuid(struct pblk *pblk)
{
	uuid_le uuid;

	uuid_le_gen(&uuid);
	memcpy(pblk->instance_uuid, uuid.b, 16);
}
#endif /* PBLK_H_ */