/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.h)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Matias Bjorling <matias@cnexlabs.com>
 * Write buffering: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Physical Block-device target for Open-channel SSDs.
 *
 */

#ifndef PBLK_H_
#define PBLK_H_

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/uuid.h>

#include <linux/lightnvm.h>

/* Run only GC if less than 1/X blocks are free */
#define GC_LIMIT_INVERSE 5
#define GC_TIME_MSECS 1000

#define PBLK_SECTOR (512)
#define PBLK_EXPOSED_PAGE_SIZE (4096)
#define PBLK_MAX_REQ_ADDRS (64)
#define PBLK_MAX_REQ_ADDRS_PW (6)

#define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)

#define PBLK_COMMAND_TIMEOUT_MS 30000

/* Max 512 LUNs per device */
#define PBLK_MAX_LUNS_BITMAP (4)

#define NR_PHY_IN_LOG (PBLK_EXPOSED_PAGE_SIZE / PBLK_SECTOR)

#define pblk_for_each_lun(pblk, rlun, i) \
	for ((i) = 0, rlun = &(pblk)->luns[0]; \
			(i) < (pblk)->nr_luns; (i)++, rlun = &(pblk)->luns[(i)])

#define ERASE 2 /* READ = 0, WRITE = 1 */

enum {
	/* IO Types */
	PBLK_IOTYPE_USER = 1 << 0,
	PBLK_IOTYPE_GC = 1 << 1,

	/* Write buffer flags */
	PBLK_FLUSH_ENTRY = 1 << 2,
	PBLK_WRITTEN_DATA = 1 << 3,
	PBLK_SUBMITTED_ENTRY = 1 << 4,
	PBLK_WRITABLE_ENTRY = 1 << 5,
};

enum {
	PBLK_BLK_ST_OPEN = 0x1,
	PBLK_BLK_ST_CLOSED = 0x2,
};

/* The number of GC lists and the rate-limiter states go together. This way the
 * rate-limiter can dictate how much GC is needed based on resource utilization.
 */
#define PBLK_NR_GC_LISTS 3
#define PBLK_MAX_GC_JOBS 32

enum {
	PBLK_RL_HIGH = 1,
	PBLK_RL_MID = 2,
	PBLK_RL_LOW = 3,
};

struct pblk_sec_meta {
	u64 reserved;
	__le64 lba;
};

#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * PBLK_MAX_REQ_ADDRS)

/* write buffer completion context */
struct pblk_c_ctx {
	struct list_head list;		/* Head for out-of-order completion */

	unsigned long *lun_bitmap;	/* LUNs used on current request */
	unsigned int sentry;
	unsigned int nr_valid;
	unsigned int nr_padded;
};

/* generic context */
struct pblk_g_ctx {
	void *private;
};

/* Recovery context */
struct pblk_rec_ctx {
	struct pblk *pblk;
	struct nvm_rq *rqd;
	struct list_head failed;
	struct work_struct ws_rec;
};

/* Write context */
struct pblk_w_ctx {
	struct bio_list bios;		/* Original bios - used for completion
					 * in REQ_FUA, REQ_FLUSH case
					 */
	u64 lba;			/* Logical addr. associated with entry */
	struct ppa_addr ppa;		/* Physical addr. associated with entry */
	int flags;			/* Write context flags */
};

struct pblk_rb_entry {
	struct ppa_addr cacheline;	/* Cacheline for this entry */
	void *data;			/* Pointer to data on this entry */
	struct pblk_w_ctx w_ctx;	/* Context for this entry */
	struct list_head index;		/* List head to enable indexes */
};

#define EMPTY_ENTRY (~0U)

struct pblk_rb_pages {
	struct page *pages;
	int order;
	struct list_head list;
};

struct pblk_rb {
	struct pblk_rb_entry *entries;	/* Ring buffer entries */
	unsigned int mem;		/* Write offset - points to next
					 * writable entry in memory
					 */
	unsigned int subm;		/* Read offset - points to last entry
					 * that has been submitted to the media
					 * to be persisted
					 */
	unsigned int sync;		/* Synced - backpointer that signals
					 * the last submitted entry that has
					 * been successfully persisted to media
					 */
	unsigned int sync_point;	/* Sync point - last entry that must be
					 * flushed to the media. Used with
					 * REQ_FLUSH and REQ_FUA
					 */
	unsigned int l2p_update;	/* l2p update point - next entry for
					 * which l2p mapping will be updated to
					 * contain a device ppa address (instead
					 * of a cacheline)
					 */
	unsigned int nr_entries;	/* Number of entries in write buffer -
					 * must be a power of two
					 */
	unsigned int seg_size;		/* Size of the data segments being
					 * stored on each entry. Typically this
					 * will be 4KB
					 */

	struct list_head pages;		/* List of data pages */

	spinlock_t w_lock;		/* Write lock */
	spinlock_t s_lock;		/* Sync lock */

#ifdef CONFIG_NVM_DEBUG
	atomic_t inflight_sync_point;	/* Not served REQ_FLUSH | REQ_FUA */
#endif
};
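
/*
 * Illustrative sketch (not part of the upstream header): because nr_entries
 * is a power of two, the distance between any two ring-buffer pointers
 * (e.g. entries written to memory but not yet submitted) can be computed
 * with a mask instead of a modulo. The helper name below is hypothetical,
 * shown only to relate the mem/subm/sync pointers documented above.
 */
static inline unsigned int pblk_rb_ring_distance_example(struct pblk_rb *rb,
							 unsigned int head,
							 unsigned int tail)
{
	/* Wrap-safe with unsigned arithmetic even if head has wrapped */
	return (head - tail) & (rb->nr_entries - 1);
}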

#define PBLK_RECOVERY_SECTORS 16

struct pblk_lun {
	struct ppa_addr bppa;

	u8 *bb_list;			/* Bad block list for LUN. Only used on
					 * bring up. Bad blocks are managed
					 * within lines on run-time.
					 */

	struct semaphore wr_sem;
};

struct pblk_gc_rq {
	struct pblk_line *line;
	void *data;
	u64 *lba_list;
	int nr_secs;
	int secs_to_gc;
	struct list_head list;
};

struct pblk_gc {
	int gc_active;
	int gc_enabled;
	int gc_forced;
	int gc_jobs_active;
	atomic_t inflight_gc;

	struct task_struct *gc_ts;
	struct task_struct *gc_writer_ts;
	struct workqueue_struct *gc_reader_wq;
	struct timer_list gc_timer;

	int w_entries;
	struct list_head w_list;

	spinlock_t lock;
	spinlock_t w_lock;
};

struct pblk_rl {
	unsigned int high;	/* Upper threshold for rate limiter (free run -
				 * user I/O rate limiter)
				 */
	unsigned int low;	/* Lower threshold for rate limiter (user I/O
				 * rate limiter - stall)
				 */
	unsigned int high_pw;	/* High rounded up as a power of 2 */

#define PBLK_USER_HIGH_THRS 2	/* Begin write limit at 50 percent
				 * available blks
				 */
#define PBLK_USER_LOW_THRS 20	/* Aggressive GC at 5% available blocks */

	int rb_windows_pw;	/* Number of rate windows in the write buffer
				 * given as a power-of-2. This guarantees that
				 * when user I/O is being rate limited, there
				 * will be enough space reserved for the GC to
				 * place its payload. A window is of
				 * pblk->max_write_pgs size, which in NVMe is
				 * 64, i.e., 256kb.
				 */
	int rb_budget;		/* Total number of entries available for I/O */
	int rb_user_max;	/* Max buffer entries available for user I/O */
	atomic_t rb_user_cnt;	/* User I/O buffer counter */
	int rb_gc_max;		/* Max buffer entries available for GC I/O */
	int rb_gc_rsv;		/* Reserved buffer entries for GC I/O */
	int rb_state;		/* Rate-limiter current state */
	atomic_t rb_gc_cnt;	/* GC I/O buffer counter */

	int rb_user_active;
	struct timer_list u_timer;

	unsigned long long nr_secs;
	unsigned long total_blocks;
	atomic_t free_blocks;
};

#define PBLK_LINE_EMPTY (~0U)

enum {
	/* Line Types */
	PBLK_LINETYPE_FREE = 0,
	PBLK_LINETYPE_LOG = 1,
	PBLK_LINETYPE_DATA = 2,

	/* Line state */
	PBLK_LINESTATE_FREE = 10,
	PBLK_LINESTATE_OPEN = 11,
	PBLK_LINESTATE_CLOSED = 12,
	PBLK_LINESTATE_GC = 13,
	PBLK_LINESTATE_BAD = 14,
	PBLK_LINESTATE_CORRUPT = 15,

	/* GC group */
	PBLK_LINEGC_NONE = 20,
	PBLK_LINEGC_EMPTY = 21,
	PBLK_LINEGC_LOW = 22,
	PBLK_LINEGC_MID = 23,
	PBLK_LINEGC_HIGH = 24,
	PBLK_LINEGC_FULL = 25,
};

#define PBLK_MAGIC 0x70626c6b /* pblk */

struct line_header {
	__le32 crc;
	__le32 identifier;	/* pblk identifier */
	__u8 uuid[16];		/* instance uuid */
	__le16 type;		/* line type */
	__le16 version;		/* type version */
	__le32 id;		/* line id for current line */
};

struct line_smeta {
	struct line_header header;

	__le32 crc;		/* Full structure including struct crc */
	/* Previous line metadata */
	__le32 prev_id;		/* Line id for previous line */

	/* Current line metadata */
	__le64 seq_nr;		/* Sequence number for current line */

	/* Active writers */
	__le32 window_wr_lun;	/* Number of parallel LUNs to write */

	__le32 rsvd[2];

	__le64 lun_bitmap[];
};

/*
 * Metadata layout in media:
 *	First sector:
 *		1. struct line_emeta
 *		2. bad block bitmap (u64 * window_wr_lun)
 *	Mid sectors (start at lbas_sector):
 *		3. nr_lbas (u64) forming lba list
 *	Last sectors (start at vsc_sector):
 *		4. u32 valid sector count (vsc) for all lines (~0U: free line)
 */
struct line_emeta {
	struct line_header header;

	__le32 crc;		/* Full structure including struct crc */

	/* Previous line metadata */
	__le32 prev_id;		/* Line id for prev line */

	/* Current line metadata */
	__le64 seq_nr;		/* Sequence number for current line */

	/* Active writers */
	__le32 window_wr_lun;	/* Number of parallel LUNs to write */

	/* Bookkeeping for recovery */
	__le32 next_id;		/* Line id for next line */
	__le64 nr_lbas;		/* Number of lbas mapped in line */
	__le64 nr_valid_lbas;	/* Number of valid lbas mapped in line */
	__le64 bb_bitmap[];	/* Updated bad block bitmap for line */
};

struct pblk_emeta {
	struct line_emeta *buf;		/* emeta buffer in media format */
	int mem;			/* Write offset - points to next
					 * writable entry in memory
					 */
	atomic_t sync;			/* Synced - backpointer that signals the
					 * last entry that has been successfully
					 * persisted to media
					 */
	unsigned int nr_entries;	/* Number of emeta entries */
};

struct pblk_smeta {
	struct line_smeta *buf;		/* smeta buffer in persistent format */
};

struct pblk_line {
	struct pblk *pblk;
	unsigned int id;		/* Line number corresponds to the
					 * block line
					 */
	unsigned int seq_nr;		/* Unique line sequence number */

	int state;			/* PBLK_LINESTATE_X */
	int type;			/* PBLK_LINETYPE_X */
	int gc_group;			/* PBLK_LINEGC_X */
	struct list_head list;		/* Free, GC lists */

	unsigned long *lun_bitmap;	/* Bitmap for LUNs mapped in line */

	struct pblk_smeta *smeta;	/* Start metadata */
	struct pblk_emeta *emeta;	/* End metadata */

	int meta_line;			/* Metadata line id */
	int meta_distance;		/* Distance between data and metadata */

	u64 smeta_ssec;			/* Sector where smeta starts */
	u64 emeta_ssec;			/* Sector where emeta starts */

	unsigned int sec_in_line;	/* Number of usable secs in line */

	atomic_t blk_in_line;		/* Number of good blocks in line */
	unsigned long *blk_bitmap;	/* Bitmap for valid/invalid blocks */
	unsigned long *erase_bitmap;	/* Bitmap for erased blocks */

	unsigned long *map_bitmap;	/* Bitmap for mapped sectors in line */
	unsigned long *invalid_bitmap;	/* Bitmap for invalid sectors in line */

	atomic_t left_eblks;		/* Blocks left for erasing */
	atomic_t left_seblks;		/* Blocks left for sync erasing */

	int left_msecs;			/* Sectors left for mapping */
	int left_ssecs;			/* Sectors left to sync */
	unsigned int cur_sec;		/* Sector map pointer */
	unsigned int nr_valid_lbas;	/* Number of valid lbas in line */

	__le32 *vsc;			/* Valid sector count in line */

	struct kref ref;		/* Write buffer L2P references */

	spinlock_t lock;		/* Necessary for invalid_bitmap only */
};

#define PBLK_DATA_LINES 4

enum {
	PBLK_KMALLOC_META = 1,
	PBLK_VMALLOC_META = 2,
};

enum {
	PBLK_EMETA_TYPE_HEADER = 1,	/* struct line_emeta first sector */
	PBLK_EMETA_TYPE_LLBA = 2,	/* lba list - type: __le64 */
	PBLK_EMETA_TYPE_VSC = 3,	/* vsc list - type: __le32 */
};

struct pblk_line_mgmt {
	int nr_lines;			/* Total number of full lines */
	int nr_free_lines;		/* Number of full lines in free list */

	/* Free lists - use free_lock */
	struct list_head free_list;	/* Full lines ready to use */
	struct list_head corrupt_list;	/* Full lines corrupted */
	struct list_head bad_list;	/* Full lines bad */

	/* GC lists - use gc_lock */
	struct list_head *gc_lists[PBLK_NR_GC_LISTS];
	struct list_head gc_high_list;	/* Full lines ready to GC, high isc */
	struct list_head gc_mid_list;	/* Full lines ready to GC, mid isc */
	struct list_head gc_low_list;	/* Full lines ready to GC, low isc */

	struct list_head gc_full_list;	/* Full lines ready to GC, no valid */
	struct list_head gc_empty_list;	/* Full lines closed, all valid */

	struct pblk_line *log_line;	/* Current FTL log line */
	struct pblk_line *data_line;	/* Current data line */
	struct pblk_line *log_next;	/* Next FTL log line */
	struct pblk_line *data_next;	/* Next data line */

	struct list_head emeta_list;	/* Lines queued to schedule emeta */

	__le32 *vsc_list;		/* Valid sector counts for all lines */

	/* Metadata allocation type: VMALLOC | KMALLOC */
	int smeta_alloc_type;
	int emeta_alloc_type;

	/* Pre-allocated metadata for data lines */
	struct pblk_smeta *sline_meta[PBLK_DATA_LINES];
	struct pblk_emeta *eline_meta[PBLK_DATA_LINES];
	unsigned long meta_bitmap;

	/* Helpers for fast bitmap calculations */
	unsigned long *bb_template;
	unsigned long *bb_aux;

	unsigned long d_seq_nr;		/* Data line unique sequence number */
	unsigned long l_seq_nr;		/* Log line unique sequence number */

	spinlock_t free_lock;
	spinlock_t close_lock;
	spinlock_t gc_lock;
};

struct pblk_line_meta {
	unsigned int smeta_len;		/* Total length for smeta */
	unsigned int smeta_sec;		/* Sectors needed for smeta */

	unsigned int emeta_len[4];	/* Lengths for emeta:
					 *  [0]: Total length
					 *  [1]: struct line_emeta length
					 *  [2]: L2P portion length
					 *  [3]: vsc list length
					 */
	unsigned int emeta_sec[4];	/* Sectors needed for emeta. Same layout
					 * as emeta_len
					 */

	unsigned int emeta_bb;		/* Boundary for bb that affects emeta */

	unsigned int vsc_list_len;	/* Length for vsc list */
	unsigned int sec_bitmap_len;	/* Length for sector bitmap in line */
	unsigned int blk_bitmap_len;	/* Length for block bitmap in line */
	unsigned int lun_bitmap_len;	/* Length for lun bitmap in line */

	unsigned int blk_per_line;	/* Number of blocks in a full line */
	unsigned int sec_per_line;	/* Number of sectors in a line */
	unsigned int dsec_per_line;	/* Number of data sectors in a line */
	unsigned int min_blk_line;	/* Min. number of good blocks in line */

	unsigned int mid_thrs;		/* Threshold for GC mid list */
	unsigned int high_thrs;		/* Threshold for GC high list */

	unsigned int meta_distance;	/* Distance between data and metadata */
};

struct pblk_addr_format {
	u64 ch_mask;
	u64 lun_mask;
	u64 pln_mask;
	u64 blk_mask;
	u64 pg_mask;
	u64 sec_mask;
	u8 ch_offset;
	u8 lun_offset;
	u8 pln_offset;
	u8 blk_offset;
	u8 pg_offset;
	u8 sec_offset;
};

struct pblk {
	struct nvm_tgt_dev *dev;
	struct gendisk *disk;

	struct kobject kobj;

	struct pblk_lun *luns;

	struct pblk_line *lines;	/* Line array */
	struct pblk_line_mgmt l_mg;	/* Line management */
	struct pblk_line_meta lm;	/* Line metadata */

	int ppaf_bitsize;
	struct pblk_addr_format ppaf;

	struct pblk_rb rwb;

	int min_write_pgs; /* Minimum amount of pages required by controller */
	int max_write_pgs; /* Maximum amount of pages supported by controller */
	int pgs_in_buffer; /* Number of pages that need to be held in buffer to
			    * guarantee successful reads.
			    */

	sector_t capacity; /* Device capacity when bad blocks are subtracted */
	int over_pct;      /* Percentage of device used for over-provisioning */

	/* pblk provisioning values. Used by rate limiter */
	struct pblk_rl rl;

	int sec_per_write;

	unsigned char instance_uuid[16];
#ifdef CONFIG_NVM_DEBUG
	/* All debug counters apply to 4kb sector I/Os */
	atomic_long_t inflight_writes;	/* Inflight writes (user and gc) */
	atomic_long_t padded_writes;	/* Sectors padded due to flush/fua */
	atomic_long_t padded_wb;	/* Sectors padded in write buffer */
	atomic_long_t nr_flush;		/* Number of flush/fua I/O */
	atomic_long_t req_writes;	/* Sectors stored on write buffer */
	atomic_long_t sub_writes;	/* Sectors submitted from buffer */
	atomic_long_t sync_writes;	/* Sectors synced to media */
	atomic_long_t compl_writes;	/* Sectors completed in write bio */
	atomic_long_t inflight_reads;	/* Inflight sector read requests */
	atomic_long_t cache_reads;	/* Read requests that hit the cache */
	atomic_long_t sync_reads;	/* Completed sector read requests */
	atomic_long_t recov_writes;	/* Sectors submitted from recovery */
	atomic_long_t recov_gc_writes;	/* Sectors submitted from write GC */
	atomic_long_t recov_gc_reads;	/* Sectors submitted from read GC */
#endif

	spinlock_t lock;

	atomic_long_t read_failed;
	atomic_long_t read_empty;
	atomic_long_t read_high_ecc;
	atomic_long_t read_failed_gc;
	atomic_long_t write_failed;
	atomic_long_t erase_failed;

	struct task_struct *writer_ts;

	/* Simple translation map of logical addresses to physical addresses.
	 * The logical addresses are known by the host system, while the
	 * physical addresses are used when writing to the disk block device.
	 */
	unsigned char *trans_map;
	spinlock_t trans_lock;

	struct list_head compl_list;

	mempool_t *page_pool;
	mempool_t *line_ws_pool;
	mempool_t *rec_pool;
	mempool_t *g_rq_pool;
	mempool_t *w_rq_pool;
	mempool_t *line_meta_pool;

	struct workqueue_struct *kw_wq;
	struct timer_list wtimer;

	struct pblk_gc gc;
};

struct pblk_line_ws {
	struct pblk *pblk;
	struct pblk_line *line;
	void *priv;
	struct work_struct ws;
};

#define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx))
#define pblk_w_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx))

/*
 * pblk ring buffer operations
 */
int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
		 unsigned int power_size, unsigned int power_seg_sz);
unsigned int pblk_rb_calculate_size(unsigned int nr_entries);
void *pblk_rb_entries_ref(struct pblk_rb *rb);
int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
			   unsigned int nr_entries, unsigned int *pos);
int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
			 unsigned int *pos);
void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
			      struct pblk_w_ctx w_ctx, unsigned int pos);
void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
			    struct pblk_w_ctx w_ctx, struct pblk_line *gc_line,
			    unsigned int pos);
struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos);

void pblk_rb_sync_l2p(struct pblk_rb *rb);
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
				 struct bio *bio, unsigned int pos,
				 unsigned int nr_entries, unsigned int count);
unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
				      struct list_head *list,
				      unsigned int max);
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
			u64 pos, int bio_iter);
unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);

unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
					      struct ppa_addr *ppa);
void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
unsigned int pblk_rb_sync_point_count(struct pblk_rb *rb);

unsigned int pblk_rb_read_count(struct pblk_rb *rb);
unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos);

int pblk_rb_tear_down_check(struct pblk_rb *rb);
int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos);
void pblk_rb_data_free(struct pblk_rb *rb);
ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);

/*
 * pblk core
 */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw);
void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
			struct pblk_c_ctx *c_ctx);
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw);
void pblk_flush_writer(struct pblk *pblk);
struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba);
void pblk_discard(struct pblk *pblk, struct bio *bio);
void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
			      unsigned int nr_secs, unsigned int len,
			      gfp_t gfp_mask);
struct pblk_line *pblk_line_get(struct pblk *pblk);
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
struct pblk_line *pblk_line_replace_data(struct pblk *pblk);
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
struct pblk_line *pblk_line_get_data(struct pblk *pblk);
struct pblk_line *pblk_line_get_erase(struct pblk *pblk);
int pblk_line_erase(struct pblk *pblk, struct pblk_line *line);
int pblk_line_is_full(struct pblk_line *line);
void pblk_line_free(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close_ws(struct work_struct *work);
void pblk_line_mark_bb(struct work_struct *work);
void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		      void (*work)(struct work_struct *));
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line);
int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
			 void *emeta_buf);
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr erase_ppa);
void pblk_line_put(struct kref *ref);
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line);
u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line);
void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
		   unsigned long secs_to_flush);
void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		  unsigned long *lun_bitmap);
void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		unsigned long *lun_bitmap);
void pblk_end_bio_sync(struct bio *bio);
void pblk_end_io_sync(struct nvm_rq *rqd);
int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
		       int nr_pages);
void pblk_map_pad_invalidate(struct pblk *pblk, struct pblk_line *line,
			     u64 paddr);
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
			 int nr_pages);
void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa);
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa);
void pblk_update_map_cache(struct pblk *pblk, sector_t lba,
			   struct ppa_addr ppa);
void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
			 struct ppa_addr ppa, struct ppa_addr entry_line);
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
		       struct pblk_line *gc_line);
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
			  u64 *lba_list, int nr_secs);
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
			 sector_t blba, int nr_secs);

/*
 * pblk user I/O write path
 */
int pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
			unsigned long flags);
int pblk_write_gc_to_cache(struct pblk *pblk, void *data, u64 *lba_list,
			   unsigned int nr_entries, unsigned int nr_rec_entries,
			   struct pblk_line *gc_line, unsigned long flags);

/*
 * pblk map
 */
void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
		       unsigned int sentry, unsigned long *lun_bitmap,
		       unsigned int valid_secs, struct ppa_addr *erase_ppa);
void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
		 unsigned long *lun_bitmap, unsigned int valid_secs,
		 unsigned int off);

/*
 * pblk write thread
 */
int pblk_write_ts(void *data);
void pblk_write_timer_fn(unsigned long data);
void pblk_write_should_kick(struct pblk *pblk);

/*
 * pblk read path
 */
extern struct bio_set *pblk_bio_set;
int pblk_submit_read(struct pblk *pblk, struct bio *bio);
int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
			unsigned int nr_secs, unsigned int *secs_to_gc,
			struct pblk_line *line);
/*
 * pblk recovery
 */
void pblk_submit_rec(struct work_struct *work);
struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
void pblk_recov_pad(struct pblk *pblk);
__le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta);
int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
			struct pblk_rec_ctx *recovery, u64 *comp_bits,
			unsigned int comp);

/*
 * pblk gc
 */
#define PBLK_GC_TRIES 3

int pblk_gc_init(struct pblk *pblk);
void pblk_gc_exit(struct pblk *pblk);
void pblk_gc_should_start(struct pblk *pblk);
void pblk_gc_should_stop(struct pblk *pblk);
int pblk_gc_status(struct pblk *pblk);
void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
			      int *gc_active);
void pblk_gc_sysfs_force(struct pblk *pblk, int force);

/*
 * pblk rate limiter
 */
void pblk_rl_init(struct pblk_rl *rl, int budget);
void pblk_rl_free(struct pblk_rl *rl);
int pblk_rl_gc_thrs(struct pblk_rl *rl);
unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
void pblk_rl_set_gc_rsc(struct pblk_rl *rl, int rsv);
int pblk_rl_sysfs_rate_show(struct pblk_rl *rl);
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line);

/*
 * pblk sysfs
 */
int pblk_sysfs_init(struct gendisk *tdisk);
void pblk_sysfs_exit(struct gendisk *tdisk);

static inline void *pblk_malloc(size_t size, int type, gfp_t flags)
{
	if (type == PBLK_KMALLOC_META)
		return kmalloc(size, flags);
	return vmalloc(size);
}

static inline void pblk_mfree(void *ptr, int type)
{
	if (type == PBLK_KMALLOC_META)
		kfree(ptr);
	else
		vfree(ptr);
}

static inline struct nvm_rq *nvm_rq_from_c_ctx(void *c_ctx)
{
	return c_ctx - sizeof(struct nvm_rq);
}

static inline void *emeta_to_bb(struct line_emeta *emeta)
{
	return emeta->bb_bitmap;
}

static inline void *emeta_to_lbas(struct pblk *pblk, struct line_emeta *emeta)
{
	return ((void *)emeta + pblk->lm.emeta_len[1]);
}

static inline void *emeta_to_vsc(struct pblk *pblk, struct line_emeta *emeta)
{
	return (emeta_to_lbas(pblk, emeta) + pblk->lm.emeta_len[2]);
}

#define NVM_MEM_PAGE_WRITE (8)

static inline int pblk_pad_distance(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

	return NVM_MEM_PAGE_WRITE * geo->nr_luns * geo->sec_per_pl;
}

static inline int pblk_dev_ppa_to_line(struct ppa_addr p)
{
	return p.g.blk;
}

static inline int pblk_tgt_ppa_to_line(struct ppa_addr p)
{
	return p.g.blk;
}

static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
{
	return p.g.lun * geo->nr_chnls + p.g.ch;
}

/* A block within a line corresponds to the lun */
static inline int pblk_dev_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
{
	return p.g.lun * geo->nr_chnls + p.g.ch;
}

static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
{
	struct ppa_addr ppa64;

	ppa64.ppa = 0;

	if (ppa32 == -1) {
		ppa64.ppa = ADDR_EMPTY;
	} else if (ppa32 & (1U << 31)) {
		ppa64.c.line = ppa32 & ((~0U) >> 1);
		ppa64.c.is_cached = 1;
	} else {
		ppa64.g.blk = (ppa32 & pblk->ppaf.blk_mask) >>
							pblk->ppaf.blk_offset;
		ppa64.g.pg = (ppa32 & pblk->ppaf.pg_mask) >>
							pblk->ppaf.pg_offset;
		ppa64.g.lun = (ppa32 & pblk->ppaf.lun_mask) >>
							pblk->ppaf.lun_offset;
		ppa64.g.ch = (ppa32 & pblk->ppaf.ch_mask) >>
							pblk->ppaf.ch_offset;
		ppa64.g.pl = (ppa32 & pblk->ppaf.pln_mask) >>
							pblk->ppaf.pln_offset;
		ppa64.g.sec = (ppa32 & pblk->ppaf.sec_mask) >>
							pblk->ppaf.sec_offset;
	}

	return ppa64;
}

static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
						 sector_t lba)
{
	struct ppa_addr ppa;

	if (pblk->ppaf_bitsize < 32) {
		u32 *map = (u32 *)pblk->trans_map;

		ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
	} else {
		struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;

		ppa = map[lba];
	}

	return ppa;
}

static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
{
	u32 ppa32 = 0;

	if (ppa64.ppa == ADDR_EMPTY) {
		ppa32 = ~0U;
	} else if (ppa64.c.is_cached) {
		ppa32 |= ppa64.c.line;
		ppa32 |= 1U << 31;
	} else {
		ppa32 |= ppa64.g.blk << pblk->ppaf.blk_offset;
		ppa32 |= ppa64.g.pg << pblk->ppaf.pg_offset;
		ppa32 |= ppa64.g.lun << pblk->ppaf.lun_offset;
		ppa32 |= ppa64.g.ch << pblk->ppaf.ch_offset;
		ppa32 |= ppa64.g.pl << pblk->ppaf.pln_offset;
		ppa32 |= ppa64.g.sec << pblk->ppaf.sec_offset;
	}

	return ppa32;
}

static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
				      struct ppa_addr ppa)
{
	if (pblk->ppaf_bitsize < 32) {
		u32 *map = (u32 *)pblk->trans_map;

		map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
	} else {
		u64 *map = (u64 *)pblk->trans_map;

		map[lba] = ppa.ppa;
	}
}
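
/*
 * Illustrative sketch (not part of the upstream header): the L2P table
 * accessed above stores compact 32-bit entries when the device address
 * format fits in 32 bits and full ppa_addr entries otherwise. The sizing
 * helper below is hypothetical and only mirrors that decision to show the
 * memory trade-off.
 */
static inline size_t pblk_trans_map_size_example(struct pblk *pblk,
						 sector_t nr_lbas)
{
	size_t entry_size = (pblk->ppaf_bitsize < 32) ?
				sizeof(u32) : sizeof(struct ppa_addr);

	/* One entry per logical 4KB sector exposed by the target */
	return entry_size * nr_lbas;
}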

static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
					    struct ppa_addr p)
{
	u64 paddr;

	paddr = 0;
	paddr |= (u64)p.g.pg << pblk->ppaf.pg_offset;
	paddr |= (u64)p.g.lun << pblk->ppaf.lun_offset;
	paddr |= (u64)p.g.ch << pblk->ppaf.ch_offset;
	paddr |= (u64)p.g.pl << pblk->ppaf.pln_offset;
	paddr |= (u64)p.g.sec << pblk->ppaf.sec_offset;

	return paddr;
}

static inline int pblk_ppa_empty(struct ppa_addr ppa_addr)
{
	return (ppa_addr.ppa == ADDR_EMPTY);
}

static inline void pblk_ppa_set_empty(struct ppa_addr *ppa_addr)
{
	ppa_addr->ppa = ADDR_EMPTY;
}

static inline int pblk_addr_in_cache(struct ppa_addr ppa)
{
	return (ppa.ppa != ADDR_EMPTY && ppa.c.is_cached);
}

static inline int pblk_addr_to_cacheline(struct ppa_addr ppa)
{
	return ppa.c.line;
}

static inline struct ppa_addr pblk_cacheline_to_addr(int addr)
{
	struct ppa_addr p;

	p.c.line = addr;
	p.c.is_cached = 1;

	return p;
}

static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
					      u64 line_id)
{
	struct ppa_addr ppa;

	ppa.ppa = 0;
	ppa.g.blk = line_id;
	ppa.g.pg = (paddr & pblk->ppaf.pg_mask) >> pblk->ppaf.pg_offset;
	ppa.g.lun = (paddr & pblk->ppaf.lun_mask) >> pblk->ppaf.lun_offset;
	ppa.g.ch = (paddr & pblk->ppaf.ch_mask) >> pblk->ppaf.ch_offset;
	ppa.g.pl = (paddr & pblk->ppaf.pln_mask) >> pblk->ppaf.pln_offset;
	ppa.g.sec = (paddr & pblk->ppaf.sec_mask) >> pblk->ppaf.sec_offset;

	return ppa;
}

static inline struct ppa_addr addr_to_pblk_ppa(struct pblk *pblk, u64 paddr,
					       u64 line_id)
{
	struct ppa_addr ppa;

	ppa = addr_to_gen_ppa(pblk, paddr, line_id);

	return ppa;
}
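
/*
 * Illustrative sketch (not part of the upstream header): a line-local
 * paddr plus its owning line id round-trip through the generic ppa format,
 * for addresses that fit the device address format in pblk->ppaf. The
 * helper name is hypothetical; it only relates addr_to_gen_ppa() above to
 * pblk_dev_ppa_to_line_addr() and pblk_dev_ppa_to_line().
 */
static inline int pblk_paddr_roundtrip_example(struct pblk *pblk, u64 paddr,
					       u64 line_id)
{
	struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);

	/* Recovering both inputs shows the two encodings are inverses */
	return pblk_dev_ppa_to_line_addr(pblk, ppa) == paddr &&
			pblk_dev_ppa_to_line(ppa) == (int)line_id;
}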

static inline u32 pblk_calc_meta_header_crc(struct pblk *pblk,
					    struct line_header *header)
{
	u32 crc = ~(u32)0;

	crc = crc32_le(crc, (unsigned char *)header + sizeof(crc),
				sizeof(struct line_header) - sizeof(crc));

	return crc;
}

static inline u32 pblk_calc_smeta_crc(struct pblk *pblk,
				      struct line_smeta *smeta)
{
	struct pblk_line_meta *lm = &pblk->lm;
	u32 crc = ~(u32)0;

	crc = crc32_le(crc, (unsigned char *)smeta +
				sizeof(struct line_header) + sizeof(crc),
				lm->smeta_len -
				sizeof(struct line_header) - sizeof(crc));

	return crc;
}

static inline u32 pblk_calc_emeta_crc(struct pblk *pblk,
				      struct line_emeta *emeta)
{
	struct pblk_line_meta *lm = &pblk->lm;
	u32 crc = ~(u32)0;

	crc = crc32_le(crc, (unsigned char *)emeta +
				sizeof(struct line_header) + sizeof(crc),
				lm->emeta_len[0] -
				sizeof(struct line_header) - sizeof(crc));

	return crc;
}

static inline int pblk_set_progr_mode(struct pblk *pblk, int type)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int flags;

	flags = geo->plane_mode >> 1;

	if (type == WRITE)
		flags |= NVM_IO_SCRAMBLE_ENABLE;

	return flags;
}

static inline int pblk_set_read_mode(struct pblk *pblk)
{
	return NVM_IO_SNGL_ACCESS | NVM_IO_SUSPEND | NVM_IO_SCRAMBLE_ENABLE;
}

#ifdef CONFIG_NVM_DEBUG
static inline void print_ppa(struct ppa_addr *p, char *msg, int error)
{
	if (p->c.is_cached) {
		pr_err("ppa: (%s: %x) cache line: %llu\n",
				msg, error, (u64)p->c.line);
	} else {
		pr_err("ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n",
			msg, error,
			p->g.ch, p->g.lun, p->g.blk,
			p->g.pg, p->g.pl, p->g.sec);
	}
}

static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd,
					 int error)
{
	int bit = -1;

	if (rqd->nr_ppas == 1) {
		print_ppa(&rqd->ppa_addr, "rqd", error);
		return;
	}

	while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas,
						bit + 1)) < rqd->nr_ppas) {
		print_ppa(&rqd->ppa_list[bit], "rqd", error);
	}

	pr_err("error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
}
#endif

static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
					   struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_geo *geo = &tgt_dev->geo;
	struct ppa_addr *ppa;
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa = &ppas[i];

		if (!ppa->c.is_cached &&
				ppa->g.ch < geo->nr_chnls &&
				ppa->g.lun < geo->luns_per_chnl &&
				ppa->g.pl < geo->nr_planes &&
				ppa->g.blk < geo->blks_per_lun &&
				ppa->g.pg < geo->pgs_per_blk &&
				ppa->g.sec < geo->sec_per_pg)
			continue;

#ifdef CONFIG_NVM_DEBUG
		print_ppa(ppa, "boundary", i);
#endif
		return 1;
	}
	return 0;
}

static inline int pblk_boundary_paddr_checks(struct pblk *pblk, u64 paddr)
{
	struct pblk_line_meta *lm = &pblk->lm;

	if (paddr > lm->sec_per_line)
		return 1;

	return 0;
}

static inline unsigned int pblk_get_bi_idx(struct bio *bio)
{
	return bio->bi_iter.bi_idx;
}

static inline sector_t pblk_get_lba(struct bio *bio)
{
	return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
}

static inline unsigned int pblk_get_secs(struct bio *bio)
{
	return bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE;
}

static inline sector_t pblk_get_sector(sector_t lba)
{
	return lba * NR_PHY_IN_LOG;
}

static inline void pblk_setup_uuid(struct pblk *pblk)
{
	uuid_le uuid;

	uuid_le_gen(&uuid);
	memcpy(pblk->instance_uuid, uuid.b, 16);
}
#endif /* PBLK_H_ */