/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.h)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Matias Bjorling <matias@cnexlabs.com>
 * Write buffering: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Physical Block-device target for Open-channel SSDs.
 *
 */

#ifndef PBLK_H_
#define PBLK_H_

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/uuid.h>

#include <linux/lightnvm.h>

/* Only run GC if less than 1/X of the device's blocks are free */
#define GC_LIMIT_INVERSE 5
#define GC_TIME_MSECS 1000
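
/*
 * Worked example (illustrative): with GC_LIMIT_INVERSE = 5, a device
 * with 1000 blocks starts garbage collecting once fewer than
 * 1000 / 5 = 200 blocks remain free.
 */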

#define PBLK_SECTOR (512)
#define PBLK_EXPOSED_PAGE_SIZE (4096)
#define PBLK_MAX_REQ_ADDRS (64)
#define PBLK_MAX_REQ_ADDRS_PW (6)

#define PBLK_NR_CLOSE_JOBS (4)

#define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)

#define PBLK_COMMAND_TIMEOUT_MS 30000

/* Max 512 LUNs per device */
#define PBLK_MAX_LUNS_BITMAP (4)

#define NR_PHY_IN_LOG (PBLK_EXPOSED_PAGE_SIZE / PBLK_SECTOR)

#define pblk_for_each_lun(pblk, rlun, i) \
	for ((i) = 0, rlun = &(pblk)->luns[0]; \
			(i) < (pblk)->nr_luns; (i)++, rlun = &(pblk)->luns[(i)])

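/*
 * Usage sketch (hypothetical caller, not part of this header): iterate
 * every mapped LUN, e.g. to initialize the per-LUN write semaphores.
 *
 *	struct pblk_lun *rlun;
 *	int i;
 *
 *	pblk_for_each_lun(pblk, rlun, i)
 *		sema_init(&rlun->wr_sem, 1);
 */
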
#define ERASE 2 /* READ = 0, WRITE = 1 */

/* Static pool sizes */
#define PBLK_GEN_WS_POOL_SIZE (2)

enum {
	/* IO Types */
	PBLK_IOTYPE_USER = 1 << 0,
	PBLK_IOTYPE_GC = 1 << 1,

	/* Write buffer flags */
	PBLK_FLUSH_ENTRY = 1 << 2,
	PBLK_WRITTEN_DATA = 1 << 3,
	PBLK_SUBMITTED_ENTRY = 1 << 4,
	PBLK_WRITABLE_ENTRY = 1 << 5,
};

enum {
	PBLK_BLK_ST_OPEN = 0x1,
	PBLK_BLK_ST_CLOSED = 0x2,
};

struct pblk_sec_meta {
	u64 reserved;
	__le64 lba;
};

/* The number of GC lists and the rate-limiter states go together. This way the
 * rate-limiter can dictate how much GC is needed based on resource utilization.
 */
#define PBLK_GC_NR_LISTS 3

enum {
	PBLK_RL_HIGH = 1,
	PBLK_RL_MID = 2,
	PBLK_RL_LOW = 3,
};

#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * PBLK_MAX_REQ_ADDRS)

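/*
 * Worked example (illustrative): struct pblk_sec_meta is 16 bytes (one
 * u64 plus one __le64, assuming no padding), so with
 * PBLK_MAX_REQ_ADDRS = 64 the per-request DMA metadata area is
 * 16 * 64 = 1024 bytes.
 */
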
/* write buffer completion context */
struct pblk_c_ctx {
	struct list_head list;		/* Head for out-of-order completion */

	unsigned long *lun_bitmap;	/* Luns used on current request */
	unsigned int sentry;
	unsigned int nr_valid;
	unsigned int nr_padded;
};

/* generic context */
struct pblk_g_ctx {
	void *private;
};

/* Pad context */
struct pblk_pad_rq {
	struct pblk *pblk;
	struct completion wait;
	struct kref ref;
};

/* Recovery context */
struct pblk_rec_ctx {
	struct pblk *pblk;
	struct nvm_rq *rqd;
	struct list_head failed;
	struct work_struct ws_rec;
};

/* Write context */
struct pblk_w_ctx {
	struct bio_list bios;		/* Original bios - used for completion
					 * in REQ_FUA, REQ_FLUSH case
					 */
	u64 lba;			/* Logical addr. associated with entry */
	struct ppa_addr ppa;		/* Physical addr. associated with entry */
	int flags;			/* Write context flags */
};

struct pblk_rb_entry {
	struct ppa_addr cacheline;	/* Cacheline for this entry */
	void *data;			/* Pointer to data on this entry */
	struct pblk_w_ctx w_ctx;	/* Context for this entry */
	struct list_head index;		/* List head to enable indexes */
};

#define EMPTY_ENTRY (~0U)

struct pblk_rb_pages {
	struct page *pages;
	int order;
	struct list_head list;
};

struct pblk_rb {
	struct pblk_rb_entry *entries;	/* Ring buffer entries */
	unsigned int mem;		/* Write offset - points to next
					 * writable entry in memory
					 */
	unsigned int subm;		/* Read offset - points to last entry
					 * that has been submitted to the media
					 * to be persisted
					 */
	unsigned int sync;		/* Synced - backpointer that signals
					 * the last submitted entry that has
					 * been successfully persisted to media
					 */
	unsigned int sync_point;	/* Sync point - last entry that must be
					 * flushed to the media. Used with
					 * REQ_FLUSH and REQ_FUA
					 */
	unsigned int l2p_update;	/* l2p update point - next entry for
					 * which l2p mapping will be updated to
					 * contain a device ppa address (instead
					 * of a cacheline)
					 */
	unsigned int nr_entries;	/* Number of entries in write buffer -
					 * must be a power of two
					 */
	unsigned int seg_size;		/* Size of the data segments being
					 * stored on each entry. Typically this
					 * will be 4KB
					 */

	struct list_head pages;		/* List of data pages */

	spinlock_t w_lock;		/* Write lock */
	spinlock_t s_lock;		/* Sync lock */

#ifdef CONFIG_NVM_DEBUG
	atomic_t inflight_sync_point;	/* Not served REQ_FLUSH | REQ_FUA */
#endif
};
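
/*
 * Illustrative sketch (not part of the API): since nr_entries is a
 * power of two, ring positions can wrap with a mask rather than a
 * modulo, e.g. a caller advancing the write pointer:
 *
 *	unsigned int next = (rb->mem + 1) & (rb->nr_entries - 1);
 *
 * Modulo wrap-around, the pointers preserve the ordering
 * sync <= subm <= mem: entries are written, then submitted, then
 * acknowledged as persisted.
 */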

#define PBLK_RECOVERY_SECTORS 16

struct pblk_lun {
	struct ppa_addr bppa;

	u8 *bb_list;			/* Bad block list for LUN. Only used on
					 * bring up. Bad blocks are managed
					 * within lines on run-time.
					 */

	struct semaphore wr_sem;
};

struct pblk_gc_rq {
	struct pblk_line *line;
	void *data;
	u64 paddr_list[PBLK_MAX_REQ_ADDRS];
	u64 lba_list[PBLK_MAX_REQ_ADDRS];
	int nr_secs;
	int secs_to_gc;
	struct list_head list;
};

struct pblk_gc {
	/* These states are not protected by a lock since (i) they are in the
	 * fast path, and (ii) they are not critical.
	 */
	int gc_active;
	int gc_enabled;
	int gc_forced;

	struct task_struct *gc_ts;
	struct task_struct *gc_writer_ts;
	struct task_struct *gc_reader_ts;

	struct workqueue_struct *gc_line_reader_wq;
	struct workqueue_struct *gc_reader_wq;

	struct timer_list gc_timer;

	struct semaphore gc_sem;
	atomic_t inflight_gc;
	int w_entries;

	struct list_head w_list;
	struct list_head r_list;

	spinlock_t lock;
	spinlock_t w_lock;
	spinlock_t r_lock;
};

struct pblk_rl {
	unsigned int high;	/* Upper threshold for rate limiter (free run -
				 * user I/O rate limiter
				 */
	unsigned int low;	/* Lower threshold for rate limiter (user I/O
				 * rate limiter - stall)
				 */
	unsigned int high_pw;	/* High rounded up as a power of 2 */

#define PBLK_USER_HIGH_THRS 8	/* Begin write limit at 12.5% available blks */
#define PBLK_USER_LOW_THRS 10	/* Aggressive GC at 10% available blocks */

	int rb_windows_pw;	/* Number of rate windows in the write buffer
				 * given as a power-of-2. This guarantees that
				 * when user I/O is being rate limited, there
				 * will be reserved enough space for the GC to
				 * place its payload. A window is of
				 * pblk->max_write_pgs size, which in NVMe is
				 * 64, i.e., 256KB.
				 */
	int rb_budget;		/* Total number of entries available for I/O */
	int rb_user_max;	/* Max buffer entries available for user I/O */
	int rb_gc_max;		/* Max buffer entries available for GC I/O */
	int rb_gc_rsv;		/* Reserved buffer entries for GC I/O */
	int rb_state;		/* Rate-limiter current state */
	int rb_max_io;		/* Maximum size for an I/O given the config */

	atomic_t rb_user_cnt;	/* User I/O buffer counter */
	atomic_t rb_gc_cnt;	/* GC I/O buffer counter */
	atomic_t rb_space;	/* Space limit in case of reaching capacity */

	int rsv_blocks;		/* Reserved blocks for GC */

	int rb_user_active;
	int rb_gc_active;

	struct timer_list u_timer;

	unsigned long long nr_secs;
	unsigned long total_blocks;
	atomic_t free_blocks;
};
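
/*
 * Worked example (illustrative): as noted for rb_windows_pw above, one
 * rate-limiter window is pblk->max_write_pgs entries; with 64 entries
 * of PBLK_EXPOSED_PAGE_SIZE (4 KB) each, a window covers
 * 64 * 4 KB = 256 KB of buffered data.
 */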

#define PBLK_LINE_EMPTY (~0U)

enum {
	/* Line Types */
	PBLK_LINETYPE_FREE = 0,
	PBLK_LINETYPE_LOG = 1,
	PBLK_LINETYPE_DATA = 2,

	/* Line state */
	PBLK_LINESTATE_FREE = 10,
	PBLK_LINESTATE_OPEN = 11,
	PBLK_LINESTATE_CLOSED = 12,
	PBLK_LINESTATE_GC = 13,
	PBLK_LINESTATE_BAD = 14,
	PBLK_LINESTATE_CORRUPT = 15,

	/* GC group */
	PBLK_LINEGC_NONE = 20,
	PBLK_LINEGC_EMPTY = 21,
	PBLK_LINEGC_LOW = 22,
	PBLK_LINEGC_MID = 23,
	PBLK_LINEGC_HIGH = 24,
	PBLK_LINEGC_FULL = 25,
};

#define PBLK_MAGIC 0x70626c6b /* pblk */
#define SMETA_VERSION cpu_to_le16(1)

struct line_header {
	__le32 crc;
	__le32 identifier;	/* pblk identifier */
	__u8 uuid[16];		/* instance uuid */
	__le16 type;		/* line type */
	__le16 version;		/* type version */
	__le32 id;		/* line id for current line */
};

struct line_smeta {
	struct line_header header;

	__le32 crc;		/* Full structure including struct crc */
	/* Previous line metadata */
	__le32 prev_id;		/* Line id for previous line */

	/* Current line metadata */
	__le64 seq_nr;		/* Sequence number for current line */

	/* Active writers */
	__le32 window_wr_lun;	/* Number of parallel LUNs to write */

	__le32 rsvd[2];

	__le64 lun_bitmap[];
};

/*
 * Metadata layout in media:
 *	First sector:
 *		1. struct line_emeta
 *		2. bad block bitmap (u64 * window_wr_lun)
 *	Mid sectors (start at lbas_sector):
 *		3. nr_lbas (u64) forming lba list
 *	Last sectors (start at vsc_sector):
 *		4. u32 valid sector count (vsc) for all lines (~0U: free line)
 */
struct line_emeta {
	struct line_header header;

	__le32 crc;		/* Full structure including struct crc */

	/* Previous line metadata */
	__le32 prev_id;		/* Line id for prev line */

	/* Current line metadata */
	__le64 seq_nr;		/* Sequence number for current line */

	/* Active writers */
	__le32 window_wr_lun;	/* Number of parallel LUNs to write */

	/* Bookkeeping for recovery */
	__le32 next_id;		/* Line id for next line */
	__le64 nr_lbas;		/* Number of lbas mapped in line */
	__le64 nr_valid_lbas;	/* Number of valid lbas mapped in line */
	__le64 bb_bitmap[];	/* Updated bad block bitmap for line */
};

struct pblk_emeta {
	struct line_emeta *buf;		/* emeta buffer in media format */
	int mem;			/* Write offset - points to next
					 * writable entry in memory
					 */
	atomic_t sync;			/* Synced - backpointer that signals the
					 * last entry that has been successfully
					 * persisted to media
					 */
	unsigned int nr_entries;	/* Number of emeta entries */
};

struct pblk_smeta {
	struct line_smeta *buf;		/* smeta buffer in persistent format */
};

struct pblk_line {
	struct pblk *pblk;
	unsigned int id;		/* Line number corresponds to the
					 * block line
					 */
	unsigned int seq_nr;		/* Unique line sequence number */

	int state;			/* PBLK_LINESTATE_X */
	int type;			/* PBLK_LINETYPE_X */
	int gc_group;			/* PBLK_LINEGC_X */
	struct list_head list;		/* Free, GC lists */

	unsigned long *lun_bitmap;	/* Bitmap for LUNs mapped in line */

	struct pblk_smeta *smeta;	/* Start metadata */
	struct pblk_emeta *emeta;	/* End metadata */

	int meta_line;			/* Metadata line id */
	int meta_distance;		/* Distance between data and metadata */

	u64 smeta_ssec;			/* Sector where smeta starts */
	u64 emeta_ssec;			/* Sector where emeta starts */

	unsigned int sec_in_line;	/* Number of usable secs in line */

	atomic_t blk_in_line;		/* Number of good blocks in line */
	unsigned long *blk_bitmap;	/* Bitmap for valid/invalid blocks */
	unsigned long *erase_bitmap;	/* Bitmap for erased blocks */

	unsigned long *map_bitmap;	/* Bitmap for mapped sectors in line */
	unsigned long *invalid_bitmap;	/* Bitmap for invalid sectors in line */

	atomic_t left_eblks;		/* Blocks left for erasing */
	atomic_t left_seblks;		/* Blocks left for sync erasing */

	int left_msecs;			/* Sectors left for mapping */
	unsigned int cur_sec;		/* Sector map pointer */
	unsigned int nr_valid_lbas;	/* Number of valid lbas in line */

	__le32 *vsc;			/* Valid sector count in line */

	struct kref ref;		/* Write buffer L2P references */

	spinlock_t lock;		/* Necessary for invalid_bitmap only */
};

#define PBLK_DATA_LINES 4

enum {
	PBLK_KMALLOC_META = 1,
	PBLK_VMALLOC_META = 2,
};

enum {
	PBLK_EMETA_TYPE_HEADER = 1,	/* struct line_emeta first sector */
	PBLK_EMETA_TYPE_LLBA = 2,	/* lba list - type: __le64 */
	PBLK_EMETA_TYPE_VSC = 3,	/* vsc list - type: __le32 */
};

struct pblk_line_mgmt {
	int nr_lines;			/* Total number of full lines */
	int nr_free_lines;		/* Number of full lines in free list */

	/* Free lists - use free_lock */
	struct list_head free_list;	/* Full lines ready to use */
	struct list_head corrupt_list;	/* Full lines corrupted */
	struct list_head bad_list;	/* Full lines bad */

	/* GC lists - use gc_lock */
	struct list_head *gc_lists[PBLK_GC_NR_LISTS];
	struct list_head gc_high_list;	/* Full lines ready to GC, high isc */
	struct list_head gc_mid_list;	/* Full lines ready to GC, mid isc */
	struct list_head gc_low_list;	/* Full lines ready to GC, low isc */

	struct list_head gc_full_list;	/* Full lines ready to GC, no valid */
	struct list_head gc_empty_list;	/* Full lines close, all valid */

	struct pblk_line *log_line;	/* Current FTL log line */
	struct pblk_line *data_line;	/* Current data line */
	struct pblk_line *log_next;	/* Next FTL log line */
	struct pblk_line *data_next;	/* Next data line */

	struct list_head emeta_list;	/* Lines queued to schedule emeta */

	__le32 *vsc_list;		/* Valid sector counts for all lines */

	/* Metadata allocation type: VMALLOC | KMALLOC */
	int emeta_alloc_type;

	/* Pre-allocated metadata for data lines */
	struct pblk_smeta *sline_meta[PBLK_DATA_LINES];
	struct pblk_emeta *eline_meta[PBLK_DATA_LINES];
	unsigned long meta_bitmap;

	/* Helpers for fast bitmap calculations */
	unsigned long *bb_template;
	unsigned long *bb_aux;

	unsigned long d_seq_nr;		/* Data line unique sequence number */
	unsigned long l_seq_nr;		/* Log line unique sequence number */

	spinlock_t free_lock;
	spinlock_t close_lock;
	spinlock_t gc_lock;
};

struct pblk_line_meta {
	unsigned int smeta_len;		/* Total length for smeta */
	unsigned int smeta_sec;		/* Sectors needed for smeta */

	unsigned int emeta_len[4];	/* Lengths for emeta:
					 * [0]: Total length
					 * [1]: struct line_emeta length
					 * [2]: L2P portion length
					 * [3]: vsc list length
					 */
	unsigned int emeta_sec[4];	/* Sectors needed for emeta. Same layout
					 * as emeta_len
					 */

	unsigned int emeta_bb;		/* Boundary for bb that affects emeta */

	unsigned int vsc_list_len;	/* Length for vsc list */
	unsigned int sec_bitmap_len;	/* Length for sector bitmap in line */
	unsigned int blk_bitmap_len;	/* Length for block bitmap in line */
	unsigned int lun_bitmap_len;	/* Length for lun bitmap in line */

	unsigned int blk_per_line;	/* Number of blocks in a full line */
	unsigned int sec_per_line;	/* Number of sectors in a line */
	unsigned int dsec_per_line;	/* Number of data sectors in a line */
	unsigned int min_blk_line;	/* Min. number of good blocks in line */

	unsigned int mid_thrs;		/* Threshold for GC mid list */
	unsigned int high_thrs;		/* Threshold for GC high list */

	unsigned int meta_distance;	/* Distance between data and metadata */
};

struct pblk_addr_format {
	u64 ch_mask;
	u64 lun_mask;
	u64 pln_mask;
	u64 blk_mask;
	u64 pg_mask;
	u64 sec_mask;
	u8 ch_offset;
	u8 lun_offset;
	u8 pln_offset;
	u8 blk_offset;
	u8 pg_offset;
	u8 sec_offset;
};

enum {
	PBLK_STATE_RUNNING = 0,
	PBLK_STATE_STOPPING = 1,
	PBLK_STATE_RECOVERING = 2,
	PBLK_STATE_STOPPED = 3,
};

struct pblk {
	struct nvm_tgt_dev *dev;
	struct gendisk *disk;

	struct kobject kobj;

	struct pblk_lun *luns;

	struct pblk_line *lines;	/* Line array */
	struct pblk_line_mgmt l_mg;	/* Line management */
	struct pblk_line_meta lm;	/* Line metadata */

	int ppaf_bitsize;
	struct pblk_addr_format ppaf;

	struct pblk_rb rwb;

	int state;			/* pblk line state */

	int min_write_pgs;	/* Minimum number of pages required by controller */
	int max_write_pgs;	/* Maximum number of pages supported by controller */
	int pgs_in_buffer;	/* Number of pages that need to be held in buffer to
				 * guarantee successful reads.
				 */

	sector_t capacity;	/* Device capacity when bad blocks are subtracted */
	int over_pct;		/* Percentage of device used for over-provisioning */

	/* pblk provisioning values. Used by rate limiter */
	struct pblk_rl rl;

	int sec_per_write;

	unsigned char instance_uuid[16];
#ifdef CONFIG_NVM_DEBUG
	/* All debug counters apply to 4kb sector I/Os */
	atomic_long_t inflight_writes;	/* Inflight writes (user and gc) */
	atomic_long_t padded_writes;	/* Sectors padded due to flush/fua */
	atomic_long_t padded_wb;	/* Sectors padded in write buffer */
	atomic_long_t nr_flush;		/* Number of flush/fua I/O */
	atomic_long_t req_writes;	/* Sectors stored on write buffer */
	atomic_long_t sub_writes;	/* Sectors submitted from buffer */
	atomic_long_t sync_writes;	/* Sectors synced to media */
	atomic_long_t inflight_reads;	/* Inflight sector read requests */
	atomic_long_t cache_reads;	/* Read requests that hit the cache */
	atomic_long_t sync_reads;	/* Completed sector read requests */
	atomic_long_t recov_writes;	/* Sectors submitted from recovery */
	atomic_long_t recov_gc_writes;	/* Sectors submitted from write GC */
	atomic_long_t recov_gc_reads;	/* Sectors submitted from read GC */
#endif

	spinlock_t lock;

	atomic_long_t read_failed;
	atomic_long_t read_empty;
	atomic_long_t read_high_ecc;
	atomic_long_t read_failed_gc;
	atomic_long_t write_failed;
	atomic_long_t erase_failed;

	atomic_t inflight_io;		/* General inflight I/O counter */

	struct task_struct *writer_ts;

	/* Simple translation map of logical addresses to physical addresses.
	 * The logical addresses are known by the host system, while the
	 * physical addresses are used when writing to the disk block device.
	 */
	unsigned char *trans_map;
	spinlock_t trans_lock;

	struct list_head compl_list;

	mempool_t *page_bio_pool;
	mempool_t *gen_ws_pool;
	mempool_t *rec_pool;
	mempool_t *r_rq_pool;
	mempool_t *w_rq_pool;
	mempool_t *e_rq_pool;

	struct workqueue_struct *close_wq;
	struct workqueue_struct *bb_wq;

	struct timer_list wtimer;

	struct pblk_gc gc;
};

struct pblk_line_ws {
	struct pblk *pblk;
	struct pblk_line *line;
	void *priv;
	struct work_struct ws;
};

#define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx))
#define pblk_w_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx))

/*
 * pblk ring buffer operations
 */
int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
		 unsigned int power_size, unsigned int power_seg_sz);
unsigned int pblk_rb_calculate_size(unsigned int nr_entries);
void *pblk_rb_entries_ref(struct pblk_rb *rb);
int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
			   unsigned int nr_entries, unsigned int *pos);
int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
			 unsigned int *pos);
void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
			      struct pblk_w_ctx w_ctx, unsigned int pos);
void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
			    struct pblk_w_ctx w_ctx, struct pblk_line *line,
			    u64 paddr, unsigned int pos);
struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos);
void pblk_rb_flush(struct pblk_rb *rb);

void pblk_rb_sync_l2p(struct pblk_rb *rb);
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
				 struct bio *bio, unsigned int pos,
				 unsigned int nr_entries, unsigned int count);
unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
				      struct list_head *list,
				      unsigned int max);
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
			struct ppa_addr ppa, int bio_iter, bool advanced_bio);
unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);

unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
					      struct ppa_addr *ppa);
void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
unsigned int pblk_rb_sync_point_count(struct pblk_rb *rb);

unsigned int pblk_rb_read_count(struct pblk_rb *rb);
unsigned int pblk_rb_sync_count(struct pblk_rb *rb);
unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos);

int pblk_rb_tear_down_check(struct pblk_rb *rb);
int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos);
void pblk_rb_data_free(struct pblk_rb *rb);
ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);

/*
 * pblk core
 */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw);
void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
			struct pblk_c_ctx *c_ctx);
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw);
void pblk_wait_for_meta(struct pblk *pblk);
struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba);
void pblk_discard(struct pblk *pblk, struct bio *bio);
void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
			      unsigned int nr_secs, unsigned int len,
			      int alloc_type, gfp_t gfp_mask);
struct pblk_line *pblk_line_get(struct pblk *pblk);
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
void pblk_line_replace_data(struct pblk *pblk);
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
struct pblk_line *pblk_line_get_data(struct pblk *pblk);
struct pblk_line *pblk_line_get_erase(struct pblk *pblk);
int pblk_line_erase(struct pblk *pblk, struct pblk_line *line);
int pblk_line_is_full(struct pblk_line *line);
void pblk_line_free(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close_meta_sync(struct pblk *pblk);
void pblk_line_close_ws(struct work_struct *work);
void pblk_pipeline_stop(struct pblk *pblk);
void pblk_line_mark_bb(struct work_struct *work);
void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		     void (*work)(struct work_struct *), gfp_t gfp_mask,
		     struct workqueue_struct *wq);
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line);
int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
			 void *emeta_buf);
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr erase_ppa);
void pblk_line_put(struct kref *ref);
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line);
u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line);
void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
		   unsigned long secs_to_flush);
void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		  unsigned long *lun_bitmap);
void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		unsigned long *lun_bitmap);
void pblk_end_bio_sync(struct bio *bio);
void pblk_end_io_sync(struct nvm_rq *rqd);
int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
		       int nr_pages);
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
			 int nr_pages);
void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa);
void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
			   u64 paddr);
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa);
void pblk_update_map_cache(struct pblk *pblk, sector_t lba,
			   struct ppa_addr ppa);
void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
			 struct ppa_addr ppa, struct ppa_addr entry_line);
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
		       struct pblk_line *gc_line, u64 paddr);
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
			  u64 *lba_list, int nr_secs);
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
			 sector_t blba, int nr_secs);

/*
 * pblk user I/O write path
 */
int pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
			unsigned long flags);
int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq);

/*
 * pblk map
 */
void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
		       unsigned int sentry, unsigned long *lun_bitmap,
		       unsigned int valid_secs, struct ppa_addr *erase_ppa);
void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
		 unsigned long *lun_bitmap, unsigned int valid_secs,
		 unsigned int off);

/*
 * pblk write thread
 */
int pblk_write_ts(void *data);
void pblk_write_timer_fn(unsigned long data);
void pblk_write_should_kick(struct pblk *pblk);

/*
 * pblk read path
 */
extern struct bio_set *pblk_bio_set;
int pblk_submit_read(struct pblk *pblk, struct bio *bio);
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);

/*
 * pblk recovery
 */
void pblk_submit_rec(struct work_struct *work);
struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
int pblk_recov_pad(struct pblk *pblk);
__le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta);
int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
			struct pblk_rec_ctx *recovery, u64 *comp_bits,
			unsigned int comp);

/*
 * pblk gc
 */
#define PBLK_GC_MAX_READERS 8	/* Max number of outstanding GC reader jobs */
#define PBLK_GC_RQ_QD 128	/* Queue depth for inflight GC requests */
#define PBLK_GC_L_QD 4		/* Queue depth for inflight GC lines */
#define PBLK_GC_RSV_LINE 1	/* Reserved lines for GC */

int pblk_gc_init(struct pblk *pblk);
void pblk_gc_exit(struct pblk *pblk);
void pblk_gc_should_start(struct pblk *pblk);
void pblk_gc_should_stop(struct pblk *pblk);
void pblk_gc_kick(struct pblk *pblk);
void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
			      int *gc_active);
int pblk_gc_sysfs_force(struct pblk *pblk, int force);

/*
 * pblk rate limiter
 */
void pblk_rl_init(struct pblk_rl *rl, int budget);
void pblk_rl_free(struct pblk_rl *rl);
int pblk_rl_high_thrs(struct pblk_rl *rl);
int pblk_rl_low_thrs(struct pblk_rl *rl);
unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
int pblk_rl_sysfs_rate_show(struct pblk_rl *rl);
int pblk_rl_max_io(struct pblk_rl *rl);
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line);
void pblk_rl_set_space_limit(struct pblk_rl *rl, int entries_left);
int pblk_rl_is_limit(struct pblk_rl *rl);

/*
 * pblk sysfs
 */
int pblk_sysfs_init(struct gendisk *tdisk);
void pblk_sysfs_exit(struct gendisk *tdisk);

static inline void *pblk_malloc(size_t size, int type, gfp_t flags)
{
	if (type == PBLK_KMALLOC_META)
		return kmalloc(size, flags);
	return vmalloc(size);
}

static inline void pblk_mfree(void *ptr, int type)
{
	if (type == PBLK_KMALLOC_META)
		kfree(ptr);
	else
		vfree(ptr);
}
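
/*
 * Usage sketch (hypothetical caller): allocate and release a line
 * metadata buffer with the line manager's allocation type, so that
 * kmalloc/vmalloc stay paired with kfree/vfree.
 *
 *	void *buf = pblk_malloc(pblk->lm.emeta_len[0],
 *				pblk->l_mg.emeta_alloc_type, GFP_KERNEL);
 *	if (buf)
 *		pblk_mfree(buf, pblk->l_mg.emeta_alloc_type);
 */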

static inline struct nvm_rq *nvm_rq_from_c_ctx(void *c_ctx)
{
	return c_ctx - sizeof(struct nvm_rq);
}

static inline void *emeta_to_bb(struct line_emeta *emeta)
{
	return emeta->bb_bitmap;
}

static inline void *emeta_to_lbas(struct pblk *pblk, struct line_emeta *emeta)
{
	return ((void *)emeta + pblk->lm.emeta_len[1]);
}

static inline void *emeta_to_vsc(struct pblk *pblk, struct line_emeta *emeta)
{
	return (emeta_to_lbas(pblk, emeta) + pblk->lm.emeta_len[2]);
}
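
/*
 * Example (illustrative): walking the emeta regions laid out as in the
 * "Metadata layout in media" comment above. emeta_len[1] is the header
 * length and emeta_len[2] the L2P portion length, so the lba list and
 * the vsc list follow back-to-back:
 *
 *	struct line_emeta *emeta = line->emeta->buf;
 *	__le64 *lbas = emeta_to_lbas(pblk, emeta);
 *	__le32 *vsc = emeta_to_vsc(pblk, emeta);
 */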

static inline int pblk_line_vsc(struct pblk_line *line)
{
	return le32_to_cpu(*line->vsc);
}

#define NVM_MEM_PAGE_WRITE (8)

static inline int pblk_pad_distance(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

	return NVM_MEM_PAGE_WRITE * geo->nr_luns * geo->sec_per_pl;
}
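
/*
 * Worked example (hypothetical geometry, for illustration only): with
 * NVM_MEM_PAGE_WRITE = 8, 64 LUNs and 16 sectors per plane set, the
 * pad distance is 8 * 64 * 16 = 8192 sectors.
 */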

static inline int pblk_dev_ppa_to_line(struct ppa_addr p)
{
	return p.g.blk;
}

static inline int pblk_tgt_ppa_to_line(struct ppa_addr p)
{
	return p.g.blk;
}

static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
{
	return p.g.lun * geo->nr_chnls + p.g.ch;
}

/* A block within a line corresponds to the lun */
static inline int pblk_dev_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
{
	return p.g.lun * geo->nr_chnls + p.g.ch;
}

static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
{
	struct ppa_addr ppa64;

	ppa64.ppa = 0;

	if (ppa32 == -1) {
		ppa64.ppa = ADDR_EMPTY;
	} else if (ppa32 & (1U << 31)) {
		ppa64.c.line = ppa32 & ((~0U) >> 1);
		ppa64.c.is_cached = 1;
	} else {
		ppa64.g.blk = (ppa32 & pblk->ppaf.blk_mask) >>
							pblk->ppaf.blk_offset;
		ppa64.g.pg = (ppa32 & pblk->ppaf.pg_mask) >>
							pblk->ppaf.pg_offset;
		ppa64.g.lun = (ppa32 & pblk->ppaf.lun_mask) >>
							pblk->ppaf.lun_offset;
		ppa64.g.ch = (ppa32 & pblk->ppaf.ch_mask) >>
							pblk->ppaf.ch_offset;
		ppa64.g.pl = (ppa32 & pblk->ppaf.pln_mask) >>
							pblk->ppaf.pln_offset;
		ppa64.g.sec = (ppa32 & pblk->ppaf.sec_mask) >>
							pblk->ppaf.sec_offset;
	}

	return ppa64;
}

static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
						 sector_t lba)
{
	struct ppa_addr ppa;

	if (pblk->ppaf_bitsize < 32) {
		u32 *map = (u32 *)pblk->trans_map;

		ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
	} else {
		struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;

		ppa = map[lba];
	}

	return ppa;
}

static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
{
	u32 ppa32 = 0;

	if (ppa64.ppa == ADDR_EMPTY) {
		ppa32 = ~0U;
	} else if (ppa64.c.is_cached) {
		ppa32 |= ppa64.c.line;
		ppa32 |= 1U << 31;
	} else {
		ppa32 |= ppa64.g.blk << pblk->ppaf.blk_offset;
		ppa32 |= ppa64.g.pg << pblk->ppaf.pg_offset;
		ppa32 |= ppa64.g.lun << pblk->ppaf.lun_offset;
		ppa32 |= ppa64.g.ch << pblk->ppaf.ch_offset;
		ppa32 |= ppa64.g.pl << pblk->ppaf.pln_offset;
		ppa32 |= ppa64.g.sec << pblk->ppaf.sec_offset;
	}

	return ppa32;
}
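
/*
 * Illustrative round-trip (assumes the device address fits the compact
 * format, i.e. ppaf_bitsize < 32): packing and unpacking are inverses.
 *
 *	u32 packed = pblk_ppa64_to_ppa32(pblk, ppa);
 *	struct ppa_addr unpacked = pblk_ppa32_to_ppa64(pblk, packed);
 *	WARN_ON(unpacked.ppa != ppa.ppa);
 */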

static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
				      struct ppa_addr ppa)
{
	if (pblk->ppaf_bitsize < 32) {
		u32 *map = (u32 *)pblk->trans_map;

		map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
	} else {
		u64 *map = (u64 *)pblk->trans_map;

		map[lba] = ppa.ppa;
	}
}
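
/*
 * Usage sketch (hypothetical caller): update and re-read an L2P entry
 * under trans_lock, which serializes access to the translation map.
 *
 *	spin_lock(&pblk->trans_lock);
 *	pblk_trans_map_set(pblk, lba, ppa);
 *	ppa = pblk_trans_map_get(pblk, lba);
 *	spin_unlock(&pblk->trans_lock);
 */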

static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
					    struct ppa_addr p)
{
	u64 paddr;

	paddr = 0;
	paddr |= (u64)p.g.pg << pblk->ppaf.pg_offset;
	paddr |= (u64)p.g.lun << pblk->ppaf.lun_offset;
	paddr |= (u64)p.g.ch << pblk->ppaf.ch_offset;
	paddr |= (u64)p.g.pl << pblk->ppaf.pln_offset;
	paddr |= (u64)p.g.sec << pblk->ppaf.sec_offset;

	return paddr;
}

static inline int pblk_ppa_empty(struct ppa_addr ppa_addr)
{
	return (ppa_addr.ppa == ADDR_EMPTY);
}

static inline void pblk_ppa_set_empty(struct ppa_addr *ppa_addr)
{
	ppa_addr->ppa = ADDR_EMPTY;
}

static inline bool pblk_ppa_comp(struct ppa_addr lppa, struct ppa_addr rppa)
{
	if (lppa.ppa == rppa.ppa)
		return true;

	return false;
}

static inline int pblk_addr_in_cache(struct ppa_addr ppa)
{
	return (ppa.ppa != ADDR_EMPTY && ppa.c.is_cached);
}

static inline int pblk_addr_to_cacheline(struct ppa_addr ppa)
{
	return ppa.c.line;
}

static inline struct ppa_addr pblk_cacheline_to_addr(int addr)
{
	struct ppa_addr p;

	p.c.line = addr;
	p.c.is_cached = 1;

	return p;
}

static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
					      u64 line_id)
{
	struct ppa_addr ppa;

	ppa.ppa = 0;
	ppa.g.blk = line_id;
	ppa.g.pg = (paddr & pblk->ppaf.pg_mask) >> pblk->ppaf.pg_offset;
	ppa.g.lun = (paddr & pblk->ppaf.lun_mask) >> pblk->ppaf.lun_offset;
	ppa.g.ch = (paddr & pblk->ppaf.ch_mask) >> pblk->ppaf.ch_offset;
	ppa.g.pl = (paddr & pblk->ppaf.pln_mask) >> pblk->ppaf.pln_offset;
	ppa.g.sec = (paddr & pblk->ppaf.sec_mask) >> pblk->ppaf.sec_offset;

	return ppa;
}
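
/*
 * Example (illustrative): a line-local sector address plus a line id
 * yields a device ppa; pblk_dev_ppa_to_line_addr() above performs the
 * inverse transformation for in-range addresses.
 *
 *	struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line->id);
 *	WARN_ON(pblk_dev_ppa_to_line_addr(pblk, ppa) != paddr);
 */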

static inline struct ppa_addr addr_to_pblk_ppa(struct pblk *pblk, u64 paddr,
					       u64 line_id)
{
	struct ppa_addr ppa;

	ppa = addr_to_gen_ppa(pblk, paddr, line_id);

	return ppa;
}

static inline u32 pblk_calc_meta_header_crc(struct pblk *pblk,
					    struct line_header *header)
{
	u32 crc = ~(u32)0;

	crc = crc32_le(crc, (unsigned char *)header + sizeof(crc),
				sizeof(struct line_header) - sizeof(crc));

	return crc;
}

static inline u32 pblk_calc_smeta_crc(struct pblk *pblk,
				      struct line_smeta *smeta)
{
	struct pblk_line_meta *lm = &pblk->lm;
	u32 crc = ~(u32)0;

	crc = crc32_le(crc, (unsigned char *)smeta +
				sizeof(struct line_header) + sizeof(crc),
				lm->smeta_len -
				sizeof(struct line_header) - sizeof(crc));

	return crc;
}

static inline u32 pblk_calc_emeta_crc(struct pblk *pblk,
				      struct line_emeta *emeta)
{
	struct pblk_line_meta *lm = &pblk->lm;
	u32 crc = ~(u32)0;

	crc = crc32_le(crc, (unsigned char *)emeta +
				sizeof(struct line_header) + sizeof(crc),
				lm->emeta_len[0] -
				sizeof(struct line_header) - sizeof(crc));

	return crc;
}
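
/*
 * Usage sketch (hypothetical recovery-path check): recompute a header
 * CRC and compare it against the stored little-endian value.
 *
 *	if (le32_to_cpu(header->crc) !=
 *				pblk_calc_meta_header_crc(pblk, header))
 *		return -EINVAL;
 */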

static inline int pblk_set_progr_mode(struct pblk *pblk, int type)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int flags;

	flags = geo->plane_mode >> 1;

	if (type == WRITE)
		flags |= NVM_IO_SCRAMBLE_ENABLE;

	return flags;
}

enum {
	PBLK_READ_RANDOM = 0,
	PBLK_READ_SEQUENTIAL = 1,
};

static inline int pblk_set_read_mode(struct pblk *pblk, int type)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int flags;

	flags = NVM_IO_SUSPEND | NVM_IO_SCRAMBLE_ENABLE;
	if (type == PBLK_READ_SEQUENTIAL)
		flags |= geo->plane_mode >> 1;

	return flags;
}

static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
{
	return !(nr_secs % pblk->min_write_pgs);
}

#ifdef CONFIG_NVM_DEBUG
static inline void print_ppa(struct ppa_addr *p, char *msg, int error)
{
	if (p->c.is_cached) {
		pr_err("ppa: (%s: %x) cache line: %llu\n",
				msg, error, (u64)p->c.line);
	} else {
		pr_err("ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n",
			msg, error,
			p->g.ch, p->g.lun, p->g.blk,
			p->g.pg, p->g.pl, p->g.sec);
	}
}

static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd,
					 int error)
{
	int bit = -1;

	if (rqd->nr_ppas == 1) {
		print_ppa(&rqd->ppa_addr, "rqd", error);
		return;
	}

	while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas,
						bit + 1)) < rqd->nr_ppas) {
		print_ppa(&rqd->ppa_list[bit], "rqd", error);
	}

	pr_err("error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
}
#endif

static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
					   struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_geo *geo = &tgt_dev->geo;
	struct ppa_addr *ppa;
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa = &ppas[i];

		if (!ppa->c.is_cached &&
				ppa->g.ch < geo->nr_chnls &&
				ppa->g.lun < geo->luns_per_chnl &&
				ppa->g.pl < geo->nr_planes &&
				ppa->g.blk < geo->blks_per_lun &&
				ppa->g.pg < geo->pgs_per_blk &&
				ppa->g.sec < geo->sec_per_pg)
			continue;

#ifdef CONFIG_NVM_DEBUG
		print_ppa(ppa, "boundary", i);
#endif
		return 1;
	}
	return 0;
}

static inline int pblk_boundary_paddr_checks(struct pblk *pblk, u64 paddr)
{
	struct pblk_line_meta *lm = &pblk->lm;

	if (paddr > lm->sec_per_line)
		return 1;

	return 0;
}

static inline unsigned int pblk_get_bi_idx(struct bio *bio)
{
	return bio->bi_iter.bi_idx;
}

static inline sector_t pblk_get_lba(struct bio *bio)
{
	return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
}

static inline unsigned int pblk_get_secs(struct bio *bio)
{
	return bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE;
}

static inline sector_t pblk_get_sector(sector_t lba)
{
	return lba * NR_PHY_IN_LOG;
}
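
/*
 * Worked example (illustrative): with PBLK_EXPOSED_PAGE_SIZE = 4096 and
 * PBLK_SECTOR = 512, NR_PHY_IN_LOG is 8; a bio starting at 512-byte
 * sector 16 therefore maps to pblk lba 16 / 8 = 2, and
 * pblk_get_sector(2) returns 16 again.
 */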

static inline void pblk_setup_uuid(struct pblk *pblk)
{
	uuid_le uuid;

	uuid_le_gen(&uuid);
	memcpy(pblk->instance_uuid, uuid.b, 16);
}
#endif /* PBLK_H_ */