/* include/linux/lightnvm.h */

#ifndef NVM_H
#define NVM_H

#include <linux/blkdev.h>
#include <linux/types.h>
#include <uapi/linux/lightnvm.h>

enum {
	NVM_IO_OK = 0,
	NVM_IO_REQUEUE = 1,
	NVM_IO_DONE = 2,
	NVM_IO_ERR = 3,

	NVM_IOTYPE_NONE = 0,
	NVM_IOTYPE_GC = 1,
};

#define NVM_BLK_BITS (16)
#define NVM_PG_BITS (16)
#define NVM_SEC_BITS (8)
#define NVM_PL_BITS (8)
#define NVM_LUN_BITS (8)
#define NVM_CH_BITS (7)

struct ppa_addr {
	/* Generic structure for all addresses */
	union {
		struct {
			u64 blk : NVM_BLK_BITS;
			u64 pg : NVM_PG_BITS;
			u64 sec : NVM_SEC_BITS;
			u64 pl : NVM_PL_BITS;
			u64 lun : NVM_LUN_BITS;
			u64 ch : NVM_CH_BITS;
			u64 reserved : 1;
		} g;

		struct {
			u64 line : 63;
			u64 is_cached : 1;
		} c;

		u64 ppa;
	};
};
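
/*
 * Illustrative sketch (not part of this header): a ppa_addr is one 64-bit
 * value that can be viewed as raw media coordinates (.g), as a cached line
 * address (.c), or as the packed integer (.ppa). The union makes composing
 * and comparing addresses cheap:
 *
 *	struct ppa_addr p;
 *
 *	p.ppa = 0;
 *	p.g.ch = 1;
 *	p.g.lun = 2;
 *	p.g.blk = 511;
 *	// p.ppa now carries all coordinates in a single u64, so two
 *	// addresses can be compared with one integer comparison.
 */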

struct nvm_rq;
struct nvm_id;
struct nvm_dev;
struct nvm_tgt_dev;

typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
				nvm_l2p_update_fn *, void *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
				dma_addr_t *);
typedef void (nvm_dev_dma_free_fn)(void *, void *, dma_addr_t);

struct nvm_dev_ops {
	nvm_id_fn *identity;
	nvm_get_l2p_tbl_fn *get_l2p_tbl;
	nvm_op_bb_tbl_fn *get_bb_tbl;
	nvm_op_set_bb_fn *set_bb_tbl;

	nvm_submit_io_fn *submit_io;
	nvm_erase_blk_fn *erase_block;

	nvm_create_dma_pool_fn *create_dma_pool;
	nvm_destroy_dma_pool_fn *destroy_dma_pool;
	nvm_dev_dma_alloc_fn *dev_dma_alloc;
	nvm_dev_dma_free_fn *dev_dma_free;

	unsigned int max_phys_sect;
};
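
/*
 * Minimal sketch (hypothetical driver, not part of this header): a device
 * driver implements the callbacks above and hands a populated nvm_dev_ops
 * to the core via nvm_register(). The my_* names are illustrative only.
 *
 *	static struct nvm_dev_ops my_nvm_dev_ops = {
 *		.identity	= my_identity,
 *		.get_l2p_tbl	= my_get_l2p_tbl,
 *		.get_bb_tbl	= my_get_bb_tbl,
 *		.set_bb_tbl	= my_set_bb_tbl,
 *		.submit_io	= my_submit_io,
 *		.erase_block	= my_erase_block,
 *		.max_phys_sect	= 64,
 *	};
 */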

#ifdef CONFIG_NVM

#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/dmapool.h>
#include <uapi/linux/lightnvm.h>

enum {
	/* HW Responsibilities */
	NVM_RSP_L2P = 1 << 0,
	NVM_RSP_ECC = 1 << 1,

	/* Physical Addressing Mode */
	NVM_ADDRMODE_LINEAR = 0,
	NVM_ADDRMODE_CHANNEL = 1,

	/* Plane programming mode for LUN */
	NVM_PLANE_SINGLE = 1,
	NVM_PLANE_DOUBLE = 2,
	NVM_PLANE_QUAD = 4,

	/* Status codes */
	NVM_RSP_SUCCESS = 0x0,
	NVM_RSP_NOT_CHANGEABLE = 0x1,
	NVM_RSP_ERR_FAILWRITE = 0x40ff,
	NVM_RSP_ERR_EMPTYPAGE = 0x42ff,
	NVM_RSP_ERR_FAILECC = 0x4281,
	NVM_RSP_WARN_HIGHECC = 0x4700,

	/* Device opcodes */
	NVM_OP_HBREAD = 0x02,
	NVM_OP_HBWRITE = 0x81,
	NVM_OP_PWRITE = 0x91,
	NVM_OP_PREAD = 0x92,
	NVM_OP_ERASE = 0x90,

	/* PPA Command Flags */
	NVM_IO_SNGL_ACCESS = 0x0,
	NVM_IO_DUAL_ACCESS = 0x1,
	NVM_IO_QUAD_ACCESS = 0x2,

	/* NAND Access Modes */
	NVM_IO_SUSPEND = 0x80,
	NVM_IO_SLC_MODE = 0x100,
	NVM_IO_SCRAMBLE_DISABLE = 0x200,

	/* Block Types */
	NVM_BLK_T_FREE = 0x0,
	NVM_BLK_T_BAD = 0x1,
	NVM_BLK_T_GRWN_BAD = 0x2,
	NVM_BLK_T_DEV = 0x4,
	NVM_BLK_T_HOST = 0x8,

	/* Memory capabilities */
	NVM_ID_CAP_SLC = 0x1,
	NVM_ID_CAP_CMD_SUSPEND = 0x2,
	NVM_ID_CAP_SCRAMBLE = 0x4,
	NVM_ID_CAP_ENCRYPT = 0x8,

	/* Memory types */
	NVM_ID_FMTYPE_SLC = 0,
	NVM_ID_FMTYPE_MLC = 1,

	/* Device capabilities */
	NVM_ID_DCAP_BBLKMGMT = 0x1,
	NVM_UD_DCAP_ECC = 0x2,
};

struct nvm_id_lp_mlc {
	u16 num_pairs;
	u8 pairs[886];
};

struct nvm_id_lp_tbl {
	__u8 id[8];
	struct nvm_id_lp_mlc mlc;
};

struct nvm_id_group {
	u8 mtype;
	u8 fmtype;
	u8 num_ch;
	u8 num_lun;
	u8 num_pln;
	u16 num_blk;
	u16 num_pg;
	u16 fpg_sz;
	u16 csecs;
	u16 sos;
	u32 trdt;
	u32 trdm;
	u32 tprt;
	u32 tprm;
	u32 tbet;
	u32 tbem;
	u32 mpos;
	u32 mccap;
	u16 cpar;

	struct nvm_id_lp_tbl lptbl;
};

struct nvm_addr_format {
	u8 ch_offset;
	u8 ch_len;
	u8 lun_offset;
	u8 lun_len;
	u8 pln_offset;
	u8 pln_len;
	u8 blk_offset;
	u8 blk_len;
	u8 pg_offset;
	u8 pg_len;
	u8 sect_offset;
	u8 sect_len;
};

struct nvm_id {
	u8 ver_id;
	u8 vmnt;
	u8 cgrps;
	u32 cap;
	u32 dom;
	struct nvm_addr_format ppaf;
	struct nvm_id_group groups[4];
} __packed;

struct nvm_target {
	struct list_head list;
	struct nvm_tgt_dev *dev;
	struct nvm_tgt_type *type;
	struct gendisk *disk;
};

struct nvm_tgt_instance {
	struct nvm_tgt_type *tt;
};

#define ADDR_EMPTY (~0ULL)

#define NVM_VERSION_MAJOR 1
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0

struct nvm_rq;
typedef void (nvm_end_io_fn)(struct nvm_rq *);

struct nvm_rq {
	struct nvm_tgt_instance *ins;
	struct nvm_tgt_dev *dev;

	struct bio *bio;

	union {
		struct ppa_addr ppa_addr;
		dma_addr_t dma_ppa_list;
	};

	struct ppa_addr *ppa_list;

	void *meta_list;
	dma_addr_t dma_meta_list;

	struct completion *wait;
	nvm_end_io_fn *end_io;

	uint8_t opcode;
	uint16_t nr_ppas;
	uint16_t flags;

	u64 ppa_status; /* ppa media status */
	int error;
};

static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct nvm_rq);
}

static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
{
	return rqdata + 1;
}
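
/*
 * Layout note (sketch): the two helpers above assume the target's private
 * data (PDU) is allocated in the same buffer, immediately behind the
 * struct nvm_rq:
 *
 *	+----------------+------------------+
 *	| struct nvm_rq  | target PDU ...   |
 *	+----------------+------------------+
 *
 * so converting between the two is plain pointer arithmetic. A target that
 * embeds its own context (hypothetical my_ctx) can recover the request:
 *
 *	struct my_ctx *ctx = nvm_rq_to_pdu(rqd);
 *	struct nvm_rq *back = nvm_rq_from_pdu(ctx);	// back == rqd
 */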

enum {
	NVM_BLK_ST_FREE = 0x1,	/* Free block */
	NVM_BLK_ST_TGT = 0x2,	/* Block in use by target */
	NVM_BLK_ST_BAD = 0x8,	/* Bad block */
};

/* system block cpu representation */
struct nvm_sb_info {
	unsigned long seqnr;
	unsigned long erase_cnt;
	unsigned int version;
	char mmtype[NVM_MMTYPE_LEN];
	struct ppa_addr fs_ppa;
};

/* Device generic information */
struct nvm_geo {
	int nr_chnls;
	int nr_luns;
	int luns_per_chnl; /* -1 if channels are not symmetric */
	int nr_planes;
	int sec_per_pg; /* only sectors for a single page */
	int pgs_per_blk;
	int blks_per_lun;
	int fpg_size;
	int pfpg_size; /* size of buffer if all pages are to be read */
	int sec_size;
	int oob_size;
	int mccap;
	struct nvm_addr_format ppaf;

	/* Calculated/Cached values. These do not reflect the actual usable
	 * blocks at run-time.
	 */
	int max_rq_size;
	int plane_mode; /* drive device in single, double or quad mode */

	int sec_per_pl; /* all sectors across planes */
	int sec_per_blk;
	int sec_per_lun;
};

struct nvm_tgt_dev {
	/* Device information */
	struct nvm_geo geo;

	/* Base ppas for target LUNs */
	struct ppa_addr *luns;

	sector_t total_secs;

	struct nvm_id identity;
	struct request_queue *q;

	struct nvm_dev *parent;
	void *map;
};

struct nvm_dev {
	struct nvm_dev_ops *ops;

	struct list_head devices;

	/* Media manager */
	struct nvmm_type *mt;
	void *mp;

	/* System blocks */
	struct nvm_sb_info sb;

	/* Device information */
	struct nvm_geo geo;

	/* lower page table */
	int lps_per_blk;
	int *lptbl;

	unsigned long total_secs;

	unsigned long *lun_map;
	void *dma_pool;

	struct nvm_id identity;

	/* Backend device */
	struct request_queue *q;
	char name[DISK_NAME_LEN];
	void *private_data;

	void *rmap;

	struct mutex mlock;
	spinlock_t lock;
};

static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo,
						     u64 pba)
{
	struct ppa_addr l;
	int secs, pgs, blks, luns;
	sector_t ppa = pba;

	l.ppa = 0;

	div_u64_rem(ppa, geo->sec_per_pg, &secs);
	l.g.sec = secs;

	sector_div(ppa, geo->sec_per_pg);
	div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
	l.g.pg = pgs;

	sector_div(ppa, geo->pgs_per_blk);
	div_u64_rem(ppa, geo->blks_per_lun, &blks);
	l.g.blk = blks;

	sector_div(ppa, geo->blks_per_lun);
	div_u64_rem(ppa, geo->luns_per_chnl, &luns);
	l.g.lun = luns;

	sector_div(ppa, geo->luns_per_chnl);
	l.g.ch = ppa;

	return l;
}
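
/*
 * Sketch of the mapping (assuming the symmetric geometry above): a linear
 * sector number encodes the coordinates least-significant unit first,
 *
 *	pba = sec + sec_per_pg * (pg + pgs_per_blk * (blk +
 *			blks_per_lun * (lun + luns_per_chnl * ch)))
 *
 * so each div/rem step above peels off one coordinate: sec, then pg, blk,
 * lun, and finally ch as the remaining quotient.
 */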

static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
						  struct ppa_addr r)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr l;

	l.ppa = ((u64)r.g.blk) << geo->ppaf.blk_offset;
	l.ppa |= ((u64)r.g.pg) << geo->ppaf.pg_offset;
	l.ppa |= ((u64)r.g.sec) << geo->ppaf.sect_offset;
	l.ppa |= ((u64)r.g.pl) << geo->ppaf.pln_offset;
	l.ppa |= ((u64)r.g.lun) << geo->ppaf.lun_offset;
	l.ppa |= ((u64)r.g.ch) << geo->ppaf.ch_offset;

	return l;
}

static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
						  struct ppa_addr r)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr l;

	l.ppa = 0;
	/*
	 * (r.ppa >> X offset) & X len bitmask. X eq. blk, pg, etc.
	 */
	l.g.blk = (r.ppa >> geo->ppaf.blk_offset) &
					(((1 << geo->ppaf.blk_len) - 1));
	l.g.pg |= (r.ppa >> geo->ppaf.pg_offset) &
					(((1 << geo->ppaf.pg_len) - 1));
	l.g.sec |= (r.ppa >> geo->ppaf.sect_offset) &
					(((1 << geo->ppaf.sect_len) - 1));
	l.g.pl |= (r.ppa >> geo->ppaf.pln_offset) &
					(((1 << geo->ppaf.pln_len) - 1));
	l.g.lun |= (r.ppa >> geo->ppaf.lun_offset) &
					(((1 << geo->ppaf.lun_len) - 1));
	l.g.ch |= (r.ppa >> geo->ppaf.ch_offset) &
					(((1 << geo->ppaf.ch_len) - 1));

	return l;
}
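
/*
 * Round-trip sketch (illustrative): generic_to_dev_addr() packs the generic
 * bitfields into the device's own bit layout (described by the identify
 * command's ppaf), and dev_to_generic_addr() undoes it, so for any in-range
 * address:
 *
 *	struct ppa_addr dev_ppa = generic_to_dev_addr(dev, gen_ppa);
 *	struct ppa_addr back = dev_to_generic_addr(dev, dev_ppa);
 *	// back.ppa == gen_ppa.ppa
 */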

static inline int ppa_empty(struct ppa_addr ppa_addr)
{
	return (ppa_addr.ppa == ADDR_EMPTY);
}

static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
{
	ppa_addr->ppa = ADDR_EMPTY;
}

static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2)
{
	if (ppa_empty(ppa1) || ppa_empty(ppa2))
		return 0;

	return ((ppa1.g.ch == ppa2.g.ch) && (ppa1.g.lun == ppa2.g.lun) &&
					(ppa1.g.blk == ppa2.g.blk));
}

static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg)
{
	return dev->lptbl[slc_pg];
}

typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *);
typedef void (nvm_tgt_exit_fn)(void *);

struct nvm_tgt_type {
	const char *name;
	unsigned int version[3];

	/* target entry points */
	nvm_tgt_make_rq_fn *make_rq;
	nvm_tgt_capacity_fn *capacity;
	nvm_end_io_fn *end_io;

	/* module-specific init/teardown */
	nvm_tgt_init_fn *init;
	nvm_tgt_exit_fn *exit;

	/* For internal use */
	struct list_head list;
};

extern struct nvm_tgt_type *nvm_find_target_type(const char *, int);

extern int nvm_register_tgt_type(struct nvm_tgt_type *);
extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);

extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);

typedef int (nvmm_register_fn)(struct nvm_dev *);
typedef void (nvmm_unregister_fn)(struct nvm_dev *);

typedef int (nvmm_create_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_create *);
typedef int (nvmm_remove_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_remove *);
typedef int (nvmm_submit_io_fn)(struct nvm_tgt_dev *, struct nvm_rq *);
typedef int (nvmm_erase_blk_fn)(struct nvm_tgt_dev *, struct ppa_addr *, int);
typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t);
typedef struct ppa_addr (nvmm_trans_ppa_fn)(struct nvm_tgt_dev *,
					    struct ppa_addr, int);
typedef void (nvmm_part_to_tgt_fn)(struct nvm_dev *, sector_t *, int);

enum {
	TRANS_TGT_TO_DEV = 0x0,
	TRANS_DEV_TO_TGT = 0x1,
};

struct nvmm_type {
	const char *name;
	unsigned int version[3];

	nvmm_register_fn *register_mgr;
	nvmm_unregister_fn *unregister_mgr;

	nvmm_create_tgt_fn *create_tgt;
	nvmm_remove_tgt_fn *remove_tgt;

	nvmm_submit_io_fn *submit_io;
	nvmm_erase_blk_fn *erase_blk;

	nvmm_get_area_fn *get_area;
	nvmm_put_area_fn *put_area;

	nvmm_trans_ppa_fn *trans_ppa;
	nvmm_part_to_tgt_fn *part_to_tgt;

	struct list_head list;
};

extern int nvm_register_mgr(struct nvmm_type *);
extern void nvm_unregister_mgr(struct nvmm_type *);

extern struct nvm_dev *nvm_alloc_dev(int);
extern int nvm_register(struct nvm_dev *);
extern void nvm_unregister(struct nvm_dev *);

extern int nvm_set_bb_tbl(struct nvm_dev *, struct ppa_addr *, int, int);
extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
			      int, int);
extern int nvm_max_phys_sects(struct nvm_tgt_dev *);
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
			       const struct ppa_addr *, int, int);
extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int, int);
extern int nvm_erase_blk(struct nvm_tgt_dev *, struct ppa_addr *, int);
extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *,
			   void *);
extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t);
extern void nvm_put_area(struct nvm_tgt_dev *, sector_t);
extern void nvm_end_io(struct nvm_rq *, int);
extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int,
			  void *, int);
extern int nvm_submit_ppa_list(struct nvm_dev *, struct ppa_addr *, int, int,
			       int, void *, int);
extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
extern int nvm_get_bb_tbl(struct nvm_dev *, struct ppa_addr, u8 *);
extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *);

/* sysblk.c */
#define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */

/* system block on disk representation */
struct nvm_system_block {
	__be32 magic;		/* magic signature */
	__be32 seqnr;		/* sequence number */
	__be32 erase_cnt;	/* erase count */
	__be16 version;		/* version number */
	u8 mmtype[NVM_MMTYPE_LEN]; /* media manager name */
	__be64 fs_ppa;		/* PPA for media manager
				 * superblock */
};

extern int nvm_get_sysblock(struct nvm_dev *, struct nvm_sb_info *);
extern int nvm_update_sysblock(struct nvm_dev *, struct nvm_sb_info *);
extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *);

extern int nvm_dev_factory(struct nvm_dev *, int flags);

#define nvm_for_each_lun_ppa(geo, ppa, chid, lunid)			\
	for ((chid) = 0, (ppa).ppa = 0; (chid) < (geo)->nr_chnls;	\
					(chid)++, (ppa).g.ch = (chid))	\
		for ((lunid) = 0; (lunid) < (geo)->luns_per_chnl;	\
					(lunid)++, (ppa).g.lun = (lunid))
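
/*
 * Usage sketch (illustrative): the iterator above visits the base ppa of
 * every LUN on every channel of a symmetric geometry. The caller supplies
 * the ppa and two plain int counters:
 *
 *	struct ppa_addr ppa;
 *	int ch, lun;
 *
 *	nvm_for_each_lun_ppa(geo, ppa, ch, lun) {
 *		// ppa.g.ch / ppa.g.lun now address one LUN
 *	}
 */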

#else /* CONFIG_NVM */
struct nvm_dev_ops;

static inline struct nvm_dev *nvm_alloc_dev(int node)
{
	return ERR_PTR(-EINVAL);
}
static inline int nvm_register(struct nvm_dev *dev)
{
	return -EINVAL;
}
static inline void nvm_unregister(struct nvm_dev *dev) {}
#endif /* CONFIG_NVM */
#endif /* NVM_H */