2 * Copyright (C) 2015 Matias Bjorling. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License version
6 * 2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; see the file COPYING. If not, write to
15 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
20 #include <linux/lightnvm.h>
22 #define MAX_SYSBLKS 3 /* remember to update mapping scheme on change */
#define MAX_BLKS_PR_SYSBLK 2 /* 2 blks with 256 pages and 3000 erases
			      * enables ~1.5M updates per sysblk unit
			      */
28 /* A row is a collection of flash blocks for a system block. */
31 int act_blk
[MAX_SYSBLKS
];
34 struct ppa_addr ppas
[MAX_SYSBLKS
* MAX_BLKS_PR_SYSBLK
];/* all sysblks */
37 static inline int scan_ppa_idx(int row
, int blkid
)
39 return (row
* MAX_BLKS_PR_SYSBLK
) + blkid
;
42 void nvm_sysblk_to_cpu(struct nvm_sb_info
*info
, struct nvm_system_block
*sb
)
44 info
->seqnr
= be32_to_cpu(sb
->seqnr
);
45 info
->erase_cnt
= be32_to_cpu(sb
->erase_cnt
);
46 info
->version
= be16_to_cpu(sb
->version
);
47 strncpy(info
->mmtype
, sb
->mmtype
, NVM_MMTYPE_LEN
);
48 info
->fs_ppa
.ppa
= be64_to_cpu(sb
->fs_ppa
);
51 void nvm_cpu_to_sysblk(struct nvm_system_block
*sb
, struct nvm_sb_info
*info
)
53 sb
->magic
= cpu_to_be32(NVM_SYSBLK_MAGIC
);
54 sb
->seqnr
= cpu_to_be32(info
->seqnr
);
55 sb
->erase_cnt
= cpu_to_be32(info
->erase_cnt
);
56 sb
->version
= cpu_to_be16(info
->version
);
57 strncpy(sb
->mmtype
, info
->mmtype
, NVM_MMTYPE_LEN
);
58 sb
->fs_ppa
= cpu_to_be64(info
->fs_ppa
.ppa
);
61 static int nvm_setup_sysblks(struct nvm_dev
*dev
, struct ppa_addr
*sysblk_ppas
)
63 int nr_rows
= min_t(int, MAX_SYSBLKS
, dev
->nr_chnls
);
66 for (i
= 0; i
< nr_rows
; i
++)
67 sysblk_ppas
[i
].ppa
= 0;
69 /* if possible, place sysblk at first channel, middle channel and last
70 * channel of the device. If not, create only one or two sys blocks
72 switch (dev
->nr_chnls
) {
74 sysblk_ppas
[1].g
.ch
= 1;
77 sysblk_ppas
[0].g
.ch
= 0;
80 sysblk_ppas
[0].g
.ch
= 0;
81 sysblk_ppas
[1].g
.ch
= dev
->nr_chnls
/ 2;
82 sysblk_ppas
[2].g
.ch
= dev
->nr_chnls
- 1;
89 void nvm_setup_sysblk_scan(struct nvm_dev
*dev
, struct sysblk_scan
*s
,
90 struct ppa_addr
*sysblk_ppas
)
92 memset(s
, 0, sizeof(struct sysblk_scan
));
93 s
->nr_rows
= nvm_setup_sysblks(dev
, sysblk_ppas
);
96 static int sysblk_get_host_blks(struct ppa_addr ppa
, int nr_blks
, u8
*blks
,
99 struct sysblk_scan
*s
= private;
100 int i
, nr_sysblk
= 0;
102 for (i
= 0; i
< nr_blks
; i
++) {
103 if (blks
[i
] != NVM_BLK_T_HOST
)
106 if (s
->nr_ppas
== MAX_BLKS_PR_SYSBLK
* MAX_SYSBLKS
) {
107 pr_err("nvm: too many host blks\n");
113 s
->ppas
[scan_ppa_idx(s
->row
, nr_sysblk
)] = ppa
;
121 static int nvm_get_all_sysblks(struct nvm_dev
*dev
, struct sysblk_scan
*s
,
122 struct ppa_addr
*ppas
, nvm_bb_update_fn
*fn
)
124 struct ppa_addr dppa
;
129 for (i
= 0; i
< s
->nr_rows
; i
++) {
130 dppa
= generic_to_dev_addr(dev
, ppas
[i
]);
133 ret
= dev
->ops
->get_bb_tbl(dev
, dppa
, dev
->blks_per_lun
, fn
, s
);
135 pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
146 * scans a block for latest sysblk.
148 * 0 - newer sysblk not found. PPA is updated to latest page.
149 * 1 - newer sysblk found and stored in *cur. PPA is updated to
153 static int nvm_scan_block(struct nvm_dev
*dev
, struct ppa_addr
*ppa
,
154 struct nvm_system_block
*sblk
)
156 struct nvm_system_block
*cur
;
157 int pg
, ret
, found
= 0;
159 /* the full buffer for a flash page is allocated. Only the first of it
160 * contains the system block information
162 cur
= kmalloc(dev
->pfpg_size
, GFP_KERNEL
);
166 /* perform linear scan through the block */
167 for (pg
= 0; pg
< dev
->lps_per_blk
; pg
++) {
168 ppa
->g
.pg
= ppa_to_slc(dev
, pg
);
170 ret
= nvm_submit_ppa(dev
, ppa
, 1, NVM_OP_PREAD
, NVM_IO_SLC_MODE
,
171 cur
, dev
->pfpg_size
);
173 if (ret
== NVM_RSP_ERR_EMPTYPAGE
) {
174 pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
181 pr_err("nvm: read failed (%x) for ppa (%u %u %u %u)",
187 break; /* if we can't read a page, continue to the
192 if (be32_to_cpu(cur
->magic
) != NVM_SYSBLK_MAGIC
) {
193 pr_debug("nvm: scan break for ppa (%u %u %u %u)\n",
198 break; /* last valid page already found */
201 if (be32_to_cpu(cur
->seqnr
) < be32_to_cpu(sblk
->seqnr
))
204 memcpy(sblk
, cur
, sizeof(struct nvm_system_block
));
213 static int nvm_set_bb_tbl(struct nvm_dev
*dev
, struct sysblk_scan
*s
, int type
)
218 if (s
->nr_ppas
> dev
->ops
->max_phys_sect
) {
219 pr_err("nvm: unable to update all sysblocks atomically\n");
223 memset(&rqd
, 0, sizeof(struct nvm_rq
));
225 nvm_set_rqd_ppalist(dev
, &rqd
, s
->ppas
, s
->nr_ppas
);
226 nvm_generic_to_addr_mode(dev
, &rqd
);
228 ret
= dev
->ops
->set_bb_tbl(dev
, &rqd
, type
);
229 nvm_free_rqd_ppalist(dev
, &rqd
);
231 pr_err("nvm: sysblk failed bb mark\n");
238 static int sysblk_get_free_blks(struct ppa_addr ppa
, int nr_blks
, u8
*blks
,
241 struct sysblk_scan
*s
= private;
242 struct ppa_addr
*sppa
;
245 for (i
= 0; i
< nr_blks
; i
++) {
246 if (blks
[i
] == NVM_BLK_T_HOST
)
249 if (blks
[i
] != NVM_BLK_T_FREE
)
252 sppa
= &s
->ppas
[scan_ppa_idx(s
->row
, blkid
)];
253 sppa
->g
.ch
= ppa
.g
.ch
;
254 sppa
->g
.lun
= ppa
.g
.lun
;
259 pr_debug("nvm: use (%u %u %u) as sysblk\n",
260 sppa
->g
.ch
, sppa
->g
.lun
, sppa
->g
.blk
);
261 if (blkid
> MAX_BLKS_PR_SYSBLK
- 1)
265 pr_err("nvm: sysblk failed get sysblk\n");
269 static int nvm_write_and_verify(struct nvm_dev
*dev
, struct nvm_sb_info
*info
,
270 struct sysblk_scan
*s
)
272 struct nvm_system_block nvmsb
;
274 int i
, sect
, ret
= 0;
275 struct ppa_addr
*ppas
;
277 nvm_cpu_to_sysblk(&nvmsb
, info
);
279 buf
= kzalloc(dev
->pfpg_size
, GFP_KERNEL
);
282 memcpy(buf
, &nvmsb
, sizeof(struct nvm_system_block
));
284 ppas
= kcalloc(dev
->sec_per_pg
, sizeof(struct ppa_addr
), GFP_KERNEL
);
290 /* Write and verify */
291 for (i
= 0; i
< s
->nr_rows
; i
++) {
292 ppas
[0] = s
->ppas
[scan_ppa_idx(i
, s
->act_blk
[i
])];
294 pr_debug("nvm: writing sysblk to ppa (%u %u %u %u)\n",
300 /* Expand to all sectors within a flash page */
301 if (dev
->sec_per_pg
> 1) {
302 for (sect
= 1; sect
< dev
->sec_per_pg
; sect
++) {
303 ppas
[sect
].ppa
= ppas
[0].ppa
;
304 ppas
[sect
].g
.sec
= sect
;
308 ret
= nvm_submit_ppa(dev
, ppas
, dev
->sec_per_pg
, NVM_OP_PWRITE
,
309 NVM_IO_SLC_MODE
, buf
, dev
->pfpg_size
);
311 pr_err("nvm: sysblk failed program (%u %u %u)\n",
318 ret
= nvm_submit_ppa(dev
, ppas
, dev
->sec_per_pg
, NVM_OP_PREAD
,
319 NVM_IO_SLC_MODE
, buf
, dev
->pfpg_size
);
321 pr_err("nvm: sysblk failed read (%u %u %u)\n",
328 if (memcmp(buf
, &nvmsb
, sizeof(struct nvm_system_block
))) {
329 pr_err("nvm: sysblk failed verify (%u %u %u)\n",
345 static int nvm_prepare_new_sysblks(struct nvm_dev
*dev
, struct sysblk_scan
*s
)
348 unsigned long nxt_blk
;
349 struct ppa_addr
*ppa
;
351 for (i
= 0; i
< s
->nr_rows
; i
++) {
352 nxt_blk
= (s
->act_blk
[i
] + 1) % MAX_BLKS_PR_SYSBLK
;
353 ppa
= &s
->ppas
[scan_ppa_idx(i
, nxt_blk
)];
354 ppa
->g
.pg
= ppa_to_slc(dev
, 0);
356 ret
= nvm_erase_ppa(dev
, ppa
, 1);
360 s
->act_blk
[i
] = nxt_blk
;
366 int nvm_get_sysblock(struct nvm_dev
*dev
, struct nvm_sb_info
*info
)
368 struct ppa_addr sysblk_ppas
[MAX_SYSBLKS
];
369 struct sysblk_scan s
;
370 struct nvm_system_block
*cur
;
375 * 1. setup sysblk locations
376 * 2. get bad block list
377 * 3. filter on host-specific (type 3)
378 * 4. iterate through all and find the highest seq nr.
379 * 5. return superblock information
382 if (!dev
->ops
->get_bb_tbl
)
385 nvm_setup_sysblk_scan(dev
, &s
, sysblk_ppas
);
387 mutex_lock(&dev
->mlock
);
388 ret
= nvm_get_all_sysblks(dev
, &s
, sysblk_ppas
, sysblk_get_host_blks
);
392 /* no sysblocks initialized */
396 cur
= kzalloc(sizeof(struct nvm_system_block
), GFP_KERNEL
);
400 /* find the latest block across all sysblocks */
401 for (i
= 0; i
< s
.nr_rows
; i
++) {
402 for (j
= 0; j
< MAX_BLKS_PR_SYSBLK
; j
++) {
403 struct ppa_addr ppa
= s
.ppas
[scan_ppa_idx(i
, j
)];
405 ret
= nvm_scan_block(dev
, &ppa
, cur
);
413 nvm_sysblk_to_cpu(info
, cur
);
417 mutex_unlock(&dev
->mlock
);
424 int nvm_update_sysblock(struct nvm_dev
*dev
, struct nvm_sb_info
*new)
426 /* 1. for each latest superblock
428 * a. write new flash page entry with the updated information
430 * a. find next available block on lun (linear search)
431 * if none, continue to next lun
432 * if none at all, report error. also report that it wasn't
433 * possible to write to all superblocks.
434 * c. write data to block.
436 struct ppa_addr sysblk_ppas
[MAX_SYSBLKS
];
437 struct sysblk_scan s
;
438 struct nvm_system_block
*cur
;
439 int i
, j
, ppaidx
, found
= 0;
442 if (!dev
->ops
->get_bb_tbl
)
445 nvm_setup_sysblk_scan(dev
, &s
, sysblk_ppas
);
447 mutex_lock(&dev
->mlock
);
448 ret
= nvm_get_all_sysblks(dev
, &s
, sysblk_ppas
, sysblk_get_host_blks
);
452 cur
= kzalloc(sizeof(struct nvm_system_block
), GFP_KERNEL
);
456 /* Get the latest sysblk for each sysblk row */
457 for (i
= 0; i
< s
.nr_rows
; i
++) {
459 for (j
= 0; j
< MAX_BLKS_PR_SYSBLK
; j
++) {
460 ppaidx
= scan_ppa_idx(i
, j
);
461 ret
= nvm_scan_block(dev
, &s
.ppas
[ppaidx
], cur
);
471 pr_err("nvm: no valid sysblks found to update\n");
477 * All sysblocks found. Check that they have same page id in their flash
480 for (i
= 1; i
< s
.nr_rows
; i
++) {
481 struct ppa_addr l
= s
.ppas
[scan_ppa_idx(0, s
.act_blk
[0])];
482 struct ppa_addr r
= s
.ppas
[scan_ppa_idx(i
, s
.act_blk
[i
])];
484 if (l
.g
.pg
!= r
.g
.pg
) {
485 pr_err("nvm: sysblks not on same page. Previous update failed.\n");
492 * Check that there haven't been another update to the seqnr since we
495 if ((new->seqnr
- 1) != be32_to_cpu(cur
->seqnr
)) {
496 pr_err("nvm: seq is not sequential\n");
502 * When all pages in a block has been written, a new block is selected
503 * and writing is performed on the new block.
505 if (s
.ppas
[scan_ppa_idx(0, s
.act_blk
[0])].g
.pg
==
506 dev
->lps_per_blk
- 1) {
507 ret
= nvm_prepare_new_sysblks(dev
, &s
);
512 ret
= nvm_write_and_verify(dev
, new, &s
);
516 mutex_unlock(&dev
->mlock
);
521 int nvm_init_sysblock(struct nvm_dev
*dev
, struct nvm_sb_info
*info
)
523 struct ppa_addr sysblk_ppas
[MAX_SYSBLKS
];
524 struct sysblk_scan s
;
528 * 1. select master blocks and select first available blks
529 * 2. get bad block list
530 * 3. mark MAX_SYSBLKS block as host-based device allocated.
531 * 4. write and verify data to block
534 if (!dev
->ops
->get_bb_tbl
|| !dev
->ops
->set_bb_tbl
)
537 if (!(dev
->mccap
& NVM_ID_CAP_SLC
) || !dev
->lps_per_blk
) {
538 pr_err("nvm: memory does not support SLC access\n");
542 /* Index all sysblocks and mark them as host-driven */
543 nvm_setup_sysblk_scan(dev
, &s
, sysblk_ppas
);
545 mutex_lock(&dev
->mlock
);
546 ret
= nvm_get_all_sysblks(dev
, &s
, sysblk_ppas
, sysblk_get_free_blks
);
550 ret
= nvm_set_bb_tbl(dev
, &s
, NVM_BLK_T_HOST
);
554 /* Write to the first block of each row */
555 ret
= nvm_write_and_verify(dev
, info
, &s
);
557 mutex_unlock(&dev
->mlock
);
/*
 * State for a factory reset: the device, the reset flags and a bitmap with
 * one bit per block (set = keep, clear = erase).
 * NOTE(review): members reconstructed from their uses below (f->dev,
 * f->flags, f->blks) — confirm against upstream.
 */
struct factory_blks {
	struct nvm_dev *dev;
	int flags;
	unsigned long *blks;
};
567 static int factory_nblks(int nblks
)
569 /* Round up to nearest BITS_PER_LONG */
570 return (nblks
+ (BITS_PER_LONG
- 1)) & ~(BITS_PER_LONG
- 1);
573 static unsigned int factory_blk_offset(struct nvm_dev
*dev
, int ch
, int lun
)
575 int nblks
= factory_nblks(dev
->blks_per_lun
);
577 return ((ch
* dev
->luns_per_chnl
* nblks
) + (lun
* nblks
)) /
581 static int nvm_factory_blks(struct ppa_addr ppa
, int nr_blks
, u8
*blks
,
584 struct factory_blks
*f
= private;
585 struct nvm_dev
*dev
= f
->dev
;
588 lunoff
= factory_blk_offset(dev
, ppa
.g
.ch
, ppa
.g
.lun
);
590 /* non-set bits correspond to the block must be erased */
591 for (i
= 0; i
< nr_blks
; i
++) {
594 if (f
->flags
& NVM_FACTORY_ERASE_ONLY_USER
)
595 set_bit(i
, &f
->blks
[lunoff
]);
598 if (!(f
->flags
& NVM_FACTORY_RESET_HOST_BLKS
))
599 set_bit(i
, &f
->blks
[lunoff
]);
601 case NVM_BLK_T_GRWN_BAD
:
602 if (!(f
->flags
& NVM_FACTORY_RESET_GRWN_BBLKS
))
603 set_bit(i
, &f
->blks
[lunoff
]);
606 set_bit(i
, &f
->blks
[lunoff
]);
614 static int nvm_fact_get_blks(struct nvm_dev
*dev
, struct ppa_addr
*erase_list
,
615 int max_ppas
, struct factory_blks
*f
)
618 int ch
, lun
, blkid
, idx
, done
= 0, ppa_cnt
= 0;
619 unsigned long *offset
;
623 for (ch
= 0; ch
< dev
->nr_chnls
; ch
++) {
624 for (lun
= 0; lun
< dev
->luns_per_chnl
; lun
++) {
625 idx
= factory_blk_offset(dev
, ch
, lun
);
626 offset
= &f
->blks
[idx
];
628 blkid
= find_first_zero_bit(offset
,
630 if (blkid
>= dev
->blks_per_lun
)
632 set_bit(blkid
, offset
);
638 pr_debug("nvm: erase ppa (%u %u %u)\n",
643 erase_list
[ppa_cnt
] = ppa
;
647 if (ppa_cnt
== max_ppas
)
656 static int nvm_fact_get_bb_tbl(struct nvm_dev
*dev
, struct ppa_addr ppa
,
657 nvm_bb_update_fn
*fn
, void *priv
)
659 struct ppa_addr dev_ppa
;
662 dev_ppa
= generic_to_dev_addr(dev
, ppa
);
664 ret
= dev
->ops
->get_bb_tbl(dev
, dev_ppa
, dev
->blks_per_lun
, fn
, priv
);
666 pr_err("nvm: failed bb tbl for ch%u lun%u\n",
667 ppa
.g
.ch
, ppa
.g
.blk
);
671 static int nvm_fact_select_blks(struct nvm_dev
*dev
, struct factory_blks
*f
)
677 for (ch
= 0; ch
< dev
->nr_chnls
; ch
++) {
678 for (lun
= 0; lun
< dev
->luns_per_chnl
; lun
++) {
682 ret
= nvm_fact_get_bb_tbl(dev
, ppa
, nvm_factory_blks
,
692 int nvm_dev_factory(struct nvm_dev
*dev
, int flags
)
694 struct factory_blks f
;
695 struct ppa_addr
*ppas
;
696 int ppa_cnt
, ret
= -ENOMEM
;
697 int max_ppas
= dev
->ops
->max_phys_sect
/ dev
->nr_planes
;
698 struct ppa_addr sysblk_ppas
[MAX_SYSBLKS
];
699 struct sysblk_scan s
;
701 f
.blks
= kzalloc(factory_nblks(dev
->blks_per_lun
) * dev
->nr_luns
,
706 ppas
= kcalloc(max_ppas
, sizeof(struct ppa_addr
), GFP_KERNEL
);
713 /* create list of blks to be erased */
714 ret
= nvm_fact_select_blks(dev
, &f
);
718 /* continue to erase until list of blks until empty */
719 while ((ppa_cnt
= nvm_fact_get_blks(dev
, ppas
, max_ppas
, &f
)) > 0)
720 nvm_erase_ppa(dev
, ppas
, ppa_cnt
);
722 /* mark host reserved blocks free */
723 if (flags
& NVM_FACTORY_RESET_HOST_BLKS
) {
724 nvm_setup_sysblk_scan(dev
, &s
, sysblk_ppas
);
725 mutex_lock(&dev
->mlock
);
726 ret
= nvm_get_all_sysblks(dev
, &s
, sysblk_ppas
,
727 sysblk_get_host_blks
);
729 ret
= nvm_set_bb_tbl(dev
, &s
, NVM_BLK_T_FREE
);
730 mutex_unlock(&dev
->mlock
);
738 EXPORT_SYMBOL(nvm_dev_factory
);