/*
 * Copyright (C) 2015 Matias Bjorling. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */
20 #include <linux/lightnvm.h>
22 #define MAX_SYSBLKS 3 /* remember to update mapping scheme on change */
23 #define MAX_BLKS_PR_SYSBLK 2 /* 2 blks with 256 pages and 3000 erases
24 * enables ~1.5M updates per sysblk unit
28 /* A row is a collection of flash blocks for a system block. */
31 int act_blk
[MAX_SYSBLKS
];
34 struct ppa_addr ppas
[MAX_SYSBLKS
* MAX_BLKS_PR_SYSBLK
];/* all sysblks */
37 static inline int scan_ppa_idx(int row
, int blkid
)
39 return (row
* MAX_BLKS_PR_SYSBLK
) + blkid
;
42 static void nvm_sysblk_to_cpu(struct nvm_sb_info
*info
,
43 struct nvm_system_block
*sb
)
45 info
->seqnr
= be32_to_cpu(sb
->seqnr
);
46 info
->erase_cnt
= be32_to_cpu(sb
->erase_cnt
);
47 info
->version
= be16_to_cpu(sb
->version
);
48 strncpy(info
->mmtype
, sb
->mmtype
, NVM_MMTYPE_LEN
);
49 info
->fs_ppa
.ppa
= be64_to_cpu(sb
->fs_ppa
);
52 static void nvm_cpu_to_sysblk(struct nvm_system_block
*sb
,
53 struct nvm_sb_info
*info
)
55 sb
->magic
= cpu_to_be32(NVM_SYSBLK_MAGIC
);
56 sb
->seqnr
= cpu_to_be32(info
->seqnr
);
57 sb
->erase_cnt
= cpu_to_be32(info
->erase_cnt
);
58 sb
->version
= cpu_to_be16(info
->version
);
59 strncpy(sb
->mmtype
, info
->mmtype
, NVM_MMTYPE_LEN
);
60 sb
->fs_ppa
= cpu_to_be64(info
->fs_ppa
.ppa
);
63 static int nvm_setup_sysblks(struct nvm_dev
*dev
, struct ppa_addr
*sysblk_ppas
)
65 struct nvm_geo
*geo
= &dev
->geo
;
66 int nr_rows
= min_t(int, MAX_SYSBLKS
, geo
->nr_chnls
);
69 for (i
= 0; i
< nr_rows
; i
++)
70 sysblk_ppas
[i
].ppa
= 0;
72 /* if possible, place sysblk at first channel, middle channel and last
73 * channel of the device. If not, create only one or two sys blocks
75 switch (geo
->nr_chnls
) {
77 sysblk_ppas
[1].g
.ch
= 1;
80 sysblk_ppas
[0].g
.ch
= 0;
83 sysblk_ppas
[0].g
.ch
= 0;
84 sysblk_ppas
[1].g
.ch
= geo
->nr_chnls
/ 2;
85 sysblk_ppas
[2].g
.ch
= geo
->nr_chnls
- 1;
92 static void nvm_setup_sysblk_scan(struct nvm_dev
*dev
, struct sysblk_scan
*s
,
93 struct ppa_addr
*sysblk_ppas
)
95 memset(s
, 0, sizeof(struct sysblk_scan
));
96 s
->nr_rows
= nvm_setup_sysblks(dev
, sysblk_ppas
);
99 static int sysblk_get_free_blks(struct nvm_dev
*dev
, struct ppa_addr ppa
,
100 u8
*blks
, int nr_blks
,
101 struct sysblk_scan
*s
)
103 struct ppa_addr
*sppa
;
106 nr_blks
= nvm_bb_tbl_fold(dev
, blks
, nr_blks
);
110 for (i
= 0; i
< nr_blks
; i
++) {
111 if (blks
[i
] == NVM_BLK_T_HOST
)
114 if (blks
[i
] != NVM_BLK_T_FREE
)
117 sppa
= &s
->ppas
[scan_ppa_idx(s
->row
, blkid
)];
118 sppa
->g
.ch
= ppa
.g
.ch
;
119 sppa
->g
.lun
= ppa
.g
.lun
;
124 pr_debug("nvm: use (%u %u %u) as sysblk\n",
125 sppa
->g
.ch
, sppa
->g
.lun
, sppa
->g
.blk
);
126 if (blkid
> MAX_BLKS_PR_SYSBLK
- 1)
130 pr_err("nvm: sysblk failed get sysblk\n");
134 static int sysblk_get_host_blks(struct nvm_dev
*dev
, struct ppa_addr ppa
,
135 u8
*blks
, int nr_blks
,
136 struct sysblk_scan
*s
)
138 int i
, nr_sysblk
= 0;
140 nr_blks
= nvm_bb_tbl_fold(dev
, blks
, nr_blks
);
144 for (i
= 0; i
< nr_blks
; i
++) {
145 if (blks
[i
] != NVM_BLK_T_HOST
)
148 if (s
->nr_ppas
== MAX_BLKS_PR_SYSBLK
* MAX_SYSBLKS
) {
149 pr_err("nvm: too many host blks\n");
155 s
->ppas
[scan_ppa_idx(s
->row
, nr_sysblk
)] = ppa
;
163 static int nvm_get_all_sysblks(struct nvm_dev
*dev
, struct sysblk_scan
*s
,
164 struct ppa_addr
*ppas
, int get_free
)
166 struct nvm_geo
*geo
= &dev
->geo
;
167 int i
, nr_blks
, ret
= 0;
171 nr_blks
= geo
->blks_per_lun
* geo
->plane_mode
;
173 blks
= kmalloc(nr_blks
, GFP_KERNEL
);
177 for (i
= 0; i
< s
->nr_rows
; i
++) {
180 ret
= nvm_get_bb_tbl(dev
, ppas
[i
], blks
);
182 pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
189 ret
= sysblk_get_free_blks(dev
, ppas
[i
], blks
, nr_blks
,
192 ret
= sysblk_get_host_blks(dev
, ppas
[i
], blks
, nr_blks
,
205 * scans a block for latest sysblk.
207 * 0 - newer sysblk not found. PPA is updated to latest page.
208 * 1 - newer sysblk found and stored in *cur. PPA is updated to
212 static int nvm_scan_block(struct nvm_dev
*dev
, struct ppa_addr
*ppa
,
213 struct nvm_system_block
*sblk
)
215 struct nvm_geo
*geo
= &dev
->geo
;
216 struct nvm_system_block
*cur
;
217 int pg
, ret
, found
= 0;
219 /* the full buffer for a flash page is allocated. Only the first of it
220 * contains the system block information
222 cur
= kmalloc(geo
->pfpg_size
, GFP_KERNEL
);
226 /* perform linear scan through the block */
227 for (pg
= 0; pg
< dev
->lps_per_blk
; pg
++) {
228 ppa
->g
.pg
= ppa_to_slc(dev
, pg
);
230 ret
= nvm_submit_ppa(dev
, ppa
, 1, NVM_OP_PREAD
, NVM_IO_SLC_MODE
,
231 cur
, geo
->pfpg_size
);
233 if (ret
== NVM_RSP_ERR_EMPTYPAGE
) {
234 pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
241 pr_err("nvm: read failed (%x) for ppa (%u %u %u %u)",
247 break; /* if we can't read a page, continue to the
252 if (be32_to_cpu(cur
->magic
) != NVM_SYSBLK_MAGIC
) {
253 pr_debug("nvm: scan break for ppa (%u %u %u %u)\n",
258 break; /* last valid page already found */
261 if (be32_to_cpu(cur
->seqnr
) < be32_to_cpu(sblk
->seqnr
))
264 memcpy(sblk
, cur
, sizeof(struct nvm_system_block
));
273 static int nvm_sysblk_set_bb_tbl(struct nvm_dev
*dev
, struct sysblk_scan
*s
,
276 return nvm_set_bb_tbl(dev
, s
->ppas
, s
->nr_ppas
, type
);
279 static int nvm_write_and_verify(struct nvm_dev
*dev
, struct nvm_sb_info
*info
,
280 struct sysblk_scan
*s
)
282 struct nvm_geo
*geo
= &dev
->geo
;
283 struct nvm_system_block nvmsb
;
285 int i
, sect
, ret
= 0;
286 struct ppa_addr
*ppas
;
288 nvm_cpu_to_sysblk(&nvmsb
, info
);
290 buf
= kzalloc(geo
->pfpg_size
, GFP_KERNEL
);
293 memcpy(buf
, &nvmsb
, sizeof(struct nvm_system_block
));
295 ppas
= kcalloc(geo
->sec_per_pg
, sizeof(struct ppa_addr
), GFP_KERNEL
);
301 /* Write and verify */
302 for (i
= 0; i
< s
->nr_rows
; i
++) {
303 ppas
[0] = s
->ppas
[scan_ppa_idx(i
, s
->act_blk
[i
])];
305 pr_debug("nvm: writing sysblk to ppa (%u %u %u %u)\n",
311 /* Expand to all sectors within a flash page */
312 if (geo
->sec_per_pg
> 1) {
313 for (sect
= 1; sect
< geo
->sec_per_pg
; sect
++) {
314 ppas
[sect
].ppa
= ppas
[0].ppa
;
315 ppas
[sect
].g
.sec
= sect
;
319 ret
= nvm_submit_ppa(dev
, ppas
, geo
->sec_per_pg
, NVM_OP_PWRITE
,
320 NVM_IO_SLC_MODE
, buf
, geo
->pfpg_size
);
322 pr_err("nvm: sysblk failed program (%u %u %u)\n",
329 ret
= nvm_submit_ppa(dev
, ppas
, geo
->sec_per_pg
, NVM_OP_PREAD
,
330 NVM_IO_SLC_MODE
, buf
, geo
->pfpg_size
);
332 pr_err("nvm: sysblk failed read (%u %u %u)\n",
339 if (memcmp(buf
, &nvmsb
, sizeof(struct nvm_system_block
))) {
340 pr_err("nvm: sysblk failed verify (%u %u %u)\n",
356 static int nvm_prepare_new_sysblks(struct nvm_dev
*dev
, struct sysblk_scan
*s
)
359 unsigned long nxt_blk
;
360 struct ppa_addr
*ppa
;
362 for (i
= 0; i
< s
->nr_rows
; i
++) {
363 nxt_blk
= (s
->act_blk
[i
] + 1) % MAX_BLKS_PR_SYSBLK
;
364 ppa
= &s
->ppas
[scan_ppa_idx(i
, nxt_blk
)];
365 ppa
->g
.pg
= ppa_to_slc(dev
, 0);
367 ret
= nvm_erase_ppa(dev
, ppa
, 1, 0);
371 s
->act_blk
[i
] = nxt_blk
;
377 int nvm_get_sysblock(struct nvm_dev
*dev
, struct nvm_sb_info
*info
)
379 struct ppa_addr sysblk_ppas
[MAX_SYSBLKS
];
380 struct sysblk_scan s
;
381 struct nvm_system_block
*cur
;
386 * 1. setup sysblk locations
387 * 2. get bad block list
388 * 3. filter on host-specific (type 3)
389 * 4. iterate through all and find the highest seq nr.
390 * 5. return superblock information
393 if (!dev
->ops
->get_bb_tbl
)
396 nvm_setup_sysblk_scan(dev
, &s
, sysblk_ppas
);
398 mutex_lock(&dev
->mlock
);
399 ret
= nvm_get_all_sysblks(dev
, &s
, sysblk_ppas
, 0);
403 /* no sysblocks initialized */
407 cur
= kzalloc(sizeof(struct nvm_system_block
), GFP_KERNEL
);
411 /* find the latest block across all sysblocks */
412 for (i
= 0; i
< s
.nr_rows
; i
++) {
413 for (j
= 0; j
< MAX_BLKS_PR_SYSBLK
; j
++) {
414 struct ppa_addr ppa
= s
.ppas
[scan_ppa_idx(i
, j
)];
416 ret
= nvm_scan_block(dev
, &ppa
, cur
);
424 nvm_sysblk_to_cpu(info
, cur
);
428 mutex_unlock(&dev
->mlock
);
435 int nvm_update_sysblock(struct nvm_dev
*dev
, struct nvm_sb_info
*new)
437 /* 1. for each latest superblock
439 * a. write new flash page entry with the updated information
441 * a. find next available block on lun (linear search)
442 * if none, continue to next lun
443 * if none at all, report error. also report that it wasn't
444 * possible to write to all superblocks.
445 * c. write data to block.
447 struct ppa_addr sysblk_ppas
[MAX_SYSBLKS
];
448 struct sysblk_scan s
;
449 struct nvm_system_block
*cur
;
450 int i
, j
, ppaidx
, found
= 0;
453 if (!dev
->ops
->get_bb_tbl
)
456 nvm_setup_sysblk_scan(dev
, &s
, sysblk_ppas
);
458 mutex_lock(&dev
->mlock
);
459 ret
= nvm_get_all_sysblks(dev
, &s
, sysblk_ppas
, 0);
463 cur
= kzalloc(sizeof(struct nvm_system_block
), GFP_KERNEL
);
467 /* Get the latest sysblk for each sysblk row */
468 for (i
= 0; i
< s
.nr_rows
; i
++) {
470 for (j
= 0; j
< MAX_BLKS_PR_SYSBLK
; j
++) {
471 ppaidx
= scan_ppa_idx(i
, j
);
472 ret
= nvm_scan_block(dev
, &s
.ppas
[ppaidx
], cur
);
482 pr_err("nvm: no valid sysblks found to update\n");
488 * All sysblocks found. Check that they have same page id in their flash
491 for (i
= 1; i
< s
.nr_rows
; i
++) {
492 struct ppa_addr l
= s
.ppas
[scan_ppa_idx(0, s
.act_blk
[0])];
493 struct ppa_addr r
= s
.ppas
[scan_ppa_idx(i
, s
.act_blk
[i
])];
495 if (l
.g
.pg
!= r
.g
.pg
) {
496 pr_err("nvm: sysblks not on same page. Previous update failed.\n");
503 * Check that there haven't been another update to the seqnr since we
506 if ((new->seqnr
- 1) != be32_to_cpu(cur
->seqnr
)) {
507 pr_err("nvm: seq is not sequential\n");
513 * When all pages in a block has been written, a new block is selected
514 * and writing is performed on the new block.
516 if (s
.ppas
[scan_ppa_idx(0, s
.act_blk
[0])].g
.pg
==
517 dev
->lps_per_blk
- 1) {
518 ret
= nvm_prepare_new_sysblks(dev
, &s
);
523 ret
= nvm_write_and_verify(dev
, new, &s
);
527 mutex_unlock(&dev
->mlock
);
532 int nvm_init_sysblock(struct nvm_dev
*dev
, struct nvm_sb_info
*info
)
534 struct nvm_geo
*geo
= &dev
->geo
;
535 struct ppa_addr sysblk_ppas
[MAX_SYSBLKS
];
536 struct sysblk_scan s
;
540 * 1. select master blocks and select first available blks
541 * 2. get bad block list
542 * 3. mark MAX_SYSBLKS block as host-based device allocated.
543 * 4. write and verify data to block
546 if (!dev
->ops
->get_bb_tbl
|| !dev
->ops
->set_bb_tbl
)
549 if (!(geo
->mccap
& NVM_ID_CAP_SLC
) || !dev
->lps_per_blk
) {
550 pr_err("nvm: memory does not support SLC access\n");
554 /* Index all sysblocks and mark them as host-driven */
555 nvm_setup_sysblk_scan(dev
, &s
, sysblk_ppas
);
557 mutex_lock(&dev
->mlock
);
558 ret
= nvm_get_all_sysblks(dev
, &s
, sysblk_ppas
, 1);
562 ret
= nvm_sysblk_set_bb_tbl(dev
, &s
, NVM_BLK_T_HOST
);
566 /* Write to the first block of each row */
567 ret
= nvm_write_and_verify(dev
, info
, &s
);
569 mutex_unlock(&dev
->mlock
);
573 static int factory_nblks(int nblks
)
575 /* Round up to nearest BITS_PER_LONG */
576 return (nblks
+ (BITS_PER_LONG
- 1)) & ~(BITS_PER_LONG
- 1);
579 static unsigned int factory_blk_offset(struct nvm_geo
*geo
, struct ppa_addr ppa
)
581 int nblks
= factory_nblks(geo
->blks_per_lun
);
583 return ((ppa
.g
.ch
* geo
->luns_per_chnl
* nblks
) + (ppa
.g
.lun
* nblks
)) /
587 static int nvm_factory_blks(struct nvm_dev
*dev
, struct ppa_addr ppa
,
588 u8
*blks
, int nr_blks
,
589 unsigned long *blk_bitmap
, int flags
)
593 nr_blks
= nvm_bb_tbl_fold(dev
, blks
, nr_blks
);
597 lunoff
= factory_blk_offset(&dev
->geo
, ppa
);
599 /* non-set bits correspond to the block must be erased */
600 for (i
= 0; i
< nr_blks
; i
++) {
603 if (flags
& NVM_FACTORY_ERASE_ONLY_USER
)
604 set_bit(i
, &blk_bitmap
[lunoff
]);
607 if (!(flags
& NVM_FACTORY_RESET_HOST_BLKS
))
608 set_bit(i
, &blk_bitmap
[lunoff
]);
610 case NVM_BLK_T_GRWN_BAD
:
611 if (!(flags
& NVM_FACTORY_RESET_GRWN_BBLKS
))
612 set_bit(i
, &blk_bitmap
[lunoff
]);
615 set_bit(i
, &blk_bitmap
[lunoff
]);
623 static int nvm_fact_get_blks(struct nvm_dev
*dev
, struct ppa_addr
*erase_list
,
624 int max_ppas
, unsigned long *blk_bitmap
)
626 struct nvm_geo
*geo
= &dev
->geo
;
628 int ch
, lun
, blkid
, idx
, done
= 0, ppa_cnt
= 0;
629 unsigned long *offset
;
633 nvm_for_each_lun_ppa(geo
, ppa
, ch
, lun
) {
634 idx
= factory_blk_offset(geo
, ppa
);
635 offset
= &blk_bitmap
[idx
];
637 blkid
= find_first_zero_bit(offset
, geo
->blks_per_lun
);
638 if (blkid
>= geo
->blks_per_lun
)
640 set_bit(blkid
, offset
);
643 pr_debug("nvm: erase ppa (%u %u %u)\n",
648 erase_list
[ppa_cnt
] = ppa
;
652 if (ppa_cnt
== max_ppas
)
660 static int nvm_fact_select_blks(struct nvm_dev
*dev
, unsigned long *blk_bitmap
,
663 struct nvm_geo
*geo
= &dev
->geo
;
665 int ch
, lun
, nr_blks
, ret
= 0;
668 nr_blks
= geo
->blks_per_lun
* geo
->plane_mode
;
669 blks
= kmalloc(nr_blks
, GFP_KERNEL
);
673 nvm_for_each_lun_ppa(geo
, ppa
, ch
, lun
) {
674 ret
= nvm_get_bb_tbl(dev
, ppa
, blks
);
676 pr_err("nvm: failed bb tbl for ch%u lun%u\n",
677 ppa
.g
.ch
, ppa
.g
.blk
);
679 ret
= nvm_factory_blks(dev
, ppa
, blks
, nr_blks
, blk_bitmap
,
689 int nvm_dev_factory(struct nvm_dev
*dev
, int flags
)
691 struct nvm_geo
*geo
= &dev
->geo
;
692 struct ppa_addr
*ppas
;
693 int ppa_cnt
, ret
= -ENOMEM
;
694 int max_ppas
= dev
->ops
->max_phys_sect
/ geo
->nr_planes
;
695 struct ppa_addr sysblk_ppas
[MAX_SYSBLKS
];
696 struct sysblk_scan s
;
697 unsigned long *blk_bitmap
;
699 blk_bitmap
= kzalloc(factory_nblks(geo
->blks_per_lun
) * geo
->nr_luns
,
704 ppas
= kcalloc(max_ppas
, sizeof(struct ppa_addr
), GFP_KERNEL
);
708 /* create list of blks to be erased */
709 ret
= nvm_fact_select_blks(dev
, blk_bitmap
, flags
);
713 /* continue to erase until list of blks until empty */
715 nvm_fact_get_blks(dev
, ppas
, max_ppas
, blk_bitmap
)) > 0)
716 nvm_erase_ppa(dev
, ppas
, ppa_cnt
, 0);
718 /* mark host reserved blocks free */
719 if (flags
& NVM_FACTORY_RESET_HOST_BLKS
) {
720 nvm_setup_sysblk_scan(dev
, &s
, sysblk_ppas
);
721 mutex_lock(&dev
->mlock
);
722 ret
= nvm_get_all_sysblks(dev
, &s
, sysblk_ppas
, 0);
724 ret
= nvm_sysblk_set_bb_tbl(dev
, &s
, NVM_BLK_T_FREE
);
725 mutex_unlock(&dev
->mlock
);
733 EXPORT_SYMBOL(nvm_dev_factory
);