/*
 * Copyright (C) 2015 Matias Bjorling. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */
20 #include <linux/lightnvm.h>
22 #define MAX_SYSBLKS 3 /* remember to update mapping scheme on change */
23 #define MAX_BLKS_PR_SYSBLK 2 /* 2 blks with 256 pages and 3000 erases
24 * enables ~1.5M updates per sysblk unit
28 /* A row is a collection of flash blocks for a system block. */
31 int act_blk
[MAX_SYSBLKS
];
34 struct ppa_addr ppas
[MAX_SYSBLKS
* MAX_BLKS_PR_SYSBLK
];/* all sysblks */
37 static inline int scan_ppa_idx(int row
, int blkid
)
39 return (row
* MAX_BLKS_PR_SYSBLK
) + blkid
;
42 void nvm_sysblk_to_cpu(struct nvm_sb_info
*info
, struct nvm_system_block
*sb
)
44 info
->seqnr
= be32_to_cpu(sb
->seqnr
);
45 info
->erase_cnt
= be32_to_cpu(sb
->erase_cnt
);
46 info
->version
= be16_to_cpu(sb
->version
);
47 strncpy(info
->mmtype
, sb
->mmtype
, NVM_MMTYPE_LEN
);
48 info
->fs_ppa
.ppa
= be64_to_cpu(sb
->fs_ppa
);
51 void nvm_cpu_to_sysblk(struct nvm_system_block
*sb
, struct nvm_sb_info
*info
)
53 sb
->magic
= cpu_to_be32(NVM_SYSBLK_MAGIC
);
54 sb
->seqnr
= cpu_to_be32(info
->seqnr
);
55 sb
->erase_cnt
= cpu_to_be32(info
->erase_cnt
);
56 sb
->version
= cpu_to_be16(info
->version
);
57 strncpy(sb
->mmtype
, info
->mmtype
, NVM_MMTYPE_LEN
);
58 sb
->fs_ppa
= cpu_to_be64(info
->fs_ppa
.ppa
);
61 static int nvm_setup_sysblks(struct nvm_dev
*dev
, struct ppa_addr
*sysblk_ppas
)
63 int nr_rows
= min_t(int, MAX_SYSBLKS
, dev
->nr_chnls
);
66 for (i
= 0; i
< nr_rows
; i
++)
67 sysblk_ppas
[i
].ppa
= 0;
69 /* if possible, place sysblk at first channel, middle channel and last
70 * channel of the device. If not, create only one or two sys blocks
72 switch (dev
->nr_chnls
) {
74 sysblk_ppas
[1].g
.ch
= 1;
77 sysblk_ppas
[0].g
.ch
= 0;
80 sysblk_ppas
[0].g
.ch
= 0;
81 sysblk_ppas
[1].g
.ch
= dev
->nr_chnls
/ 2;
82 sysblk_ppas
[2].g
.ch
= dev
->nr_chnls
- 1;
89 void nvm_setup_sysblk_scan(struct nvm_dev
*dev
, struct sysblk_scan
*s
,
90 struct ppa_addr
*sysblk_ppas
)
92 memset(s
, 0, sizeof(struct sysblk_scan
));
93 s
->nr_rows
= nvm_setup_sysblks(dev
, sysblk_ppas
);
96 static int sysblk_get_host_blks(struct nvm_dev
*dev
, struct ppa_addr ppa
,
97 u8
*blks
, int nr_blks
, void *private)
99 struct sysblk_scan
*s
= private;
100 int i
, nr_sysblk
= 0;
102 nr_blks
= nvm_bb_tbl_fold(dev
, blks
, nr_blks
);
106 for (i
= 0; i
< nr_blks
; i
++) {
107 if (blks
[i
] != NVM_BLK_T_HOST
)
110 if (s
->nr_ppas
== MAX_BLKS_PR_SYSBLK
* MAX_SYSBLKS
) {
111 pr_err("nvm: too many host blks\n");
117 s
->ppas
[scan_ppa_idx(s
->row
, nr_sysblk
)] = ppa
;
125 static int nvm_get_all_sysblks(struct nvm_dev
*dev
, struct sysblk_scan
*s
,
126 struct ppa_addr
*ppas
, nvm_bb_update_fn
*fn
)
128 struct ppa_addr dppa
;
133 for (i
= 0; i
< s
->nr_rows
; i
++) {
134 dppa
= generic_to_dev_addr(dev
, ppas
[i
]);
137 ret
= dev
->ops
->get_bb_tbl(dev
, dppa
, fn
, s
);
139 pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
150 * scans a block for latest sysblk.
152 * 0 - newer sysblk not found. PPA is updated to latest page.
153 * 1 - newer sysblk found and stored in *cur. PPA is updated to
157 static int nvm_scan_block(struct nvm_dev
*dev
, struct ppa_addr
*ppa
,
158 struct nvm_system_block
*sblk
)
160 struct nvm_system_block
*cur
;
161 int pg
, ret
, found
= 0;
163 /* the full buffer for a flash page is allocated. Only the first of it
164 * contains the system block information
166 cur
= kmalloc(dev
->pfpg_size
, GFP_KERNEL
);
170 /* perform linear scan through the block */
171 for (pg
= 0; pg
< dev
->lps_per_blk
; pg
++) {
172 ppa
->g
.pg
= ppa_to_slc(dev
, pg
);
174 ret
= nvm_submit_ppa(dev
, ppa
, 1, NVM_OP_PREAD
, NVM_IO_SLC_MODE
,
175 cur
, dev
->pfpg_size
);
177 if (ret
== NVM_RSP_ERR_EMPTYPAGE
) {
178 pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
185 pr_err("nvm: read failed (%x) for ppa (%u %u %u %u)",
191 break; /* if we can't read a page, continue to the
196 if (be32_to_cpu(cur
->magic
) != NVM_SYSBLK_MAGIC
) {
197 pr_debug("nvm: scan break for ppa (%u %u %u %u)\n",
202 break; /* last valid page already found */
205 if (be32_to_cpu(cur
->seqnr
) < be32_to_cpu(sblk
->seqnr
))
208 memcpy(sblk
, cur
, sizeof(struct nvm_system_block
));
217 static int nvm_set_bb_tbl(struct nvm_dev
*dev
, struct sysblk_scan
*s
, int type
)
222 if (s
->nr_ppas
> dev
->ops
->max_phys_sect
) {
223 pr_err("nvm: unable to update all sysblocks atomically\n");
227 memset(&rqd
, 0, sizeof(struct nvm_rq
));
229 nvm_set_rqd_ppalist(dev
, &rqd
, s
->ppas
, s
->nr_ppas
);
230 nvm_generic_to_addr_mode(dev
, &rqd
);
232 ret
= dev
->ops
->set_bb_tbl(dev
, &rqd
, type
);
233 nvm_free_rqd_ppalist(dev
, &rqd
);
235 pr_err("nvm: sysblk failed bb mark\n");
242 static int sysblk_get_free_blks(struct nvm_dev
*dev
, struct ppa_addr ppa
,
243 u8
*blks
, int nr_blks
, void *private)
245 struct sysblk_scan
*s
= private;
246 struct ppa_addr
*sppa
;
249 nr_blks
= nvm_bb_tbl_fold(dev
, blks
, nr_blks
);
253 for (i
= 0; i
< nr_blks
; i
++) {
254 if (blks
[i
] == NVM_BLK_T_HOST
)
257 if (blks
[i
] != NVM_BLK_T_FREE
)
260 sppa
= &s
->ppas
[scan_ppa_idx(s
->row
, blkid
)];
261 sppa
->g
.ch
= ppa
.g
.ch
;
262 sppa
->g
.lun
= ppa
.g
.lun
;
267 pr_debug("nvm: use (%u %u %u) as sysblk\n",
268 sppa
->g
.ch
, sppa
->g
.lun
, sppa
->g
.blk
);
269 if (blkid
> MAX_BLKS_PR_SYSBLK
- 1)
273 pr_err("nvm: sysblk failed get sysblk\n");
277 static int nvm_write_and_verify(struct nvm_dev
*dev
, struct nvm_sb_info
*info
,
278 struct sysblk_scan
*s
)
280 struct nvm_system_block nvmsb
;
282 int i
, sect
, ret
= 0;
283 struct ppa_addr
*ppas
;
285 nvm_cpu_to_sysblk(&nvmsb
, info
);
287 buf
= kzalloc(dev
->pfpg_size
, GFP_KERNEL
);
290 memcpy(buf
, &nvmsb
, sizeof(struct nvm_system_block
));
292 ppas
= kcalloc(dev
->sec_per_pg
, sizeof(struct ppa_addr
), GFP_KERNEL
);
298 /* Write and verify */
299 for (i
= 0; i
< s
->nr_rows
; i
++) {
300 ppas
[0] = s
->ppas
[scan_ppa_idx(i
, s
->act_blk
[i
])];
302 pr_debug("nvm: writing sysblk to ppa (%u %u %u %u)\n",
308 /* Expand to all sectors within a flash page */
309 if (dev
->sec_per_pg
> 1) {
310 for (sect
= 1; sect
< dev
->sec_per_pg
; sect
++) {
311 ppas
[sect
].ppa
= ppas
[0].ppa
;
312 ppas
[sect
].g
.sec
= sect
;
316 ret
= nvm_submit_ppa(dev
, ppas
, dev
->sec_per_pg
, NVM_OP_PWRITE
,
317 NVM_IO_SLC_MODE
, buf
, dev
->pfpg_size
);
319 pr_err("nvm: sysblk failed program (%u %u %u)\n",
326 ret
= nvm_submit_ppa(dev
, ppas
, dev
->sec_per_pg
, NVM_OP_PREAD
,
327 NVM_IO_SLC_MODE
, buf
, dev
->pfpg_size
);
329 pr_err("nvm: sysblk failed read (%u %u %u)\n",
336 if (memcmp(buf
, &nvmsb
, sizeof(struct nvm_system_block
))) {
337 pr_err("nvm: sysblk failed verify (%u %u %u)\n",
353 static int nvm_prepare_new_sysblks(struct nvm_dev
*dev
, struct sysblk_scan
*s
)
356 unsigned long nxt_blk
;
357 struct ppa_addr
*ppa
;
359 for (i
= 0; i
< s
->nr_rows
; i
++) {
360 nxt_blk
= (s
->act_blk
[i
] + 1) % MAX_BLKS_PR_SYSBLK
;
361 ppa
= &s
->ppas
[scan_ppa_idx(i
, nxt_blk
)];
362 ppa
->g
.pg
= ppa_to_slc(dev
, 0);
364 ret
= nvm_erase_ppa(dev
, ppa
, 1);
368 s
->act_blk
[i
] = nxt_blk
;
374 int nvm_get_sysblock(struct nvm_dev
*dev
, struct nvm_sb_info
*info
)
376 struct ppa_addr sysblk_ppas
[MAX_SYSBLKS
];
377 struct sysblk_scan s
;
378 struct nvm_system_block
*cur
;
383 * 1. setup sysblk locations
384 * 2. get bad block list
385 * 3. filter on host-specific (type 3)
386 * 4. iterate through all and find the highest seq nr.
387 * 5. return superblock information
390 if (!dev
->ops
->get_bb_tbl
)
393 nvm_setup_sysblk_scan(dev
, &s
, sysblk_ppas
);
395 mutex_lock(&dev
->mlock
);
396 ret
= nvm_get_all_sysblks(dev
, &s
, sysblk_ppas
, sysblk_get_host_blks
);
400 /* no sysblocks initialized */
404 cur
= kzalloc(sizeof(struct nvm_system_block
), GFP_KERNEL
);
408 /* find the latest block across all sysblocks */
409 for (i
= 0; i
< s
.nr_rows
; i
++) {
410 for (j
= 0; j
< MAX_BLKS_PR_SYSBLK
; j
++) {
411 struct ppa_addr ppa
= s
.ppas
[scan_ppa_idx(i
, j
)];
413 ret
= nvm_scan_block(dev
, &ppa
, cur
);
421 nvm_sysblk_to_cpu(info
, cur
);
425 mutex_unlock(&dev
->mlock
);
432 int nvm_update_sysblock(struct nvm_dev
*dev
, struct nvm_sb_info
*new)
434 /* 1. for each latest superblock
436 * a. write new flash page entry with the updated information
438 * a. find next available block on lun (linear search)
439 * if none, continue to next lun
440 * if none at all, report error. also report that it wasn't
441 * possible to write to all superblocks.
442 * c. write data to block.
444 struct ppa_addr sysblk_ppas
[MAX_SYSBLKS
];
445 struct sysblk_scan s
;
446 struct nvm_system_block
*cur
;
447 int i
, j
, ppaidx
, found
= 0;
450 if (!dev
->ops
->get_bb_tbl
)
453 nvm_setup_sysblk_scan(dev
, &s
, sysblk_ppas
);
455 mutex_lock(&dev
->mlock
);
456 ret
= nvm_get_all_sysblks(dev
, &s
, sysblk_ppas
, sysblk_get_host_blks
);
460 cur
= kzalloc(sizeof(struct nvm_system_block
), GFP_KERNEL
);
464 /* Get the latest sysblk for each sysblk row */
465 for (i
= 0; i
< s
.nr_rows
; i
++) {
467 for (j
= 0; j
< MAX_BLKS_PR_SYSBLK
; j
++) {
468 ppaidx
= scan_ppa_idx(i
, j
);
469 ret
= nvm_scan_block(dev
, &s
.ppas
[ppaidx
], cur
);
479 pr_err("nvm: no valid sysblks found to update\n");
485 * All sysblocks found. Check that they have same page id in their flash
488 for (i
= 1; i
< s
.nr_rows
; i
++) {
489 struct ppa_addr l
= s
.ppas
[scan_ppa_idx(0, s
.act_blk
[0])];
490 struct ppa_addr r
= s
.ppas
[scan_ppa_idx(i
, s
.act_blk
[i
])];
492 if (l
.g
.pg
!= r
.g
.pg
) {
493 pr_err("nvm: sysblks not on same page. Previous update failed.\n");
500 * Check that there haven't been another update to the seqnr since we
503 if ((new->seqnr
- 1) != be32_to_cpu(cur
->seqnr
)) {
504 pr_err("nvm: seq is not sequential\n");
510 * When all pages in a block has been written, a new block is selected
511 * and writing is performed on the new block.
513 if (s
.ppas
[scan_ppa_idx(0, s
.act_blk
[0])].g
.pg
==
514 dev
->lps_per_blk
- 1) {
515 ret
= nvm_prepare_new_sysblks(dev
, &s
);
520 ret
= nvm_write_and_verify(dev
, new, &s
);
524 mutex_unlock(&dev
->mlock
);
529 int nvm_init_sysblock(struct nvm_dev
*dev
, struct nvm_sb_info
*info
)
531 struct ppa_addr sysblk_ppas
[MAX_SYSBLKS
];
532 struct sysblk_scan s
;
536 * 1. select master blocks and select first available blks
537 * 2. get bad block list
538 * 3. mark MAX_SYSBLKS block as host-based device allocated.
539 * 4. write and verify data to block
542 if (!dev
->ops
->get_bb_tbl
|| !dev
->ops
->set_bb_tbl
)
545 if (!(dev
->mccap
& NVM_ID_CAP_SLC
) || !dev
->lps_per_blk
) {
546 pr_err("nvm: memory does not support SLC access\n");
550 /* Index all sysblocks and mark them as host-driven */
551 nvm_setup_sysblk_scan(dev
, &s
, sysblk_ppas
);
553 mutex_lock(&dev
->mlock
);
554 ret
= nvm_get_all_sysblks(dev
, &s
, sysblk_ppas
, sysblk_get_free_blks
);
558 ret
= nvm_set_bb_tbl(dev
, &s
, NVM_BLK_T_HOST
);
562 /* Write to the first block of each row */
563 ret
= nvm_write_and_verify(dev
, info
, &s
);
565 mutex_unlock(&dev
->mlock
);
/* State for a factory reset: which blocks to keep (set bits = skip erase). */
struct factory_blks {
	struct nvm_dev *dev;
	int flags;		/* NVM_FACTORY_* behavior flags */
	unsigned long *blks;	/* per-lun bitmaps, one bit per block */
};
575 static int factory_nblks(int nblks
)
577 /* Round up to nearest BITS_PER_LONG */
578 return (nblks
+ (BITS_PER_LONG
- 1)) & ~(BITS_PER_LONG
- 1);
581 static unsigned int factory_blk_offset(struct nvm_dev
*dev
, int ch
, int lun
)
583 int nblks
= factory_nblks(dev
->blks_per_lun
);
585 return ((ch
* dev
->luns_per_chnl
* nblks
) + (lun
* nblks
)) /
589 static int nvm_factory_blks(struct nvm_dev
*dev
, struct ppa_addr ppa
,
590 u8
*blks
, int nr_blks
, void *private)
592 struct factory_blks
*f
= private;
595 nr_blks
= nvm_bb_tbl_fold(dev
, blks
, nr_blks
);
599 lunoff
= factory_blk_offset(dev
, ppa
.g
.ch
, ppa
.g
.lun
);
601 /* non-set bits correspond to the block must be erased */
602 for (i
= 0; i
< nr_blks
; i
++) {
605 if (f
->flags
& NVM_FACTORY_ERASE_ONLY_USER
)
606 set_bit(i
, &f
->blks
[lunoff
]);
609 if (!(f
->flags
& NVM_FACTORY_RESET_HOST_BLKS
))
610 set_bit(i
, &f
->blks
[lunoff
]);
612 case NVM_BLK_T_GRWN_BAD
:
613 if (!(f
->flags
& NVM_FACTORY_RESET_GRWN_BBLKS
))
614 set_bit(i
, &f
->blks
[lunoff
]);
617 set_bit(i
, &f
->blks
[lunoff
]);
625 static int nvm_fact_get_blks(struct nvm_dev
*dev
, struct ppa_addr
*erase_list
,
626 int max_ppas
, struct factory_blks
*f
)
629 int ch
, lun
, blkid
, idx
, done
= 0, ppa_cnt
= 0;
630 unsigned long *offset
;
634 for (ch
= 0; ch
< dev
->nr_chnls
; ch
++) {
635 for (lun
= 0; lun
< dev
->luns_per_chnl
; lun
++) {
636 idx
= factory_blk_offset(dev
, ch
, lun
);
637 offset
= &f
->blks
[idx
];
639 blkid
= find_first_zero_bit(offset
,
641 if (blkid
>= dev
->blks_per_lun
)
643 set_bit(blkid
, offset
);
649 pr_debug("nvm: erase ppa (%u %u %u)\n",
654 erase_list
[ppa_cnt
] = ppa
;
658 if (ppa_cnt
== max_ppas
)
667 static int nvm_fact_get_bb_tbl(struct nvm_dev
*dev
, struct ppa_addr ppa
,
668 nvm_bb_update_fn
*fn
, void *priv
)
670 struct ppa_addr dev_ppa
;
673 dev_ppa
= generic_to_dev_addr(dev
, ppa
);
675 ret
= dev
->ops
->get_bb_tbl(dev
, dev_ppa
, fn
, priv
);
677 pr_err("nvm: failed bb tbl for ch%u lun%u\n",
678 ppa
.g
.ch
, ppa
.g
.blk
);
682 static int nvm_fact_select_blks(struct nvm_dev
*dev
, struct factory_blks
*f
)
688 for (ch
= 0; ch
< dev
->nr_chnls
; ch
++) {
689 for (lun
= 0; lun
< dev
->luns_per_chnl
; lun
++) {
693 ret
= nvm_fact_get_bb_tbl(dev
, ppa
, nvm_factory_blks
,
703 int nvm_dev_factory(struct nvm_dev
*dev
, int flags
)
705 struct factory_blks f
;
706 struct ppa_addr
*ppas
;
707 int ppa_cnt
, ret
= -ENOMEM
;
708 int max_ppas
= dev
->ops
->max_phys_sect
/ dev
->nr_planes
;
709 struct ppa_addr sysblk_ppas
[MAX_SYSBLKS
];
710 struct sysblk_scan s
;
712 f
.blks
= kzalloc(factory_nblks(dev
->blks_per_lun
) * dev
->nr_luns
,
717 ppas
= kcalloc(max_ppas
, sizeof(struct ppa_addr
), GFP_KERNEL
);
724 /* create list of blks to be erased */
725 ret
= nvm_fact_select_blks(dev
, &f
);
729 /* continue to erase until list of blks until empty */
730 while ((ppa_cnt
= nvm_fact_get_blks(dev
, ppas
, max_ppas
, &f
)) > 0)
731 nvm_erase_ppa(dev
, ppas
, ppa_cnt
);
733 /* mark host reserved blocks free */
734 if (flags
& NVM_FACTORY_RESET_HOST_BLKS
) {
735 nvm_setup_sysblk_scan(dev
, &s
, sysblk_ppas
);
736 mutex_lock(&dev
->mlock
);
737 ret
= nvm_get_all_sysblks(dev
, &s
, sysblk_ppas
,
738 sysblk_get_host_blks
);
740 ret
= nvm_set_bb_tbl(dev
, &s
, NVM_BLK_T_FREE
);
741 mutex_unlock(&dev
->mlock
);
749 EXPORT_SYMBOL(nvm_dev_factory
);