lightnvm: add fpg_size and pfpg_size to struct nvm_dev
[deliverable/linux.git] / drivers / lightnvm / sysblk.c
CommitLineData
e3eb3799
MB
1/*
2 * Copyright (C) 2015 Matias Bjorling. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License version
6 * 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; see the file COPYING. If not, write to
15 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
16 * USA.
17 *
18 */
19
20#include <linux/lightnvm.h>
21
22#define MAX_SYSBLKS 3 /* remember to update mapping scheme on change */
23#define MAX_BLKS_PR_SYSBLK 2 /* 2 blks with 256 pages and 3000 erases
24 * enables ~1.5M updates per sysblk unit
25 */
26
struct sysblk_scan {
	/* A row is a collection of flash blocks for a system block. */
	int nr_rows;		/* rows in use, at most MAX_SYSBLKS */
	int row;		/* row currently being filled by a bb-tbl scan */
	int act_blk[MAX_SYSBLKS];	/* per-row index of the latest sysblk copy */

	int nr_ppas;		/* total number of valid entries in ppas[] */
	struct ppa_addr ppas[MAX_SYSBLKS * MAX_BLKS_PR_SYSBLK];/* all sysblks */
};
36
37static inline int scan_ppa_idx(int row, int blkid)
38{
39 return (row * MAX_BLKS_PR_SYSBLK) + blkid;
40}
41
42void nvm_sysblk_to_cpu(struct nvm_sb_info *info, struct nvm_system_block *sb)
43{
44 info->seqnr = be32_to_cpu(sb->seqnr);
45 info->erase_cnt = be32_to_cpu(sb->erase_cnt);
46 info->version = be16_to_cpu(sb->version);
47 strncpy(info->mmtype, sb->mmtype, NVM_MMTYPE_LEN);
48 info->fs_ppa.ppa = be64_to_cpu(sb->fs_ppa);
49}
50
51void nvm_cpu_to_sysblk(struct nvm_system_block *sb, struct nvm_sb_info *info)
52{
53 sb->magic = cpu_to_be32(NVM_SYSBLK_MAGIC);
54 sb->seqnr = cpu_to_be32(info->seqnr);
55 sb->erase_cnt = cpu_to_be32(info->erase_cnt);
56 sb->version = cpu_to_be16(info->version);
57 strncpy(sb->mmtype, info->mmtype, NVM_MMTYPE_LEN);
58 sb->fs_ppa = cpu_to_be64(info->fs_ppa.ppa);
59}
60
61static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
62{
63 int nr_rows = min_t(int, MAX_SYSBLKS, dev->nr_chnls);
64 int i;
65
66 for (i = 0; i < nr_rows; i++)
67 sysblk_ppas[i].ppa = 0;
68
69 /* if possible, place sysblk at first channel, middle channel and last
70 * channel of the device. If not, create only one or two sys blocks
71 */
72 switch (dev->nr_chnls) {
73 case 2:
74 sysblk_ppas[1].g.ch = 1;
75 /* fall-through */
76 case 1:
77 sysblk_ppas[0].g.ch = 0;
78 break;
79 default:
80 sysblk_ppas[0].g.ch = 0;
81 sysblk_ppas[1].g.ch = dev->nr_chnls / 2;
82 sysblk_ppas[2].g.ch = dev->nr_chnls - 1;
83 break;
84 }
85
86 return nr_rows;
87}
88
89void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
90 struct ppa_addr *sysblk_ppas)
91{
92 memset(s, 0, sizeof(struct sysblk_scan));
93 s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas);
94}
95
96static int sysblk_get_host_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
97 void *private)
98{
99 struct sysblk_scan *s = private;
100 int i, nr_sysblk = 0;
101
102 for (i = 0; i < nr_blks; i++) {
103 if (blks[i] != NVM_BLK_T_HOST)
104 continue;
105
106 if (s->nr_ppas == MAX_BLKS_PR_SYSBLK * MAX_SYSBLKS) {
107 pr_err("nvm: too many host blks\n");
108 return -EINVAL;
109 }
110
111 ppa.g.blk = i;
112
113 s->ppas[scan_ppa_idx(s->row, nr_sysblk)] = ppa;
114 s->nr_ppas++;
115 nr_sysblk++;
116 }
117
118 return 0;
119}
120
121static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
122 struct ppa_addr *ppas, nvm_bb_update_fn *fn)
123{
124 struct ppa_addr dppa;
57aac2f1 125 int i, ret = 0;
e3eb3799
MB
126
127 s->nr_ppas = 0;
128
129 for (i = 0; i < s->nr_rows; i++) {
130 dppa = generic_to_dev_addr(dev, ppas[i]);
131 s->row = i;
132
133 ret = dev->ops->get_bb_tbl(dev, dppa, dev->blks_per_lun, fn, s);
134 if (ret) {
135 pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
136 ppas[i].g.ch,
137 ppas[i].g.blk);
138 return ret;
139 }
140 }
141
142 return ret;
143}
144
145/*
146 * scans a block for latest sysblk.
147 * Returns:
148 * 0 - newer sysblk not found. PPA is updated to latest page.
149 * 1 - newer sysblk found and stored in *cur. PPA is updated to
150 * next valid page.
151 * <0- error.
152 */
153static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
154 struct nvm_system_block *sblk)
155{
156 struct nvm_system_block *cur;
4891d120 157 int pg, ret, found = 0;
e3eb3799
MB
158
159 /* the full buffer for a flash page is allocated. Only the first of it
160 * contains the system block information
161 */
4891d120 162 cur = kmalloc(dev->pfpg_size, GFP_KERNEL);
e3eb3799
MB
163 if (!cur)
164 return -ENOMEM;
165
166 /* perform linear scan through the block */
167 for (pg = 0; pg < dev->lps_per_blk; pg++) {
168 ppa->g.pg = ppa_to_slc(dev, pg);
169
170 ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
4891d120 171 cur, dev->pfpg_size);
e3eb3799
MB
172 if (ret) {
173 if (ret == NVM_RSP_ERR_EMPTYPAGE) {
174 pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
175 ppa->g.ch,
176 ppa->g.lun,
177 ppa->g.blk,
178 ppa->g.pg);
179 break;
180 }
181 pr_err("nvm: read failed (%x) for ppa (%u %u %u %u)",
182 ret,
183 ppa->g.ch,
184 ppa->g.lun,
185 ppa->g.blk,
186 ppa->g.pg);
187 break; /* if we can't read a page, continue to the
188 * next blk
189 */
190 }
191
192 if (be32_to_cpu(cur->magic) != NVM_SYSBLK_MAGIC) {
193 pr_debug("nvm: scan break for ppa (%u %u %u %u)\n",
194 ppa->g.ch,
195 ppa->g.lun,
196 ppa->g.blk,
197 ppa->g.pg);
198 break; /* last valid page already found */
199 }
200
201 if (be32_to_cpu(cur->seqnr) < be32_to_cpu(sblk->seqnr))
202 continue;
203
204 memcpy(sblk, cur, sizeof(struct nvm_system_block));
205 found = 1;
206 }
207
208 kfree(cur);
209
210 return found;
211}
212
213static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
214{
215 struct nvm_rq rqd;
216 int ret;
217
218 if (s->nr_ppas > dev->ops->max_phys_sect) {
219 pr_err("nvm: unable to update all sysblocks atomically\n");
220 return -EINVAL;
221 }
222
223 memset(&rqd, 0, sizeof(struct nvm_rq));
224
225 nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas);
226 nvm_generic_to_addr_mode(dev, &rqd);
227
228 ret = dev->ops->set_bb_tbl(dev, &rqd, type);
229 nvm_free_rqd_ppalist(dev, &rqd);
230 if (ret) {
231 pr_err("nvm: sysblk failed bb mark\n");
232 return -EINVAL;
233 }
234
235 return 0;
236}
237
238static int sysblk_get_free_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
239 void *private)
240{
241 struct sysblk_scan *s = private;
242 struct ppa_addr *sppa;
243 int i, blkid = 0;
244
245 for (i = 0; i < nr_blks; i++) {
246 if (blks[i] == NVM_BLK_T_HOST)
247 return -EEXIST;
248
249 if (blks[i] != NVM_BLK_T_FREE)
250 continue;
251
252 sppa = &s->ppas[scan_ppa_idx(s->row, blkid)];
253 sppa->g.ch = ppa.g.ch;
254 sppa->g.lun = ppa.g.lun;
255 sppa->g.blk = i;
256 s->nr_ppas++;
257 blkid++;
258
259 pr_debug("nvm: use (%u %u %u) as sysblk\n",
260 sppa->g.ch, sppa->g.lun, sppa->g.blk);
261 if (blkid > MAX_BLKS_PR_SYSBLK - 1)
262 return 0;
263 }
264
265 pr_err("nvm: sysblk failed get sysblk\n");
266 return -EINVAL;
267}
268
269static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
270 struct sysblk_scan *s)
271{
272 struct nvm_system_block nvmsb;
273 void *buf;
4891d120 274 int i, sect, ret = 0;
e3eb3799
MB
275 struct ppa_addr *ppas;
276
277 nvm_cpu_to_sysblk(&nvmsb, info);
278
4891d120 279 buf = kzalloc(dev->pfpg_size, GFP_KERNEL);
e3eb3799
MB
280 if (!buf)
281 return -ENOMEM;
282 memcpy(buf, &nvmsb, sizeof(struct nvm_system_block));
283
284 ppas = kcalloc(dev->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL);
285 if (!ppas) {
286 ret = -ENOMEM;
287 goto err;
288 }
289
290 /* Write and verify */
291 for (i = 0; i < s->nr_rows; i++) {
292 ppas[0] = s->ppas[scan_ppa_idx(i, s->act_blk[i])];
293
294 pr_debug("nvm: writing sysblk to ppa (%u %u %u %u)\n",
295 ppas[0].g.ch,
296 ppas[0].g.lun,
297 ppas[0].g.blk,
298 ppas[0].g.pg);
299
300 /* Expand to all sectors within a flash page */
301 if (dev->sec_per_pg > 1) {
302 for (sect = 1; sect < dev->sec_per_pg; sect++) {
303 ppas[sect].ppa = ppas[0].ppa;
304 ppas[sect].g.sec = sect;
305 }
306 }
307
308 ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PWRITE,
4891d120 309 NVM_IO_SLC_MODE, buf, dev->pfpg_size);
e3eb3799
MB
310 if (ret) {
311 pr_err("nvm: sysblk failed program (%u %u %u)\n",
312 ppas[0].g.ch,
313 ppas[0].g.lun,
314 ppas[0].g.blk);
315 break;
316 }
317
318 ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PREAD,
4891d120 319 NVM_IO_SLC_MODE, buf, dev->pfpg_size);
e3eb3799
MB
320 if (ret) {
321 pr_err("nvm: sysblk failed read (%u %u %u)\n",
322 ppas[0].g.ch,
323 ppas[0].g.lun,
324 ppas[0].g.blk);
325 break;
326 }
327
328 if (memcmp(buf, &nvmsb, sizeof(struct nvm_system_block))) {
329 pr_err("nvm: sysblk failed verify (%u %u %u)\n",
330 ppas[0].g.ch,
331 ppas[0].g.lun,
332 ppas[0].g.blk);
333 ret = -EINVAL;
334 break;
335 }
336 }
337
338 kfree(ppas);
339err:
340 kfree(buf);
341
342 return ret;
343}
344
345static int nvm_prepare_new_sysblks(struct nvm_dev *dev, struct sysblk_scan *s)
346{
347 int i, ret;
348 unsigned long nxt_blk;
349 struct ppa_addr *ppa;
350
351 for (i = 0; i < s->nr_rows; i++) {
352 nxt_blk = (s->act_blk[i] + 1) % MAX_BLKS_PR_SYSBLK;
353 ppa = &s->ppas[scan_ppa_idx(i, nxt_blk)];
354 ppa->g.pg = ppa_to_slc(dev, 0);
355
356 ret = nvm_erase_ppa(dev, ppa, 1);
357 if (ret)
358 return ret;
359
360 s->act_blk[i] = nxt_blk;
361 }
362
363 return 0;
364}
365
366int nvm_get_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
367{
368 struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
369 struct sysblk_scan s;
370 struct nvm_system_block *cur;
371 int i, j, found = 0;
372 int ret = -ENOMEM;
373
374 /*
375 * 1. setup sysblk locations
376 * 2. get bad block list
377 * 3. filter on host-specific (type 3)
378 * 4. iterate through all and find the highest seq nr.
379 * 5. return superblock information
380 */
381
382 if (!dev->ops->get_bb_tbl)
383 return -EINVAL;
384
385 nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
386
387 mutex_lock(&dev->mlock);
388 ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
389 if (ret)
390 goto err_sysblk;
391
392 /* no sysblocks initialized */
393 if (!s.nr_ppas)
394 goto err_sysblk;
395
396 cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
397 if (!cur)
398 goto err_sysblk;
399
400 /* find the latest block across all sysblocks */
401 for (i = 0; i < s.nr_rows; i++) {
402 for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
403 struct ppa_addr ppa = s.ppas[scan_ppa_idx(i, j)];
404
405 ret = nvm_scan_block(dev, &ppa, cur);
406 if (ret > 0)
407 found = 1;
408 else if (ret < 0)
409 break;
410 }
411 }
412
413 nvm_sysblk_to_cpu(info, cur);
414
415 kfree(cur);
416err_sysblk:
417 mutex_unlock(&dev->mlock);
418
419 if (found)
420 return 1;
421 return ret;
422}
423
424int nvm_update_sysblock(struct nvm_dev *dev, struct nvm_sb_info *new)
425{
426 /* 1. for each latest superblock
427 * 2. if room
428 * a. write new flash page entry with the updated information
429 * 3. if no room
430 * a. find next available block on lun (linear search)
431 * if none, continue to next lun
432 * if none at all, report error. also report that it wasn't
433 * possible to write to all superblocks.
434 * c. write data to block.
435 */
436 struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
437 struct sysblk_scan s;
438 struct nvm_system_block *cur;
439 int i, j, ppaidx, found = 0;
440 int ret = -ENOMEM;
441
442 if (!dev->ops->get_bb_tbl)
443 return -EINVAL;
444
445 nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
446
447 mutex_lock(&dev->mlock);
448 ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
449 if (ret)
450 goto err_sysblk;
451
452 cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
453 if (!cur)
454 goto err_sysblk;
455
456 /* Get the latest sysblk for each sysblk row */
457 for (i = 0; i < s.nr_rows; i++) {
458 found = 0;
459 for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
460 ppaidx = scan_ppa_idx(i, j);
461 ret = nvm_scan_block(dev, &s.ppas[ppaidx], cur);
462 if (ret > 0) {
463 s.act_blk[i] = j;
464 found = 1;
465 } else if (ret < 0)
466 break;
467 }
468 }
469
470 if (!found) {
471 pr_err("nvm: no valid sysblks found to update\n");
472 ret = -EINVAL;
473 goto err_cur;
474 }
475
476 /*
477 * All sysblocks found. Check that they have same page id in their flash
478 * blocks
479 */
480 for (i = 1; i < s.nr_rows; i++) {
481 struct ppa_addr l = s.ppas[scan_ppa_idx(0, s.act_blk[0])];
482 struct ppa_addr r = s.ppas[scan_ppa_idx(i, s.act_blk[i])];
483
484 if (l.g.pg != r.g.pg) {
485 pr_err("nvm: sysblks not on same page. Previous update failed.\n");
486 ret = -EINVAL;
487 goto err_cur;
488 }
489 }
490
491 /*
492 * Check that there haven't been another update to the seqnr since we
493 * began
494 */
495 if ((new->seqnr - 1) != be32_to_cpu(cur->seqnr)) {
496 pr_err("nvm: seq is not sequential\n");
497 ret = -EINVAL;
498 goto err_cur;
499 }
500
501 /*
502 * When all pages in a block has been written, a new block is selected
503 * and writing is performed on the new block.
504 */
505 if (s.ppas[scan_ppa_idx(0, s.act_blk[0])].g.pg ==
506 dev->lps_per_blk - 1) {
507 ret = nvm_prepare_new_sysblks(dev, &s);
508 if (ret)
509 goto err_cur;
510 }
511
512 ret = nvm_write_and_verify(dev, new, &s);
513err_cur:
514 kfree(cur);
515err_sysblk:
516 mutex_unlock(&dev->mlock);
517
518 return ret;
519}
520
521int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
522{
523 struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
524 struct sysblk_scan s;
525 int ret;
526
527 /*
528 * 1. select master blocks and select first available blks
529 * 2. get bad block list
530 * 3. mark MAX_SYSBLKS block as host-based device allocated.
531 * 4. write and verify data to block
532 */
533
534 if (!dev->ops->get_bb_tbl || !dev->ops->set_bb_tbl)
535 return -EINVAL;
536
537 if (!(dev->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) {
538 pr_err("nvm: memory does not support SLC access\n");
539 return -EINVAL;
540 }
541
542 /* Index all sysblocks and mark them as host-driven */
543 nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
544
545 mutex_lock(&dev->mlock);
546 ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_free_blks);
547 if (ret)
548 goto err_mark;
549
550 ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
551 if (ret)
552 goto err_mark;
553
554 /* Write to the first block of each row */
555 ret = nvm_write_and_verify(dev, info, &s);
556err_mark:
557 mutex_unlock(&dev->mlock);
558 return ret;
559}
8b4970c4
MB
560
struct factory_blks {
	struct nvm_dev *dev;	/* device being factory-reset */
	int flags;		/* NVM_FACTORY_* selection flags */
	unsigned long *blks;	/* per-lun bitmaps; a zero bit marks a block
				 * still to be erased
				 */
};
566
567static int factory_nblks(int nblks)
568{
569 /* Round up to nearest BITS_PER_LONG */
570 return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
571}
572
573static unsigned int factory_blk_offset(struct nvm_dev *dev, int ch, int lun)
574{
575 int nblks = factory_nblks(dev->blks_per_lun);
576
577 return ((ch * dev->luns_per_chnl * nblks) + (lun * nblks)) /
578 BITS_PER_LONG;
579}
580
581static int nvm_factory_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
582 void *private)
583{
584 struct factory_blks *f = private;
585 struct nvm_dev *dev = f->dev;
586 int i, lunoff;
587
588 lunoff = factory_blk_offset(dev, ppa.g.ch, ppa.g.lun);
589
590 /* non-set bits correspond to the block must be erased */
591 for (i = 0; i < nr_blks; i++) {
592 switch (blks[i]) {
593 case NVM_BLK_T_FREE:
594 if (f->flags & NVM_FACTORY_ERASE_ONLY_USER)
595 set_bit(i, &f->blks[lunoff]);
596 break;
597 case NVM_BLK_T_HOST:
598 if (!(f->flags & NVM_FACTORY_RESET_HOST_BLKS))
599 set_bit(i, &f->blks[lunoff]);
600 break;
601 case NVM_BLK_T_GRWN_BAD:
602 if (!(f->flags & NVM_FACTORY_RESET_GRWN_BBLKS))
603 set_bit(i, &f->blks[lunoff]);
604 break;
605 default:
606 set_bit(i, &f->blks[lunoff]);
607 break;
608 }
609 }
610
611 return 0;
612}
613
/*
 * Collect up to @max_ppas blocks still pending erase (zero bits in
 * f->blks) into @erase_list, claiming each bit as it is taken. Blocks are
 * picked one per lun, round-robin across channels and luns, so an erase
 * batch is spread over the device.
 *
 * Returns the number of ppas placed in @erase_list (0 when nothing left).
 */
static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
					int max_ppas, struct factory_blks *f)
{
	struct ppa_addr ppa;
	int ch, lun, blkid, idx, done = 0, ppa_cnt = 0;
	unsigned long *offset;

	while (!done) {
		done = 1;
		for (ch = 0; ch < dev->nr_chnls; ch++) {
			for (lun = 0; lun < dev->luns_per_chnl; lun++) {
				idx = factory_blk_offset(dev, ch, lun);
				offset = &f->blks[idx];

				/* next unerased block in this lun, if any */
				blkid = find_first_zero_bit(offset,
							dev->blks_per_lun);
				if (blkid >= dev->blks_per_lun)
					continue;
				/* claim the block so it is not picked twice */
				set_bit(blkid, offset);

				ppa.ppa = 0;
				ppa.g.ch = ch;
				ppa.g.lun = lun;
				ppa.g.blk = blkid;
				pr_debug("nvm: erase ppa (%u %u %u)\n",
								ppa.g.ch,
								ppa.g.lun,
								ppa.g.blk);

				erase_list[ppa_cnt] = ppa;
				ppa_cnt++;
				/* found work this pass; sweep again */
				done = 0;

				if (ppa_cnt == max_ppas)
					return ppa_cnt;
			}
		}
	}

	return ppa_cnt;
}
655
656static int nvm_fact_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa,
657 nvm_bb_update_fn *fn, void *priv)
658{
659 struct ppa_addr dev_ppa;
660 int ret;
661
662 dev_ppa = generic_to_dev_addr(dev, ppa);
663
664 ret = dev->ops->get_bb_tbl(dev, dev_ppa, dev->blks_per_lun, fn, priv);
665 if (ret)
666 pr_err("nvm: failed bb tbl for ch%u lun%u\n",
667 ppa.g.ch, ppa.g.blk);
668 return ret;
669}
670
671static int nvm_fact_select_blks(struct nvm_dev *dev, struct factory_blks *f)
672{
673 int ch, lun, ret;
674 struct ppa_addr ppa;
675
676 ppa.ppa = 0;
677 for (ch = 0; ch < dev->nr_chnls; ch++) {
678 for (lun = 0; lun < dev->luns_per_chnl; lun++) {
679 ppa.g.ch = ch;
680 ppa.g.lun = lun;
681
682 ret = nvm_fact_get_bb_tbl(dev, ppa, nvm_factory_blks,
683 f);
684 if (ret)
685 return ret;
686 }
687 }
688
689 return 0;
690}
691
/*
 * Factory-reset the device: erase every block not excluded by @flags and,
 * when NVM_FACTORY_RESET_HOST_BLKS is set, return the host-reserved
 * system blocks to the free pool.
 *
 * Returns 0 on success or a negative errno.
 */
int nvm_dev_factory(struct nvm_dev *dev, int flags)
{
	struct factory_blks f;
	struct ppa_addr *ppas;
	int ppa_cnt, ret = -ENOMEM;
	int max_ppas = dev->ops->max_phys_sect / dev->nr_planes;
	struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
	struct sysblk_scan s;

	/* one bitmap region per lun, one bit per block.
	 * NOTE(review): factory_nblks() returns a bit count, so this
	 * allocates bits * nr_luns bytes - about 8x more than the bitmaps
	 * need. Harmless over-allocation, but worth confirming/trimming.
	 */
	f.blks = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns,
								GFP_KERNEL);
	if (!f.blks)
		return ret;

	ppas = kcalloc(max_ppas, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!ppas)
		goto err_blks;

	f.dev = dev;
	f.flags = flags;

	/* create list of blks to be erased */
	ret = nvm_fact_select_blks(dev, &f);
	if (ret)
		goto err_ppas;

	/* erase in batches of at most max_ppas until no blocks remain */
	while ((ppa_cnt = nvm_fact_get_blks(dev, ppas, max_ppas, &f)) > 0)
		nvm_erase_ppa(dev, ppas, ppa_cnt);

	/* mark host reserved blocks free */
	if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
		nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
		mutex_lock(&dev->mlock);
		ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas,
							sysblk_get_host_blks);
		if (!ret)
			ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
		mutex_unlock(&dev->mlock);
	}
err_ppas:
	kfree(ppas);
err_blks:
	kfree(f.blks);
	return ret;
}
EXPORT_SYMBOL(nvm_dev_factory);
This page took 0.072467 seconds and 5 git commands to generate.