/*
 * mtd: sh_flctl: Restructure the hardware ECC handling
 * [deliverable/linux.git] / drivers / mtd / nand / sh_flctl.c
 */
1 /*
2 * SuperH FLCTL nand controller
3 *
4 * Copyright (c) 2008 Renesas Solutions Corp.
5 * Copyright (c) 2008 Atom Create Engineering Co., Ltd.
6 *
7 * Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
21 *
22 */
23
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/sh_flctl.h>
37
/* OOB layout for 512-byte-page devices with 4-symbol HW ECC:
 * 10 ECC bytes at offsets 0-9, 4 free bytes at 12-15 (byte 11 is the
 * bad-block marker, see flctl_4secc_smallpage below). */
static struct nand_ecclayout flctl_4secc_oob_16 = {
	.eccbytes = 10,
	.eccpos = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
	.oobfree = {
		{.offset = 12,
		. length = 4} },
};
45
/* OOB layout for 2048-byte-page devices with 4-symbol HW ECC:
 * 10 ECC bytes per 512-byte sector, placed in each 16-byte OOB chunk,
 * with small free regions between them. */
static struct nand_ecclayout flctl_4secc_oob_64 = {
	.eccbytes = 4 * 10,
	.eccpos = {
		 6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
		22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
		38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
		54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
	.oobfree = {
		{.offset =  2, .length = 4},
		{.offset = 16, .length = 6},
		{.offset = 32, .length = 6},
		{.offset = 48, .length = 6} },
};
59
60 static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
61
/* Bad-block marker descriptor for small-page (512 B) devices:
 * 1 marker byte at OOB offset 11, checked on the first two pages */
static struct nand_bbt_descr flctl_4secc_smallpage = {
	.options = NAND_BBT_SCAN2NDPAGE,
	.offs = 11,
	.len = 1,
	.pattern = scan_ff_pattern,
};
68
/* Bad-block marker descriptor for large-page (2048 B) devices:
 * 2 marker bytes at OOB offset 0, checked on the first two pages */
static struct nand_bbt_descr flctl_4secc_largepage = {
	.options = NAND_BBT_SCAN2NDPAGE,
	.offs = 0,
	.len = 2,
	.pattern = scan_ff_pattern,
};
75
/* Flush both controller FIFOs (AC0/AC1 clear bits), then restore the
 * base interrupt/DMA control value. */
static void empty_fifo(struct sh_flctl *flctl)
{
	writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
}
81
/* Kick off the programmed command sequence ("translation") by setting
 * the TRSTRT bit; completion is later signalled via TREND in FLTRCR. */
static void start_translation(struct sh_flctl *flctl)
{
	writeb(TRSTRT, FLTRCR(flctl));
}
86
/* Common helper: log which wait routine (@str, usually __func__) timed out */
static void timeout_error(struct sh_flctl *flctl, const char *str)
{
	dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
}
91
92 static void wait_completion(struct sh_flctl *flctl)
93 {
94 uint32_t timeout = LOOP_TIMEOUT_MAX;
95
96 while (timeout--) {
97 if (readb(FLTRCR(flctl)) & TREND) {
98 writeb(0x0, FLTRCR(flctl));
99 return;
100 }
101 udelay(1);
102 }
103
104 timeout_error(flctl, __func__);
105 writeb(0x0, FLTRCR(flctl));
106 }
107
/*
 * Program the address register(s) for the next command.
 *
 * @column == -1          : erase — @page_addr goes into FLADR directly.
 * @page_addr != -1       : page access (SEQIN, READ0, ...) — column and
 *                          page address are packed per the device's page
 *                          size; devices above 128MB get the extra page
 *                          address byte written to FLADR2.
 * For 16-bit bus devices the byte column is converted to a word column.
 */
static void set_addr(struct mtd_info *mtd, int column, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t addr = 0;

	if (column == -1) {
		addr = page_addr;	/* ERASE1 */
	} else if (page_addr != -1) {
		/* SEQIN, READ0, etc.. */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;
		if (flctl->page_size) {
			/* large page: 12-bit column, page address in bytes 2-3 */
			addr = column & 0x0FFF;
			addr |= (page_addr & 0xff) << 16;
			addr |= ((page_addr >> 8) & 0xff) << 24;
			/* big than 128MB */
			if (flctl->rw_ADRCNT == ADRCNT2_E) {
				uint32_t addr2;
				addr2 = (page_addr >> 16) & 0xff;
				writel(addr2, FLADR2(flctl));
			}
		} else {
			/* small page: 8-bit column, page address in bytes 1-3 */
			addr = column;
			addr |= (page_addr & 0xff) << 8;
			addr |= ((page_addr >> 8) & 0xff) << 16;
			addr |= ((page_addr >> 16) & 0xff) << 24;
		}
	}
	writel(addr, FLADR(flctl));
}
138
139 static void wait_rfifo_ready(struct sh_flctl *flctl)
140 {
141 uint32_t timeout = LOOP_TIMEOUT_MAX;
142
143 while (timeout--) {
144 uint32_t val;
145 /* check FIFO */
146 val = readl(FLDTCNTR(flctl)) >> 16;
147 if (val & 0xFF)
148 return;
149 udelay(1);
150 }
151 timeout_error(flctl, __func__);
152 }
153
154 static void wait_wfifo_ready(struct sh_flctl *flctl)
155 {
156 uint32_t len, timeout = LOOP_TIMEOUT_MAX;
157
158 while (timeout--) {
159 /* check FIFO */
160 len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
161 if (len >= 4)
162 return;
163 udelay(1);
164 }
165 timeout_error(flctl, __func__);
166 }
167
/*
 * Wait for the ECC read FIFO to hold the 4-byte OOB chunk of
 * @sector_number, applying hardware ECC corrections to done_buff as
 * they are reported by the controller.
 *
 * Returns FL_SUCCESS if the sector was clean (or empty), FL_REPAIRABLE
 * if at least one correction pattern was applied, FL_ERROR for an
 * uncorrectable non-empty sector, or FL_TIMEOUT.
 */
static enum flctl_ecc_res_t wait_recfifo_ready
		(struct sh_flctl *flctl, int sector_number)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;
	void __iomem *ecc_reg[4];
	int i;
	int state = FL_SUCCESS;
	uint32_t data, size;

	/*
	 * First this loops checks in FLDTCNTR if we are ready to read out the
	 * oob data. This is the case if either all went fine without errors or
	 * if the bottom part of the loop corrected the errors or marked them as
	 * uncorrectable and the controller is given time to push the data into
	 * the FIFO.
	 */
	while (timeout--) {
		/* check if all is ok and we can read out the OOB */
		size = readl(FLDTCNTR(flctl)) >> 24;
		if ((size & 0xFF) == 4)
			return state;

		/* check if a correction code has been calculated */
		if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
			/*
			 * either we wait for the fifo to be filled or a
			 * correction pattern is being generated
			 */
			udelay(1);
			continue;
		}

		/* check for an uncorrectable error */
		if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
			/* check if we face a non-empty page */
			/* an all-0xff (erased) sector always trips the ECC
			 * engine, so only flag an error for real data */
			for (i = 0; i < 512; i++) {
				if (flctl->done_buff[i] != 0xff) {
					state = FL_ERROR; /* can't correct */
					break;
				}
			}

			if (state == FL_SUCCESS)
				dev_dbg(&flctl->pdev->dev,
					"reading empty sector %d, ecc error ignored\n",
					sector_number);

			writel(0, FL4ECCCR(flctl));
			continue;
		}

		/* start error correction */
		ecc_reg[0] = FL4ECCRESULT0(flctl);
		ecc_reg[1] = FL4ECCRESULT1(flctl);
		ecc_reg[2] = FL4ECCRESULT2(flctl);
		ecc_reg[3] = FL4ECCRESULT3(flctl);

		/* NOTE(review): only the first 3 result registers are
		 * consumed here even though 4 are mapped — presumably per
		 * the controller's correction format; confirm vs datasheet */
		for (i = 0; i < 3; i++) {
			uint8_t org;
			int index;

			data = readl(ecc_reg[i]);

			/* upper half = byte offset of the error,
			 * lower byte = XOR correction pattern */
			if (flctl->page_size)
				index = (512 * sector_number) +
					(data >> 16);
			else
				index = data >> 16;

			org = flctl->done_buff[index];
			flctl->done_buff[index] = org ^ (data & 0xFF);
		}
		state = FL_REPAIRABLE;
		writel(0, FL4ECCCR(flctl));
	}

	timeout_error(flctl, __func__);
	return FL_TIMEOUT;	/* timeout */
}
247
248 static void wait_wecfifo_ready(struct sh_flctl *flctl)
249 {
250 uint32_t timeout = LOOP_TIMEOUT_MAX;
251 uint32_t len;
252
253 while (timeout--) {
254 /* check FLECFIFO */
255 len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
256 if (len >= 4)
257 return;
258 udelay(1);
259 }
260 timeout_error(flctl, __func__);
261 }
262
/*
 * Read one 32-bit word from the data register into done_buff at
 * @offset, after the translation has completed (used for STATUS reads).
 * NOTE(review): this path converts with le32_to_cpu while the FIFO path
 * uses be32_to_cpu — presumably FLDATAR has the opposite byte order to
 * FLDTFIFO; confirm against the FLCTL datasheet.
 */
static void read_datareg(struct sh_flctl *flctl, int offset)
{
	unsigned long data;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	wait_completion(flctl);

	data = readl(FLDATAR(flctl));
	*buf = le32_to_cpu(data);
}
273
/*
 * Drain @rlen bytes (rounded up to 32-bit words) from the data FIFO
 * into done_buff at @offset.  FIFO words are big-endian on the bus.
 */
static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
	void *fifo_addr = (void *)FLDTFIFO(flctl);

	/* round the byte count up to whole 32-bit FIFO words */
	len_4align = (rlen + 3) / 4;

	for (i = 0; i < len_4align; i++) {
		wait_rfifo_ready(flctl);
		buf[i] = readl(fifo_addr);
		buf[i] = be32_to_cpu(buf[i]);
	}
}
288
/*
 * Read the 16 OOB/ECC bytes of @sector from the ECC FIFO into @buff,
 * after waiting for the ECC engine to finish (and correct) the sector.
 * On an uncorrectable error (FL_ERROR) the FIFO is left untouched.
 * Returns the ECC result from wait_recfifo_ready().
 */
static enum flctl_ecc_res_t read_ecfiforeg
		(struct sh_flctl *flctl, uint8_t *buff, int sector)
{
	int i;
	enum flctl_ecc_res_t res;
	unsigned long *ecc_buf = (unsigned long *)buff;

	res = wait_recfifo_ready(flctl , sector);

	if (res != FL_ERROR) {
		for (i = 0; i < 4; i++) {
			ecc_buf[i] = readl(FLECFIFO(flctl));
			ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
		}
	}

	return res;
}
307
/*
 * Push @rlen bytes (rounded up to 32-bit words) from done_buff at
 * @offset into the data FIFO, big-endian on the bus.
 */
static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
{
	int i, len_4align;
	unsigned long *data = (unsigned long *)&flctl->done_buff[offset];
	void *fifo_addr = (void *)FLDTFIFO(flctl);

	/* round the byte count up to whole 32-bit FIFO words */
	len_4align = (rlen + 3) / 4;
	for (i = 0; i < len_4align; i++) {
		wait_wfifo_ready(flctl);
		writel(cpu_to_be32(data[i]), fifo_addr);
	}
}
320
/*
 * Program FLCMNCR/FLCMDCR/FLCMCDR for the given NAND command.
 *
 * @cmd         : the NAND command selecting which control bits to set
 *                (address byte count, data direction, second command
 *                phase, bus width, ...).
 * @flcmcdr_val : the raw command-code register value (may pack two
 *                command bytes, e.g. READSTART<<8 | READ0).
 */
static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
	uint32_t flcmdcr_val, addr_len_bytes = 0;

	/* Set SNAND bit if page size is 2048byte */
	if (flctl->page_size)
		flcmncr_val |= SNAND_E;
	else
		flcmncr_val &= ~SNAND_E;

	/* default FLCMDCR val */
	flcmdcr_val = DOCMD1_E | DOADR_E;

	/* Set for FLCMDCR */
	switch (cmd) {
	case NAND_CMD_ERASE1:
		addr_len_bytes = flctl->erase_ADRCNT;
		flcmdcr_val |= DOCMD2_E;
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
	case NAND_CMD_RNDOUT:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= CDSRC_E;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_SEQIN:
		/* This case is that cmd is READ0 or READ1 or READ00 */
		flcmdcr_val &= ~DOADR_E;	/* ONLY execute 1st cmd */
		break;
	case NAND_CMD_PAGEPROG:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_READID:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val |= CDSRC_E;
		addr_len_bytes = ADRCNT_1;
		break;
	case NAND_CMD_STATUS:
	case NAND_CMD_RESET:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val &= ~(DOADR_E | DOSR_E);
		break;
	default:
		break;
	}

	/* Set address bytes parameter */
	flcmdcr_val |= addr_len_bytes;

	/* Now actually write */
	writel(flcmncr_val, FLCMNCR(flctl));
	writel(flcmdcr_val, FLCMDCR(flctl));
	writel(flcmcdr_val, FLCMCDR(flctl));
}
382
/*
 * ecc.read_page hook for HW ECC mode.  The actual transfer and ECC
 * correction already happened in flctl_cmdfunc()/execmd_read_page_sector();
 * here we only copy the prefetched page out of done_buff.
 */
static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	chip->read_buf(mtd, buf, mtd->writesize);
	return 0;
}
389
/*
 * ecc.write_page hook for HW ECC mode.  Only stages the page into
 * done_buff; the controller writes data + ECC on NAND_CMD_PAGEPROG.
 */
static void flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				   const uint8_t *buf, int oob_required)
{
	chip->write_buf(mtd, buf, mtd->writesize);
}
395
396 static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
397 {
398 struct sh_flctl *flctl = mtd_to_flctl(mtd);
399 int sector, page_sectors;
400 enum flctl_ecc_res_t ecc_result;
401
402 page_sectors = flctl->page_size ? 4 : 1;
403
404 set_cmd_regs(mtd, NAND_CMD_READ0,
405 (NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
406
407 writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
408 FLCMNCR(flctl));
409 writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
410 writel(page_addr << 2, FLADR(flctl));
411
412 empty_fifo(flctl);
413 start_translation(flctl);
414
415 for (sector = 0; sector < page_sectors; sector++) {
416 read_fiforeg(flctl, 512, 512 * sector);
417
418 ecc_result = read_ecfiforeg(flctl,
419 &flctl->done_buff[mtd->writesize + 16 * sector],
420 sector);
421
422 switch (ecc_result) {
423 case FL_REPAIRABLE:
424 dev_info(&flctl->pdev->dev,
425 "applied ecc on page 0x%x", page_addr);
426 flctl->mtd.ecc_stats.corrected++;
427 break;
428 case FL_ERROR:
429 dev_warn(&flctl->pdev->dev,
430 "page 0x%x contains corrupted data\n",
431 page_addr);
432 flctl->mtd.ecc_stats.failed++;
433 break;
434 default:
435 ;
436 }
437 }
438
439 wait_completion(flctl);
440
441 writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
442 FLCMNCR(flctl));
443 }
444
/*
 * Read only the OOB area of a page in HW ECC mode: one 16-byte chunk
 * per 512-byte sector, gathered contiguously into done_buff.
 */
static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_sectors = flctl->page_size ? 4 : 1;
	int i;

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	empty_fifo(flctl);

	for (i = 0; i < page_sectors; i++) {
		/* OOB of sector i starts 512 bytes into its 528-byte slice */
		set_addr(mtd, (512 + 16) * i + 512 , page_addr);
		writel(16, FLDTCNTR(flctl));

		start_translation(flctl);
		read_fiforeg(flctl, 16, 16 * i);
		wait_completion(flctl);
	}
}
465
/*
 * Program a full page from done_buff, sector by sector, in sector
 * access mode.  The page address was latched earlier by SEQIN.
 */
static void execmd_write_page_sector(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int i, page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	empty_fifo(flctl);
	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		write_fiforeg(flctl, 512, 512 * sector);

		/* feed 16 dummy 0xFF bytes per sector into the ECC FIFO —
		 * presumably the engine substitutes the computed ECC on the
		 * way out; confirm against the FLCTL datasheet */
		for (i = 0; i < 4; i++) {
			wait_wecfifo_ready(flctl); /* wait for write ready */
			writel(0xFFFFFFFF, FLECFIFO(flctl));
		}
	}

	wait_completion(flctl);
	writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
}
495
/*
 * Program only the OOB area of a page: write each sector's 16-byte OOB
 * chunk from done_buff to its slot at offset 512 within the 528-byte
 * sector frame.  The page address was latched earlier by SEQIN.
 */
static void execmd_write_oob(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	for (sector = 0; sector < page_sectors; sector++) {
		empty_fifo(flctl);
		set_addr(mtd, sector * 528 + 512, page_addr);
		writel(16, FLDTCNTR(flctl));	/* set read size */

		start_translation(flctl);
		write_fiforeg(flctl, 16, 16 * sector);
		wait_completion(flctl);
	}
}
517
/*
 * Central command dispatcher (nand_chip->cmdfunc).
 *
 * Each NAND command is translated into an FLCTL register sequence.
 * Read data is prefetched whole into flctl->done_buff and later handed
 * out via read_byte/read_buf; write data is accumulated in done_buff
 * (SEQIN resets the index) and flushed to the chip on PAGEPROG.
 * The device is kept runtime-resumed for the duration of the command.
 */
static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
			int column, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t read_cmd = 0;

	pm_runtime_get_sync(&flctl->pdev->dev);

	flctl->read_bytes = 0;
	/* PAGEPROG must keep the index accumulated since SEQIN */
	if (command != NAND_CMD_PAGEPROG)
		flctl->index = 0;

	switch (command) {
	case NAND_CMD_READ1:
	case NAND_CMD_READ0:
		if (flctl->hwecc) {
			/* read page with hwecc */
			execmd_read_page_sector(mtd, page_addr);
			break;
		}
		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, 0, page_addr);

		/* prefetch the whole page + OOB; column only moves the index */
		flctl->read_bytes = mtd->writesize + mtd->oobsize;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;
		flctl->index += column;
		goto read_normal_exit;

	case NAND_CMD_READOOB:
		if (flctl->hwecc) {
			/* read page with hwecc */
			execmd_read_oob(mtd, page_addr);
			break;
		}

		if (flctl->page_size) {
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| NAND_CMD_READ0);
			set_addr(mtd, mtd->writesize, page_addr);
		} else {
			set_cmd_regs(mtd, command, command);
			set_addr(mtd, 0, page_addr);
		}
		flctl->read_bytes = mtd->oobsize;
		goto read_normal_exit;

	case NAND_CMD_RNDOUT:
		/* random output is unsupported in hwecc (sector) mode */
		if (flctl->hwecc)
			break;

		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, column, 0);

		flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
		goto read_normal_exit;

	case NAND_CMD_READID:
		set_cmd_regs(mtd, command, command);

		/* READID is always performed using an 8-bit bus */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column <<= 1;
		set_addr(mtd, column, 0);

		flctl->read_bytes = 8;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		empty_fifo(flctl);
		start_translation(flctl);
		read_fiforeg(flctl, flctl->read_bytes, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_ERASE1:
		/* remember the page; the real work happens on ERASE2 */
		flctl->erase1_page_addr = page_addr;
		break;

	case NAND_CMD_ERASE2:
		set_cmd_regs(mtd, NAND_CMD_ERASE1,
			(command << 8) | NAND_CMD_ERASE1);
		set_addr(mtd, -1, flctl->erase1_page_addr);
		start_translation(flctl);
		wait_completion(flctl);
		break;

	case NAND_CMD_SEQIN:
		if (!flctl->page_size) {
			/* output read command */
			/* small-page chips need a region-select read command
			 * (READ0/READ1/READOOB) issued before programming */
			if (column >= mtd->writesize) {
				column -= mtd->writesize;
				read_cmd = NAND_CMD_READOOB;
			} else if (column < 256) {
				read_cmd = NAND_CMD_READ0;
			} else {
				column -= 256;
				read_cmd = NAND_CMD_READ1;
			}
		}
		/* latch target; data arrives via write_buf until PAGEPROG */
		flctl->seqin_column = column;
		flctl->seqin_page_addr = page_addr;
		flctl->seqin_read_cmd = read_cmd;
		break;

	case NAND_CMD_PAGEPROG:
		empty_fifo(flctl);
		if (!flctl->page_size) {
			/* issue the region-select command latched at SEQIN */
			set_cmd_regs(mtd, NAND_CMD_SEQIN,
					flctl->seqin_read_cmd);
			set_addr(mtd, -1, -1);
			writel(0, FLDTCNTR(flctl));	/* set 0 size */
			start_translation(flctl);
			wait_completion(flctl);
		}
		if (flctl->hwecc) {
			/* write page with hwecc */
			if (flctl->seqin_column == mtd->writesize)
				execmd_write_oob(mtd);
			else if (!flctl->seqin_column)
				execmd_write_page_sector(mtd);
			else
				printk(KERN_ERR "Invalid address !?\n");
			break;
		}
		set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
		set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
		writel(flctl->index, FLDTCNTR(flctl));	/* set write size */
		start_translation(flctl);
		write_fiforeg(flctl, flctl->index, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_STATUS:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		flctl->read_bytes = 1;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		start_translation(flctl);
		read_datareg(flctl, 0); /* read and end */
		break;

	case NAND_CMD_RESET:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		writel(0, FLDTCNTR(flctl));	/* set 0 size */
		start_translation(flctl);
		wait_completion(flctl);
		break;

	default:
		break;
	}
	goto runtime_exit;

read_normal_exit:
	/* common tail for the prefetching read commands above */
	writel(flctl->read_bytes, FLDTCNTR(flctl));	/* set read size */
	empty_fifo(flctl);
	start_translation(flctl);
	read_fiforeg(flctl, flctl->read_bytes, 0);
	wait_completion(flctl);
runtime_exit:
	pm_runtime_put_sync(&flctl->pdev->dev);
	return;
}
693
/*
 * nand_chip->select_chip hook.  Only one chip-enable (CE0) is
 * supported: chipnr 0 selects it (adding a PM QoS latency request and,
 * if configured, asserting HOLDEN), -1 deselects and drops the QoS
 * request.  Any other chip number is a driver bug.
 */
static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int ret;

	switch (chipnr) {
	case -1:
		flctl->flcmncr_base &= ~CE0_ENABLE;

		pm_runtime_get_sync(&flctl->pdev->dev);
		writel(flctl->flcmncr_base, FLCMNCR(flctl));

		if (flctl->qos_request) {
			dev_pm_qos_remove_request(&flctl->pm_qos);
			flctl->qos_request = 0;
		}

		pm_runtime_put_sync(&flctl->pdev->dev);
		break;
	case 0:
		flctl->flcmncr_base |= CE0_ENABLE;

		if (!flctl->qos_request) {
			ret = dev_pm_qos_add_request(&flctl->pdev->dev,
							&flctl->pm_qos, 100);
			if (ret < 0)
				dev_err(&flctl->pdev->dev,
					"PM QoS request failed: %d\n", ret);
			flctl->qos_request = 1;
		}

		if (flctl->holden) {
			pm_runtime_get_sync(&flctl->pdev->dev);
			writel(HOLDEN, FLHOLDCR(flctl));
			pm_runtime_put_sync(&flctl->pdev->dev);
		}
		break;
	default:
		BUG();
	}
}
735
736 static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
737 {
738 struct sh_flctl *flctl = mtd_to_flctl(mtd);
739 int i, index = flctl->index;
740
741 for (i = 0; i < len; i++)
742 flctl->done_buff[index + i] = buf[i];
743 flctl->index += len;
744 }
745
746 static uint8_t flctl_read_byte(struct mtd_info *mtd)
747 {
748 struct sh_flctl *flctl = mtd_to_flctl(mtd);
749 int index = flctl->index;
750 uint8_t data;
751
752 data = flctl->done_buff[index];
753 flctl->index++;
754 return data;
755 }
756
/*
 * nand_chip->read_word hook (16-bit bus): hand out the next two bytes
 * of the prefetched transfer buffer as a native-endian word.
 * NOTE(review): this dereferences done_buff at an arbitrary index as
 * uint16_t — presumably callers only use even offsets so the access
 * stays aligned; verify on strict-alignment SH parts.
 */
static uint16_t flctl_read_word(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int index = flctl->index;
	uint16_t data;
	uint16_t *buf = (uint16_t *)&flctl->done_buff[index];

	data = *buf;
	flctl->index += 2;
	return data;
}
768
769 static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
770 {
771 int i;
772
773 for (i = 0; i < len; i++)
774 buf[i] = flctl_read_byte(mtd);
775 }
776
/*
 * nand_chip->verify_buf hook: compare @len bytes of @buf against the
 * prefetched transfer buffer.  Returns 0 on match, -EFAULT on the
 * first mismatch (note: the buffer index is left mid-buffer in that
 * case, matching the byte-at-a-time consumption model).
 */
static int flctl_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (buf[i] != flctl_read_byte(mtd))
			return -EFAULT;
	return 0;
}
786
/*
 * Finish chip setup after nand_scan_ident(): derive the address-cycle
 * counts from page and chip size, and configure either the 4-symbol
 * hardware ECC engine (layouts, BBT patterns, ecc callbacks) or fall
 * back to software ECC.  Always returns 0.
 */
static int flctl_chip_init_tail(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	struct nand_chip *chip = &flctl->chip;

	if (mtd->writesize == 512) {
		flctl->page_size = 0;
		if (chip->chipsize > (32 << 20)) {
			/* big than 32MB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (chip->chipsize > (2 << 16)) {
			/* big than 128KB */
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_2;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	} else {
		flctl->page_size = 1;
		if (chip->chipsize > (128 << 20)) {
			/* big than 128MB */
			flctl->rw_ADRCNT = ADRCNT2_E;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (chip->chipsize > (8 << 16)) {
			/* big than 512KB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	}

	if (flctl->hwecc) {
		if (mtd->writesize == 512) {
			chip->ecc.layout = &flctl_4secc_oob_16;
			chip->badblock_pattern = &flctl_4secc_smallpage;
		} else {
			chip->ecc.layout = &flctl_4secc_oob_64;
			chip->badblock_pattern = &flctl_4secc_largepage;
		}

		/* 4-symbol ECC: 10 ECC bytes per 512-byte sector */
		chip->ecc.size = 512;
		chip->ecc.bytes = 10;
		chip->ecc.strength = 4;
		chip->ecc.read_page = flctl_read_page_hwecc;
		chip->ecc.write_page = flctl_write_page_hwecc;
		chip->ecc.mode = NAND_ECC_HW;

		/* 4 symbols ECC enabled */
		flctl->flcmncr_base |= _4ECCEN;
	} else {
		chip->ecc.mode = NAND_ECC_SOFT;
	}

	return 0;
}
846
/*
 * FLSTE (status error) interrupt handler: log the interrupt/DMA control
 * register and acknowledge by restoring its base value.
 */
static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
{
	struct sh_flctl *flctl = dev_id;

	dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));

	return IRQ_HANDLED;
}
856
857 static int __devinit flctl_probe(struct platform_device *pdev)
858 {
859 struct resource *res;
860 struct sh_flctl *flctl;
861 struct mtd_info *flctl_mtd;
862 struct nand_chip *nand;
863 struct sh_flctl_platform_data *pdata;
864 int ret = -ENXIO;
865 int irq;
866
867 pdata = pdev->dev.platform_data;
868 if (pdata == NULL) {
869 dev_err(&pdev->dev, "no platform data defined\n");
870 return -EINVAL;
871 }
872
873 flctl = kzalloc(sizeof(struct sh_flctl), GFP_KERNEL);
874 if (!flctl) {
875 dev_err(&pdev->dev, "failed to allocate driver data\n");
876 return -ENOMEM;
877 }
878
879 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
880 if (!res) {
881 dev_err(&pdev->dev, "failed to get I/O memory\n");
882 goto err_iomap;
883 }
884
885 flctl->reg = ioremap(res->start, resource_size(res));
886 if (flctl->reg == NULL) {
887 dev_err(&pdev->dev, "failed to remap I/O memory\n");
888 goto err_iomap;
889 }
890
891 irq = platform_get_irq(pdev, 0);
892 if (irq < 0) {
893 dev_err(&pdev->dev, "failed to get flste irq data\n");
894 goto err_flste;
895 }
896
897 ret = request_irq(irq, flctl_handle_flste, IRQF_SHARED, "flste", flctl);
898 if (ret) {
899 dev_err(&pdev->dev, "request interrupt failed.\n");
900 goto err_flste;
901 }
902
903 platform_set_drvdata(pdev, flctl);
904 flctl_mtd = &flctl->mtd;
905 nand = &flctl->chip;
906 flctl_mtd->priv = nand;
907 flctl->pdev = pdev;
908 flctl->hwecc = pdata->has_hwecc;
909 flctl->holden = pdata->use_holden;
910 flctl->flcmncr_base = pdata->flcmncr_val;
911 flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;
912
913 /* Set address of hardware control function */
914 /* 20 us command delay time */
915 nand->chip_delay = 20;
916
917 nand->read_byte = flctl_read_byte;
918 nand->write_buf = flctl_write_buf;
919 nand->read_buf = flctl_read_buf;
920 nand->verify_buf = flctl_verify_buf;
921 nand->select_chip = flctl_select_chip;
922 nand->cmdfunc = flctl_cmdfunc;
923
924 if (pdata->flcmncr_val & SEL_16BIT) {
925 nand->options |= NAND_BUSWIDTH_16;
926 nand->read_word = flctl_read_word;
927 }
928
929 pm_runtime_enable(&pdev->dev);
930 pm_runtime_resume(&pdev->dev);
931
932 ret = nand_scan_ident(flctl_mtd, 1, NULL);
933 if (ret)
934 goto err_chip;
935
936 ret = flctl_chip_init_tail(flctl_mtd);
937 if (ret)
938 goto err_chip;
939
940 ret = nand_scan_tail(flctl_mtd);
941 if (ret)
942 goto err_chip;
943
944 mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
945
946 return 0;
947
948 err_chip:
949 pm_runtime_disable(&pdev->dev);
950 free_irq(irq, flctl);
951 err_flste:
952 iounmap(flctl->reg);
953 err_iomap:
954 kfree(flctl);
955 return ret;
956 }
957
/*
 * Remove: unregister the MTD/NAND device and release all resources
 * acquired in probe (runtime PM, IRQ, register mapping, driver data).
 */
static int __devexit flctl_remove(struct platform_device *pdev)
{
	struct sh_flctl *flctl = platform_get_drvdata(pdev);

	nand_release(&flctl->mtd);
	pm_runtime_disable(&pdev->dev);
	free_irq(platform_get_irq(pdev, 0), flctl);
	iounmap(flctl->reg);
	kfree(flctl);

	return 0;
}
970
/* Platform driver glue; .probe is supplied to platform_driver_probe()
 * at init time, so it is intentionally absent here. */
static struct platform_driver flctl_driver = {
	.remove		= flctl_remove,
	.driver = {
		.name	= "sh_flctl",
		.owner	= THIS_MODULE,
	},
};
978
/* Module init: register the driver, binding flctl_probe() once */
static int __init flctl_nand_init(void)
{
	return platform_driver_probe(&flctl_driver, flctl_probe);
}
983
/* Module exit: unregister the platform driver */
static void __exit flctl_nand_cleanup(void)
{
	platform_driver_unregister(&flctl_driver);
}
988
989 module_init(flctl_nand_init);
990 module_exit(flctl_nand_cleanup);
991
992 MODULE_LICENSE("GPL");
993 MODULE_AUTHOR("Yoshihiro Shimoda");
994 MODULE_DESCRIPTION("SuperH FLCTL driver");
995 MODULE_ALIAS("platform:sh_flctl");
/* This page took 0.072585 seconds and 6 git commands to generate. */