mtd: omap2 fix prefetch mode read issue
drivers/mtd/nand/omap2.c [deliverable/linux.git]
1 /*
2 * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
3 * Copyright © 2004 Micron Technology Inc.
4 * Copyright © 2004 David Brownell
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11 #include <linux/platform_device.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/delay.h>
14 #include <linux/jiffies.h>
15 #include <linux/sched.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/mtd/nand.h>
18 #include <linux/mtd/partitions.h>
19 #include <linux/io.h>
20
21 #include <plat/dma.h>
22 #include <plat/gpmc.h>
23 #include <plat/nand.h>
24
25 #define GPMC_IRQ_STATUS 0x18
26 #define GPMC_ECC_CONFIG 0x1F4
27 #define GPMC_ECC_CONTROL 0x1F8
28 #define GPMC_ECC_SIZE_CONFIG 0x1FC
29 #define GPMC_ECC1_RESULT 0x200
30
31 #define DRIVER_NAME "omap2-nand"
32
33 /* size (4 KiB) for IO mapping */
34 #define NAND_IO_SIZE SZ_4K
35
36 #define NAND_WP_OFF 0
37 #define NAND_WP_BIT 0x00000010
38 #define WR_RD_PIN_MONITORING 0x00600000
39
40 #define GPMC_BUF_FULL 0x00000001
41 #define GPMC_BUF_EMPTY 0x00000000
42
43 #define NAND_Ecc_P1e (1 << 0)
44 #define NAND_Ecc_P2e (1 << 1)
45 #define NAND_Ecc_P4e (1 << 2)
46 #define NAND_Ecc_P8e (1 << 3)
47 #define NAND_Ecc_P16e (1 << 4)
48 #define NAND_Ecc_P32e (1 << 5)
49 #define NAND_Ecc_P64e (1 << 6)
50 #define NAND_Ecc_P128e (1 << 7)
51 #define NAND_Ecc_P256e (1 << 8)
52 #define NAND_Ecc_P512e (1 << 9)
53 #define NAND_Ecc_P1024e (1 << 10)
54 #define NAND_Ecc_P2048e (1 << 11)
55
56 #define NAND_Ecc_P1o (1 << 16)
57 #define NAND_Ecc_P2o (1 << 17)
58 #define NAND_Ecc_P4o (1 << 18)
59 #define NAND_Ecc_P8o (1 << 19)
60 #define NAND_Ecc_P16o (1 << 20)
61 #define NAND_Ecc_P32o (1 << 21)
62 #define NAND_Ecc_P64o (1 << 22)
63 #define NAND_Ecc_P128o (1 << 23)
64 #define NAND_Ecc_P256o (1 << 24)
65 #define NAND_Ecc_P512o (1 << 25)
66 #define NAND_Ecc_P1024o (1 << 26)
67 #define NAND_Ecc_P2048o (1 << 27)
68
69 #define TF(value) (value ? 1 : 0)
70
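/*
 * The following macros pick individual parity bits out of a 32-bit word
 * laid out per the NAND_Ecc_* defines above (even parities in bits 0..11,
 * odd parities in bits 16..27) and repack them into the three ECC bytes
 * assembled by gen_true_ecc() below.
 */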
71 #define P2048e(a) (TF(a & NAND_Ecc_P2048e) << 0)
72 #define P2048o(a) (TF(a & NAND_Ecc_P2048o) << 1)
73 #define P1e(a) (TF(a & NAND_Ecc_P1e) << 2)
74 #define P1o(a) (TF(a & NAND_Ecc_P1o) << 3)
75 #define P2e(a) (TF(a & NAND_Ecc_P2e) << 4)
76 #define P2o(a) (TF(a & NAND_Ecc_P2o) << 5)
77 #define P4e(a) (TF(a & NAND_Ecc_P4e) << 6)
78 #define P4o(a) (TF(a & NAND_Ecc_P4o) << 7)
79
80 #define P8e(a) (TF(a & NAND_Ecc_P8e) << 0)
81 #define P8o(a) (TF(a & NAND_Ecc_P8o) << 1)
82 #define P16e(a) (TF(a & NAND_Ecc_P16e) << 2)
83 #define P16o(a) (TF(a & NAND_Ecc_P16o) << 3)
84 #define P32e(a) (TF(a & NAND_Ecc_P32e) << 4)
85 #define P32o(a) (TF(a & NAND_Ecc_P32o) << 5)
86 #define P64e(a) (TF(a & NAND_Ecc_P64e) << 6)
87 #define P64o(a) (TF(a & NAND_Ecc_P64o) << 7)
88
89 #define P128e(a) (TF(a & NAND_Ecc_P128e) << 0)
90 #define P128o(a) (TF(a & NAND_Ecc_P128o) << 1)
91 #define P256e(a) (TF(a & NAND_Ecc_P256e) << 2)
92 #define P256o(a) (TF(a & NAND_Ecc_P256o) << 3)
93 #define P512e(a) (TF(a & NAND_Ecc_P512e) << 4)
94 #define P512o(a) (TF(a & NAND_Ecc_P512o) << 5)
95 #define P1024e(a) (TF(a & NAND_Ecc_P1024e) << 6)
96 #define P1024o(a) (TF(a & NAND_Ecc_P1024o) << 7)
97
98 #define P8e_s(a) (TF(a & NAND_Ecc_P8e) << 0)
99 #define P8o_s(a) (TF(a & NAND_Ecc_P8o) << 1)
100 #define P16e_s(a) (TF(a & NAND_Ecc_P16e) << 2)
101 #define P16o_s(a) (TF(a & NAND_Ecc_P16o) << 3)
102 #define P1e_s(a) (TF(a & NAND_Ecc_P1e) << 4)
103 #define P1o_s(a) (TF(a & NAND_Ecc_P1o) << 5)
104 #define P2e_s(a) (TF(a & NAND_Ecc_P2e) << 6)
105 #define P2o_s(a) (TF(a & NAND_Ecc_P2o) << 7)
106
107 #define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
108 #define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
109
110 #ifdef CONFIG_MTD_PARTITIONS
111 static const char *part_probes[] = { "cmdlinepart", NULL };
112 #endif
113
114 #ifdef CONFIG_MTD_NAND_OMAP_PREFETCH
115 static int use_prefetch = 1;
116
117 /* "modprobe ... use_prefetch=0" etc */
118 module_param(use_prefetch, bool, 0);
119 MODULE_PARM_DESC(use_prefetch, "enable/disable use of PREFETCH");
120
121 #ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
122 static int use_dma = 1;
123
124 /* "modprobe ... use_dma=0" etc */
125 module_param(use_dma, bool, 0);
126 MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
127 #else
128 const int use_dma;
129 #endif
130 #else
131 const int use_prefetch;
132 const int use_dma;
133 #endif
134
135 struct omap_nand_info {
136 struct nand_hw_control controller;
137 struct omap_nand_platform_data *pdata;
138 struct mtd_info mtd;
139 struct mtd_partition *parts;
140 struct nand_chip nand;
141 struct platform_device *pdev;
142
143 int gpmc_cs;
144 unsigned long phys_base;
145 void __iomem *gpmc_cs_baseaddr;
146 void __iomem *gpmc_baseaddr;
147 void __iomem *nand_pref_fifo_add;
148 struct completion comp;
149 int dma_ch;
150 };
151
152 /**
153  * omap_nand_wp - This function enables or disables the Write Protect feature
154 * @mtd: MTD device structure
155 * @mode: WP ON/OFF
156 */
157 static void omap_nand_wp(struct mtd_info *mtd, int mode)
158 {
159 struct omap_nand_info *info = container_of(mtd,
160 struct omap_nand_info, mtd);
161
162 unsigned long config = __raw_readl(info->gpmc_baseaddr + GPMC_CONFIG);
163
164 if (mode)
165 config &= ~(NAND_WP_BIT); /* WP is ON */
166 else
167 config |= (NAND_WP_BIT); /* WP is OFF */
168
169 __raw_writel(config, (info->gpmc_baseaddr + GPMC_CONFIG));
170 }
171
172 /**
173 * omap_hwcontrol - hardware specific access to control-lines
174 * @mtd: MTD device structure
175 * @cmd: command to device
176 * @ctrl:
177 * NAND_NCE: bit 0 -> don't care
178 * NAND_CLE: bit 1 -> Command Latch
179 * NAND_ALE: bit 2 -> Address Latch
180 *
181 * NOTE: boards may use different bits for these!!
182 */
183 static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
184 {
185 struct omap_nand_info *info = container_of(mtd,
186 struct omap_nand_info, mtd);
187 switch (ctrl) {
188 case NAND_CTRL_CHANGE | NAND_CTRL_CLE:
189 info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
190 GPMC_CS_NAND_COMMAND;
191 info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
192 GPMC_CS_NAND_DATA;
193 break;
194
195 case NAND_CTRL_CHANGE | NAND_CTRL_ALE:
196 info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
197 GPMC_CS_NAND_ADDRESS;
198 info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
199 GPMC_CS_NAND_DATA;
200 break;
201
202 case NAND_CTRL_CHANGE | NAND_NCE:
203 info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
204 GPMC_CS_NAND_DATA;
205 info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
206 GPMC_CS_NAND_DATA;
207 break;
208 }
209
210 if (cmd != NAND_CMD_NONE)
211 __raw_writeb(cmd, info->nand.IO_ADDR_W);
212 }
213
214 /**
215 * omap_read_buf8 - read data from NAND controller into buffer
216 * @mtd: MTD device structure
217  * @buf: buffer to store data
218 * @len: number of bytes to read
219 */
220 static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
221 {
222 struct nand_chip *nand = mtd->priv;
223
224 ioread8_rep(nand->IO_ADDR_R, buf, len);
225 }
226
227 /**
228 * omap_write_buf8 - write buffer to NAND controller
229 * @mtd: MTD device structure
230 * @buf: data buffer
231 * @len: number of bytes to write
232 */
233 static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
234 {
235 struct omap_nand_info *info = container_of(mtd,
236 struct omap_nand_info, mtd);
237 u_char *p = (u_char *)buf;
238
239 while (len--) {
240 iowrite8(*p++, info->nand.IO_ADDR_W);
241 while (GPMC_BUF_EMPTY == (readl(info->gpmc_baseaddr +
242 GPMC_STATUS) & GPMC_BUF_FULL));
243 }
244 }
245
246 /**
247 * omap_read_buf16 - read data from NAND controller into buffer
248 * @mtd: MTD device structure
249  * @buf: buffer to store data
250 * @len: number of bytes to read
251 */
252 static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
253 {
254 struct nand_chip *nand = mtd->priv;
255
256 ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
257 }
258
259 /**
260 * omap_write_buf16 - write buffer to NAND controller
261 * @mtd: MTD device structure
262 * @buf: data buffer
263 * @len: number of bytes to write
264 */
265 static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
266 {
267 struct omap_nand_info *info = container_of(mtd,
268 struct omap_nand_info, mtd);
269 u16 *p = (u16 *) buf;
270
271 /* FIXME try bursts of writesw() or DMA ... */
272 len >>= 1;
273
274 while (len--) {
275 iowrite16(*p++, info->nand.IO_ADDR_W);
276
277 while (GPMC_BUF_EMPTY == (readl(info->gpmc_baseaddr +
278 GPMC_STATUS) & GPMC_BUF_FULL))
279 ;
280 }
281 }
282
283 /**
284 * omap_read_buf_pref - read data from NAND controller into buffer
285 * @mtd: MTD device structure
286  * @buf: buffer to store data
287 * @len: number of bytes to read
288 */
289 static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
290 {
291 struct omap_nand_info *info = container_of(mtd,
292 struct omap_nand_info, mtd);
293 uint32_t pfpw_status = 0, r_count = 0;
294 int ret = 0;
295 u32 *p = (u32 *)buf;
296
297 /* take care of subpage reads */
298 if (len % 4) {
299 if (info->nand.options & NAND_BUSWIDTH_16)
300 omap_read_buf16(mtd, buf, len % 4);
301 else
302 omap_read_buf8(mtd, buf, len % 4);
303 p = (u32 *) (buf + len % 4);
304 len -= len % 4;
305 }
306
307 /* configure and start prefetch transfer */
308 ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
309 if (ret) {
310 /* PFPW engine is busy, use cpu copy method */
311 if (info->nand.options & NAND_BUSWIDTH_16)
312 omap_read_buf16(mtd, buf, len);
313 else
314 omap_read_buf8(mtd, buf, len);
315 } else {
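		/*
		 * Bits 30:24 of the prefetch status hold the number of bytes
		 * currently buffered in the prefetch FIFO; shifting right by
		 * two converts that to the number of 32-bit words to drain.
		 */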
316 do {
317 pfpw_status = gpmc_prefetch_status();
318 r_count = ((pfpw_status >> 24) & 0x7F) >> 2;
319 ioread32_rep(info->nand_pref_fifo_add, p, r_count);
320 p += r_count;
321 len -= r_count << 2;
322 } while (len);
323
324 /* disable and stop the PFPW engine */
325 gpmc_prefetch_reset();
326 }
327 }
328
329 /**
330 * omap_write_buf_pref - write buffer to NAND controller
331 * @mtd: MTD device structure
332 * @buf: data buffer
333 * @len: number of bytes to write
334 */
335 static void omap_write_buf_pref(struct mtd_info *mtd,
336 const u_char *buf, int len)
337 {
338 struct omap_nand_info *info = container_of(mtd,
339 struct omap_nand_info, mtd);
340 uint32_t pfpw_status = 0, w_count = 0;
341 int i = 0, ret = 0;
342 u16 *p = (u16 *) buf;
343
344 /* take care of subpage writes */
345 if (len % 2 != 0) {
346 writeb(*buf, info->nand.IO_ADDR_R);
347 p = (u16 *)(buf + 1);
348 len--;
349 }
350
351 /* configure and start prefetch transfer */
352 ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1);
353 if (ret) {
354 /* PFPW engine is busy, use cpu copy method */
355 if (info->nand.options & NAND_BUSWIDTH_16)
356 omap_write_buf16(mtd, buf, len);
357 else
358 omap_write_buf8(mtd, buf, len);
359 } else {
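		/*
		 * Bits 13:0 of the prefetch status count the bytes the engine
		 * still expects; bits 30:24 appear to report free FIFO space,
		 * halved here to get the number of 16-bit writes that fit.
		 */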
360 pfpw_status = gpmc_prefetch_status();
361 while (pfpw_status & 0x3FFF) {
362 w_count = ((pfpw_status >> 24) & 0x7F) >> 1;
363 for (i = 0; (i < w_count) && len; i++, len -= 2)
364 iowrite16(*p++, info->nand_pref_fifo_add);
365 pfpw_status = gpmc_prefetch_status();
366 }
367
368 /* disable and stop the PFPW engine */
369 gpmc_prefetch_reset();
370 }
371 }
372
373 #ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
374 /*
375 * omap_nand_dma_cb: callback on the completion of dma transfer
376 * @lch: logical channel
377  * @ch_status: channel status
378 * @data: pointer to completion data structure
379 */
380 static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
381 {
382 complete((struct completion *) data);
383 }
384
385 /*
386  * omap_nand_dma_transfer: configure and start DMA transfer
387 * @mtd: MTD device structure
388 * @addr: virtual address in RAM of source/destination
389 * @len: number of data bytes to be transferred
390 * @is_write: flag for read/write operation
391 */
392 static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
393 unsigned int len, int is_write)
394 {
395 struct omap_nand_info *info = container_of(mtd,
396 struct omap_nand_info, mtd);
397 uint32_t prefetch_status = 0;
398 enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
399 DMA_FROM_DEVICE;
400 dma_addr_t dma_addr;
401 int ret;
402
403 /* The fifo depth is 64 bytes. We have a sync at each frame and frame
404 * length is 64 bytes.
405 */
406 int buf_len = len >> 6;
407
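	/*
	 * Buffers above high_memory come from vmalloc space; only DMA from
	 * them if they do not cross a page boundary, after translating the
	 * address to its backing page. Otherwise fall back to a CPU copy.
	 */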
408 if (addr >= high_memory) {
409 struct page *p1;
410
411 if (((size_t)addr & PAGE_MASK) !=
412 ((size_t)(addr + len - 1) & PAGE_MASK))
413 goto out_copy;
414 p1 = vmalloc_to_page(addr);
415 if (!p1)
416 goto out_copy;
417 addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
418 }
419
420 dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
421 if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
422 dev_err(&info->pdev->dev,
423 "Couldn't DMA map a %d byte buffer\n", len);
424 goto out_copy;
425 }
426
427 if (is_write) {
428 omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
429 info->phys_base, 0, 0);
430 omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
431 dma_addr, 0, 0);
432 omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
433 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
434 OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
435 } else {
436 omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
437 info->phys_base, 0, 0);
438 omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
439 dma_addr, 0, 0);
440 omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
441 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
442 OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
443 }
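	/*
	 * Each DMA frame is 0x10 S32 elements (64 bytes), matching the
	 * prefetch FIFO depth; buf_len frames cover the whole transfer,
	 * synchronized on the GPMC DMA request.
	 */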
444 /* configure and start prefetch transfer */
445 ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write);
446 if (ret)
447                 /* PFPW engine is busy, use cpu copy method */
448 goto out_copy;
449
450 init_completion(&info->comp);
451
452 omap_start_dma(info->dma_ch);
453
454 /* setup and start DMA using dma_addr */
455 wait_for_completion(&info->comp);
456
457 while (0x3fff & (prefetch_status = gpmc_prefetch_status()))
458 ;
459 /* disable and stop the PFPW engine */
460 gpmc_prefetch_reset();
461
462 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
463 return 0;
464
465 out_copy:
466 if (info->nand.options & NAND_BUSWIDTH_16)
467 is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
468 : omap_write_buf16(mtd, (u_char *) addr, len);
469 else
470 is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
471 : omap_write_buf8(mtd, (u_char *) addr, len);
472 return 0;
473 }
474 #else
475 static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) {}
476 static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
477 unsigned int len, int is_write)
478 {
479 return 0;
480 }
481 #endif
482
483 /**
484 * omap_read_buf_dma_pref - read data from NAND controller into buffer
485 * @mtd: MTD device structure
486  * @buf: buffer to store data
487 * @len: number of bytes to read
488 */
489 static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
490 {
491 if (len <= mtd->oobsize)
492 omap_read_buf_pref(mtd, buf, len);
493 else
494 /* start transfer in DMA mode */
495 omap_nand_dma_transfer(mtd, buf, len, 0x0);
496 }
497
498 /**
499 * omap_write_buf_dma_pref - write buffer to NAND controller
500 * @mtd: MTD device structure
501 * @buf: data buffer
502 * @len: number of bytes to write
503 */
504 static void omap_write_buf_dma_pref(struct mtd_info *mtd,
505 const u_char *buf, int len)
506 {
507 if (len <= mtd->oobsize)
508 omap_write_buf_pref(mtd, buf, len);
509 else
510 /* start transfer in DMA mode */
511 omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
512 }
513
514 /**
515 * omap_verify_buf - Verify chip data against buffer
516 * @mtd: MTD device structure
517 * @buf: buffer containing the data to compare
518 * @len: number of bytes to compare
519 */
520 static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
521 {
522 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
523 mtd);
524 u16 *p = (u16 *) buf;
525
526 len >>= 1;
527 while (len--) {
528 if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
529 return -EFAULT;
530 }
531
532 return 0;
533 }
534
535 #ifdef CONFIG_MTD_NAND_OMAP_HWECC
536 /**
537 * omap_hwecc_init - Initialize the HW ECC for NAND flash in GPMC controller
538 * @mtd: MTD device structure
539 */
540 static void omap_hwecc_init(struct mtd_info *mtd)
541 {
542 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
543 mtd);
544 struct nand_chip *chip = mtd->priv;
545 unsigned long val = 0x0;
546
547 /* Read from ECC Control Register */
548 val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_CONTROL);
549 /* Clear all ECC | Enable Reg1 */
550 val = ((0x00000001<<8) | 0x00000001);
551 __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
552
553 /* Read from ECC Size Config Register */
554 val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_SIZE_CONFIG);
555 /* ECCSIZE1=512 | Select eccResultsize[0-3] */
556 val = ((((chip->ecc.size >> 1) - 1) << 22) | (0x0000000F));
557 __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_SIZE_CONFIG);
558 }
559
560 /**
561 * gen_true_ecc - This function will generate true ECC value
562 * @ecc_buf: buffer to store ecc code
563 *
564 * This generated true ECC value can be used when correcting
565 * data read from NAND flash memory core
566 */
567 static void gen_true_ecc(u8 *ecc_buf)
568 {
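	/*
	 * Reassemble the three ECC bytes into the GPMC_ECC1_RESULT layout:
	 * even parities in bits 0..11, odd parities in bits 16..27 (the
	 * reverse of what omap_calculate_ecc() extracts).
	 */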
569 u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
570 ((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
571
572 ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
573 P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
574 ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
575 P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
576 ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
577 P1e(tmp) | P2048o(tmp) | P2048e(tmp));
578 }
579
580 /**
581 * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
582 * @ecc_data1: ecc code from nand spare area
583 * @ecc_data2: ecc code from hardware register obtained from hardware ecc
584 * @page_data: page data
585 *
586  * This function compares two ECCs and indicates if there is an error.
587  * If the error can be corrected, it is corrected in the page buffer.
588 */
589 static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
590 u8 *ecc_data2, /* read from register */
591 u8 *page_data)
592 {
593 uint i;
594 u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
595 u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
596 u8 ecc_bit[24];
597 u8 ecc_sum = 0;
598 u8 find_bit = 0;
599 uint find_byte = 0;
600 int isEccFF;
601
602 isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);
603
604 gen_true_ecc(ecc_data1);
605 gen_true_ecc(ecc_data2);
606
607 for (i = 0; i <= 2; i++) {
608 *(ecc_data1 + i) = ~(*(ecc_data1 + i));
609 *(ecc_data2 + i) = ~(*(ecc_data2 + i));
610 }
611
612 for (i = 0; i < 8; i++) {
613 tmp0_bit[i] = *ecc_data1 % 2;
614 *ecc_data1 = *ecc_data1 / 2;
615 }
616
617 for (i = 0; i < 8; i++) {
618 tmp1_bit[i] = *(ecc_data1 + 1) % 2;
619 *(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
620 }
621
622 for (i = 0; i < 8; i++) {
623 tmp2_bit[i] = *(ecc_data1 + 2) % 2;
624 *(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
625 }
626
627 for (i = 0; i < 8; i++) {
628 comp0_bit[i] = *ecc_data2 % 2;
629 *ecc_data2 = *ecc_data2 / 2;
630 }
631
632 for (i = 0; i < 8; i++) {
633 comp1_bit[i] = *(ecc_data2 + 1) % 2;
634 *(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
635 }
636
637 for (i = 0; i < 8; i++) {
638 comp2_bit[i] = *(ecc_data2 + 2) % 2;
639 *(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
640 }
641
642 for (i = 0; i < 6; i++)
643 ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];
644
645 for (i = 0; i < 8; i++)
646 ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];
647
648 for (i = 0; i < 8; i++)
649 ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];
650
651 ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
652 ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];
653
654 for (i = 0; i < 24; i++)
655 ecc_sum += ecc_bit[i];
656
657 switch (ecc_sum) {
658 case 0:
659 /* Not reached because this function is not called if
660 * ECC values are equal
661 */
662 return 0;
663
664 case 1:
665 /* Uncorrectable error */
666 DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
667 return -1;
668
669 case 11:
670 /* UN-Correctable error */
671 DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
672 return -1;
673
674 case 12:
675 /* Correctable error */
676 find_byte = (ecc_bit[23] << 8) +
677 (ecc_bit[21] << 7) +
678 (ecc_bit[19] << 6) +
679 (ecc_bit[17] << 5) +
680 (ecc_bit[15] << 4) +
681 (ecc_bit[13] << 3) +
682 (ecc_bit[11] << 2) +
683 (ecc_bit[9] << 1) +
684 ecc_bit[7];
685
686 find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
687
688 DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
689 "offset: %d, bit: %d\n", find_byte, find_bit);
690
691 page_data[find_byte] ^= (1 << find_bit);
692
693 return 0;
694 default:
695 if (isEccFF) {
696 if (ecc_data2[0] == 0 &&
697 ecc_data2[1] == 0 &&
698 ecc_data2[2] == 0)
699 return 0;
700 }
701 DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
702 return -1;
703 }
704 }
705
706 /**
707 * omap_correct_data - Compares the ECC read with HW generated ECC
708 * @mtd: MTD device structure
709 * @dat: page data
710 * @read_ecc: ecc read from nand flash
711 * @calc_ecc: ecc read from HW ECC registers
712 *
713 * Compares the ecc read from nand spare area with ECC registers values
714  * and if the ECCs mismatch, it will call 'omap_compare_ecc' for error detection
715 * and correction.
716 */
717 static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
718 u_char *read_ecc, u_char *calc_ecc)
719 {
720 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
721 mtd);
722 int blockCnt = 0, i = 0, ret = 0;
723
724 /* Ex NAND_ECC_HW12_2048 */
725 if ((info->nand.ecc.mode == NAND_ECC_HW) &&
726 (info->nand.ecc.size == 2048))
727 blockCnt = 4;
728 else
729 blockCnt = 1;
730
731 for (i = 0; i < blockCnt; i++) {
732 if (memcmp(read_ecc, calc_ecc, 3) != 0) {
733 ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
734 if (ret < 0)
735 return ret;
736 }
737 read_ecc += 3;
738 calc_ecc += 3;
739 dat += 512;
740 }
741 return 0;
742 }
743
744 /**
745  * omap_calculate_ecc - Generate non-inverted ECC bytes.
746 * @mtd: MTD device structure
747 * @dat: The pointer to data on which ecc is computed
748 * @ecc_code: The ecc_code buffer
749 *
750 * Using noninverted ECC can be considered ugly since writing a blank
751  * page, i.e. padding, will clear the ECC bytes. This is no problem as long as
752 * nobody is trying to write data on the seemingly unused page. Reading
753 * an erased page will produce an ECC mismatch between generated and read
754 * ECC bytes that has to be dealt with separately.
755 */
756 static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
757 u_char *ecc_code)
758 {
759 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
760 mtd);
761 unsigned long val = 0x0;
762 unsigned long reg;
763
764 /* Start Reading from HW ECC1_Result = 0x200 */
765 reg = (unsigned long)(info->gpmc_baseaddr + GPMC_ECC1_RESULT);
766 val = __raw_readl(reg);
767 *ecc_code++ = val; /* P128e, ..., P1e */
768 *ecc_code++ = val >> 16; /* P128o, ..., P1o */
769 /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
770 *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
771 reg += 4;
772
773 return 0;
774 }
775
776 /**
777 * omap_enable_hwecc - This function enables the hardware ecc functionality
778 * @mtd: MTD device structure
779 * @mode: Read/Write mode
780 */
781 static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
782 {
783 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
784 mtd);
785 struct nand_chip *chip = mtd->priv;
786 unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
787 unsigned long val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_CONFIG);
788
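	/*
	 * In the ECC_CONTROL writes below, bit 8 clears the ECC result
	 * registers and the low nibble selects the active result register
	 * (0x101 clears and points at result register 1; 0x100 only clears).
	 */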
789 switch (mode) {
790 case NAND_ECC_READ:
791 __raw_writel(0x101, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
792 /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
793 val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
794 break;
795 case NAND_ECC_READSYN:
796 __raw_writel(0x100, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
797 /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
798 val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
799 break;
800 case NAND_ECC_WRITE:
801 __raw_writel(0x101, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
802 /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
803 val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
804 break;
805 default:
806 DEBUG(MTD_DEBUG_LEVEL0, "Error: Unrecognized Mode[%d]!\n",
807 mode);
808 break;
809 }
810
811 __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_CONFIG);
812 }
813 #endif
814
815 /**
816 * omap_wait - wait until the command is done
817 * @mtd: MTD device structure
818 * @chip: NAND Chip structure
819 *
820  * The wait function is called during program and erase operations; given
821  * the way it is called from the MTD layer, we should wait until the NAND
822  * chip is ready after the programming/erase operation has completed.
823 *
824 * Erase can take up to 400ms and program up to 20ms according to
825 * general NAND and SmartMedia specs
826 */
827 static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
828 {
829 struct nand_chip *this = mtd->priv;
830 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
831 mtd);
832 unsigned long timeo = jiffies;
833 int status = NAND_STATUS_FAIL, state = this->state;
834
835 if (state == FL_ERASING)
836 timeo += (HZ * 400) / 1000;
837 else
838 timeo += (HZ * 20) / 1000;
839
840 this->IO_ADDR_W = (void *) info->gpmc_cs_baseaddr +
841 GPMC_CS_NAND_COMMAND;
842 this->IO_ADDR_R = (void *) info->gpmc_cs_baseaddr + GPMC_CS_NAND_DATA;
843
844 __raw_writeb(NAND_CMD_STATUS & 0xFF, this->IO_ADDR_W);
845
846 while (time_before(jiffies, timeo)) {
847 status = __raw_readb(this->IO_ADDR_R);
848 if (status & NAND_STATUS_READY)
849 break;
850 cond_resched();
851 }
852 return status;
853 }
854
855 /**
856 * omap_dev_ready - calls the platform specific dev_ready function
857 * @mtd: MTD device structure
858 */
859 static int omap_dev_ready(struct mtd_info *mtd)
860 {
861 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
862 mtd);
863 unsigned int val = __raw_readl(info->gpmc_baseaddr + GPMC_IRQ_STATUS);
864
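	/*
	 * Bit 8 of GPMC_IRQ_STATUS reflects the wait-pin (R/B#) edge-detection
	 * event; it is cleared by writing a 1 back to it.
	 */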
865 if ((val & 0x100) == 0x100) {
866 /* Clear IRQ Interrupt */
867 val |= 0x100;
868 val &= ~(0x0);
869 __raw_writel(val, info->gpmc_baseaddr + GPMC_IRQ_STATUS);
870 } else {
871 unsigned int cnt = 0;
872 while (cnt++ < 0x1FF) {
873 if ((val & 0x100) == 0x100)
874 return 0;
875 val = __raw_readl(info->gpmc_baseaddr +
876 GPMC_IRQ_STATUS);
877 }
878 }
879
880 return 1;
881 }
882
883 static int __devinit omap_nand_probe(struct platform_device *pdev)
884 {
885 struct omap_nand_info *info;
886 struct omap_nand_platform_data *pdata;
887 int err;
888 unsigned long val;
889
890
891 pdata = pdev->dev.platform_data;
892 if (pdata == NULL) {
893 dev_err(&pdev->dev, "platform data missing\n");
894 return -ENODEV;
895 }
896
897 info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
898 if (!info)
899 return -ENOMEM;
900
901 platform_set_drvdata(pdev, info);
902
903 spin_lock_init(&info->controller.lock);
904 init_waitqueue_head(&info->controller.wq);
905
906 info->pdev = pdev;
907
908 info->gpmc_cs = pdata->cs;
909 info->gpmc_baseaddr = pdata->gpmc_baseaddr;
910 info->gpmc_cs_baseaddr = pdata->gpmc_cs_baseaddr;
911
912 info->mtd.priv = &info->nand;
913 info->mtd.name = dev_name(&pdev->dev);
914 info->mtd.owner = THIS_MODULE;
915
916 err = gpmc_cs_request(info->gpmc_cs, NAND_IO_SIZE, &info->phys_base);
917 if (err < 0) {
918 dev_err(&pdev->dev, "Cannot request GPMC CS\n");
919 goto out_free_info;
920 }
921
922 /* Enable RD PIN Monitoring Reg */
923 if (pdata->dev_ready) {
924 val = gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG1);
925 val |= WR_RD_PIN_MONITORING;
926 gpmc_cs_write_reg(info->gpmc_cs, GPMC_CS_CONFIG1, val);
927 }
928
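	/*
	 * Adjust the chip-select address window: CONFIG7 bits 11:8 hold the
	 * address mask, and the value 0xc appears to map a 64 MiB region.
	 */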
929 val = gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG7);
930 val &= ~(0xf << 8);
931 val |= (0xc & 0xf) << 8;
932 gpmc_cs_write_reg(info->gpmc_cs, GPMC_CS_CONFIG7, val);
933
934 /* NAND write protect off */
935 omap_nand_wp(&info->mtd, NAND_WP_OFF);
936
937 if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
938 pdev->dev.driver->name)) {
939 err = -EBUSY;
940 goto out_free_cs;
941 }
942
943 info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
944 if (!info->nand.IO_ADDR_R) {
945 err = -ENOMEM;
946 goto out_release_mem_region;
947 }
948
949 info->nand.controller = &info->controller;
950
951 info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
952 info->nand.cmd_ctrl = omap_hwcontrol;
953
954 /*
955 * If RDY/BSY line is connected to OMAP then use the omap ready
956          * function and the generic nand_wait function which reads the status
957          * register after monitoring the RDY/BSY line. Otherwise use a standard
958 * chip delay which is slightly more than tR (AC Timing) of the NAND
959 * device and read status register until you get a failure or success
960 */
961 if (pdata->dev_ready) {
962 info->nand.dev_ready = omap_dev_ready;
963 info->nand.chip_delay = 0;
964 } else {
965 info->nand.waitfunc = omap_wait;
966 info->nand.chip_delay = 50;
967 }
968
969 info->nand.options |= NAND_SKIP_BBTSCAN;
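	/*
	 * CONFIG1 bits 13:12 encode the attached device width; a value of
	 * 0x1000 means this chip select is wired for a 16-bit device.
	 */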
970 if ((gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG1) & 0x3000)
971 == 0x1000)
972 info->nand.options |= NAND_BUSWIDTH_16;
973
974 if (use_prefetch) {
975 /* copy the virtual address of nand base for fifo access */
976 info->nand_pref_fifo_add = info->nand.IO_ADDR_R;
977
978 info->nand.read_buf = omap_read_buf_pref;
979 info->nand.write_buf = omap_write_buf_pref;
980 if (use_dma) {
981 err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
982 omap_nand_dma_cb, &info->comp, &info->dma_ch);
983 if (err < 0) {
984 info->dma_ch = -1;
985 printk(KERN_WARNING "DMA request failed."
986 " Non-dma data transfer mode\n");
987 } else {
988 omap_set_dma_dest_burst_mode(info->dma_ch,
989 OMAP_DMA_DATA_BURST_16);
990 omap_set_dma_src_burst_mode(info->dma_ch,
991 OMAP_DMA_DATA_BURST_16);
992
993 info->nand.read_buf = omap_read_buf_dma_pref;
994 info->nand.write_buf = omap_write_buf_dma_pref;
995 }
996 }
997 } else {
998 if (info->nand.options & NAND_BUSWIDTH_16) {
999 info->nand.read_buf = omap_read_buf16;
1000 info->nand.write_buf = omap_write_buf16;
1001 } else {
1002 info->nand.read_buf = omap_read_buf8;
1003 info->nand.write_buf = omap_write_buf8;
1004 }
1005 }
1006 info->nand.verify_buf = omap_verify_buf;
1007
1008 #ifdef CONFIG_MTD_NAND_OMAP_HWECC
1009 info->nand.ecc.bytes = 3;
1010 info->nand.ecc.size = 512;
1011 info->nand.ecc.calculate = omap_calculate_ecc;
1012 info->nand.ecc.hwctl = omap_enable_hwecc;
1013 info->nand.ecc.correct = omap_correct_data;
1014 info->nand.ecc.mode = NAND_ECC_HW;
1015
1016 /* init HW ECC */
1017 omap_hwecc_init(&info->mtd);
1018 #else
1019 info->nand.ecc.mode = NAND_ECC_SOFT;
1020 #endif
1021
1022 /* DIP switches on some boards change between 8 and 16 bit
1023 * bus widths for flash. Try the other width if the first try fails.
1024 */
1025 if (nand_scan(&info->mtd, 1)) {
1026 info->nand.options ^= NAND_BUSWIDTH_16;
1027 if (nand_scan(&info->mtd, 1)) {
1028 err = -ENXIO;
1029 goto out_release_mem_region;
1030 }
1031 }
1032
1033 #ifdef CONFIG_MTD_PARTITIONS
1034 err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
1035 if (err > 0)
1036 add_mtd_partitions(&info->mtd, info->parts, err);
1037 else if (pdata->parts)
1038 add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
1039 else
1040 #endif
1041 add_mtd_device(&info->mtd);
1042
1043 platform_set_drvdata(pdev, &info->mtd);
1044
1045 return 0;
1046
1047 out_release_mem_region:
1048 release_mem_region(info->phys_base, NAND_IO_SIZE);
1049 out_free_cs:
1050 gpmc_cs_free(info->gpmc_cs);
1051 out_free_info:
1052 kfree(info);
1053
1054 return err;
1055 }
1056
1057 static int omap_nand_remove(struct platform_device *pdev)
1058 {
1059 struct mtd_info *mtd = platform_get_drvdata(pdev);
1060 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1061 mtd);
1062
1063 platform_set_drvdata(pdev, NULL);
1064 if (use_dma)
1065 omap_free_dma(info->dma_ch);
1066
1067 /* Release NAND device, its internal structures and partitions */
1068 nand_release(&info->mtd);
1069 iounmap(info->nand_pref_fifo_add);
1070         kfree(info);
1071 return 0;
1072 }
1073
1074 static struct platform_driver omap_nand_driver = {
1075 .probe = omap_nand_probe,
1076 .remove = omap_nand_remove,
1077 .driver = {
1078 .name = DRIVER_NAME,
1079 .owner = THIS_MODULE,
1080 },
1081 };
1082
1083 static int __init omap_nand_init(void)
1084 {
1085 printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME);
1086
1087 /* This check is required if driver is being
1088 * loaded run time as a module
1089 */
1090 if ((1 == use_dma) && (0 == use_prefetch)) {
1091                 printk(KERN_INFO "Wrong parameters: 'use_dma' cannot be 1 "
1092                                 "without 'use_prefetch'. Prefetch will not be"
1093                                 " used in either mode (mpu or dma)\n");
1094 }
1095 return platform_driver_register(&omap_nand_driver);
1096 }
1097
1098 static void __exit omap_nand_exit(void)
1099 {
1100 platform_driver_unregister(&omap_nand_driver);
1101 }
1102
1103 module_init(omap_nand_init);
1104 module_exit(omap_nand_exit);
1105
1106 MODULE_ALIAS(DRIVER_NAME);
1107 MODULE_LICENSE("GPL");
1108 MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");