omap3: nand: configurable fifo threshold to gain the throughput
[deliverable/linux.git] / drivers / mtd / nand / omap2.c
CommitLineData
67ce04bf
VS
1/*
2 * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
3 * Copyright © 2004 Micron Technology Inc.
4 * Copyright © 2004 David Brownell
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/platform_device.h>
12#include <linux/dma-mapping.h>
13#include <linux/delay.h>
4e070376 14#include <linux/interrupt.h>
c276aca4 15#include <linux/jiffies.h>
16#include <linux/sched.h>
67ce04bf
VS
17#include <linux/mtd/mtd.h>
18#include <linux/mtd/nand.h>
19#include <linux/mtd/partitions.h>
20#include <linux/io.h>
5a0e3ad6 21#include <linux/slab.h>
67ce04bf 22
ce491cf8
TL
23#include <plat/dma.h>
24#include <plat/gpmc.h>
25#include <plat/nand.h>
67ce04bf 26
67ce04bf 27#define DRIVER_NAME "omap2-nand"
4e070376 28#define OMAP_NAND_TIMEOUT_MS 5000
67ce04bf 29
67ce04bf
VS
/*
 * The GPMC hardware ECC engine produces a 24-bit Hamming-style code.
 * Each NAND_Ecc_P*e / NAND_Ecc_P*o constant names one even/odd parity
 * bit inside a 32-bit working value: even parities occupy bits 0..11,
 * odd parities occupy bits 16..27.
 */
#define NAND_Ecc_P1e		(1 << 0)
#define NAND_Ecc_P2e		(1 << 1)
#define NAND_Ecc_P4e		(1 << 2)
#define NAND_Ecc_P8e		(1 << 3)
#define NAND_Ecc_P16e		(1 << 4)
#define NAND_Ecc_P32e		(1 << 5)
#define NAND_Ecc_P64e		(1 << 6)
#define NAND_Ecc_P128e		(1 << 7)
#define NAND_Ecc_P256e		(1 << 8)
#define NAND_Ecc_P512e		(1 << 9)
#define NAND_Ecc_P1024e		(1 << 10)
#define NAND_Ecc_P2048e		(1 << 11)

#define NAND_Ecc_P1o		(1 << 16)
#define NAND_Ecc_P2o		(1 << 17)
#define NAND_Ecc_P4o		(1 << 18)
#define NAND_Ecc_P8o		(1 << 19)
#define NAND_Ecc_P16o		(1 << 20)
#define NAND_Ecc_P32o		(1 << 21)
#define NAND_Ecc_P64o		(1 << 22)
#define NAND_Ecc_P128o		(1 << 23)
#define NAND_Ecc_P256o		(1 << 24)
#define NAND_Ecc_P512o		(1 << 25)
#define NAND_Ecc_P1024o		(1 << 26)
#define NAND_Ecc_P2048o		(1 << 27)

/* Collapse any non-zero bit test to exactly 0 or 1 */
#define TF(value)	(value ? 1 : 0)

/* Extract parity bits and pack them into the layout of ECC byte 2 */
#define P2048e(a)	(TF(a & NAND_Ecc_P2048e)	<< 0)
#define P2048o(a)	(TF(a & NAND_Ecc_P2048o)	<< 1)
#define P1e(a)		(TF(a & NAND_Ecc_P1e)		<< 2)
#define P1o(a)		(TF(a & NAND_Ecc_P1o)		<< 3)
#define P2e(a)		(TF(a & NAND_Ecc_P2e)		<< 4)
#define P2o(a)		(TF(a & NAND_Ecc_P2o)		<< 5)
#define P4e(a)		(TF(a & NAND_Ecc_P4e)		<< 6)
#define P4o(a)		(TF(a & NAND_Ecc_P4o)		<< 7)

/* Layout of ECC byte 0 */
#define P8e(a)		(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o(a)		(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e(a)		(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o(a)		(TF(a & NAND_Ecc_P16o)		<< 3)
#define P32e(a)		(TF(a & NAND_Ecc_P32e)		<< 4)
#define P32o(a)		(TF(a & NAND_Ecc_P32o)		<< 5)
#define P64e(a)		(TF(a & NAND_Ecc_P64e)		<< 6)
#define P64o(a)		(TF(a & NAND_Ecc_P64o)		<< 7)

/* Layout of ECC byte 1 */
#define P128e(a)	(TF(a & NAND_Ecc_P128e)		<< 0)
#define P128o(a)	(TF(a & NAND_Ecc_P128o)		<< 1)
#define P256e(a)	(TF(a & NAND_Ecc_P256e)		<< 2)
#define P256o(a)	(TF(a & NAND_Ecc_P256o)		<< 3)
#define P512e(a)	(TF(a & NAND_Ecc_P512e)		<< 4)
#define P512o(a)	(TF(a & NAND_Ecc_P512o)		<< 5)
#define P1024e(a)	(TF(a & NAND_Ecc_P1024e)	<< 6)
#define P1024o(a)	(TF(a & NAND_Ecc_P1024o)	<< 7)

/* Alternate packings ("_s" = swapped layout used for 16-bit devices) */
#define P8e_s(a)	(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o_s(a)	(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e_s(a)	(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o_s(a)	(TF(a & NAND_Ecc_P16o)		<< 3)
#define P1e_s(a)	(TF(a & NAND_Ecc_P1e)		<< 4)
#define P1o_s(a)	(TF(a & NAND_Ecc_P1o)		<< 5)
#define P2e_s(a)	(TF(a & NAND_Ecc_P2e)		<< 6)
#define P2o_s(a)	(TF(a & NAND_Ecc_P2o)		<< 7)

#define P4e_s(a)	(TF(a & NAND_Ecc_P4e)		<< 0)
#define P4o_s(a)	(TF(a & NAND_Ecc_P4o)		<< 1)

#ifdef CONFIG_MTD_PARTITIONS
/* Partition parsers tried, in order, before falling back to board data */
static const char *part_probes[] = { "cmdlinepart", NULL };
#endif
100
/* Per-chip-select driver state; embeds the mtd_info and nand_chip */
struct omap_nand_info {
	struct nand_hw_control		controller;	/* lock/wq shared with nand core */
	struct omap_nand_platform_data	*pdata;		/* board data from platform_device */
	struct mtd_info			mtd;		/* NOTE: not first member - never kfree(&mtd) */
	struct mtd_partition		*parts;		/* partitions from cmdline parser */
	struct nand_chip		nand;
	struct platform_device		*pdev;

	int				gpmc_cs;	/* GPMC chip-select number */
	unsigned long			phys_base;	/* physical base of NAND I/O window */
	struct completion		comp;		/* signalled by DMA callback / irq handler */
	int				dma_ch;		/* DMA channel, -1 when DMA is unused */
	int				gpmc_irq;	/* GPMC irq, 0 when irq mode is unused */
	/* direction of the transfer in flight, consulted by omap_nand_irq() */
	enum {
		OMAP_NAND_IO_READ = 0,	/* read */
		OMAP_NAND_IO_WRITE,	/* write */
	} iomode;
	u_char				*buf;		/* cursor into caller buffer (irq mode) */
	int				buf_len;	/* bytes remaining (irq mode) */
};
121
67ce04bf
VS
122/**
123 * omap_hwcontrol - hardware specific access to control-lines
124 * @mtd: MTD device structure
125 * @cmd: command to device
126 * @ctrl:
127 * NAND_NCE: bit 0 -> don't care
128 * NAND_CLE: bit 1 -> Command Latch
129 * NAND_ALE: bit 2 -> Address Latch
130 *
131 * NOTE: boards may use different bits for these!!
132 */
133static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
134{
135 struct omap_nand_info *info = container_of(mtd,
136 struct omap_nand_info, mtd);
67ce04bf 137
2c01946c
SG
138 if (cmd != NAND_CMD_NONE) {
139 if (ctrl & NAND_CLE)
140 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd);
141
142 else if (ctrl & NAND_ALE)
143 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd);
144
145 else /* NAND_NCE */
146 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd);
147 }
67ce04bf
VS
148}
149
59e9c5ae 150/**
151 * omap_read_buf8 - read data from NAND controller into buffer
152 * @mtd: MTD device structure
153 * @buf: buffer to store date
154 * @len: number of bytes to read
155 */
156static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
157{
158 struct nand_chip *nand = mtd->priv;
159
160 ioread8_rep(nand->IO_ADDR_R, buf, len);
161}
162
163/**
164 * omap_write_buf8 - write buffer to NAND controller
165 * @mtd: MTD device structure
166 * @buf: data buffer
167 * @len: number of bytes to write
168 */
169static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
170{
171 struct omap_nand_info *info = container_of(mtd,
172 struct omap_nand_info, mtd);
173 u_char *p = (u_char *)buf;
2c01946c 174 u32 status = 0;
59e9c5ae 175
176 while (len--) {
177 iowrite8(*p++, info->nand.IO_ADDR_W);
2c01946c
SG
178 /* wait until buffer is available for write */
179 do {
180 status = gpmc_read_status(GPMC_STATUS_BUFFER);
181 } while (!status);
59e9c5ae 182 }
183}
184
67ce04bf
VS
185/**
186 * omap_read_buf16 - read data from NAND controller into buffer
187 * @mtd: MTD device structure
188 * @buf: buffer to store date
189 * @len: number of bytes to read
190 */
191static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
192{
193 struct nand_chip *nand = mtd->priv;
194
59e9c5ae 195 ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
67ce04bf
VS
196}
197
198/**
199 * omap_write_buf16 - write buffer to NAND controller
200 * @mtd: MTD device structure
201 * @buf: data buffer
202 * @len: number of bytes to write
203 */
204static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
205{
206 struct omap_nand_info *info = container_of(mtd,
207 struct omap_nand_info, mtd);
208 u16 *p = (u16 *) buf;
2c01946c 209 u32 status = 0;
67ce04bf
VS
210 /* FIXME try bursts of writesw() or DMA ... */
211 len >>= 1;
212
213 while (len--) {
59e9c5ae 214 iowrite16(*p++, info->nand.IO_ADDR_W);
2c01946c
SG
215 /* wait until buffer is available for write */
216 do {
217 status = gpmc_read_status(GPMC_STATUS_BUFFER);
218 } while (!status);
67ce04bf
VS
219 }
220}
59e9c5ae 221
222/**
223 * omap_read_buf_pref - read data from NAND controller into buffer
224 * @mtd: MTD device structure
225 * @buf: buffer to store date
226 * @len: number of bytes to read
227 */
228static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
229{
230 struct omap_nand_info *info = container_of(mtd,
231 struct omap_nand_info, mtd);
2c01946c 232 uint32_t r_count = 0;
59e9c5ae 233 int ret = 0;
234 u32 *p = (u32 *)buf;
235
236 /* take care of subpage reads */
c3341d0c
VS
237 if (len % 4) {
238 if (info->nand.options & NAND_BUSWIDTH_16)
239 omap_read_buf16(mtd, buf, len % 4);
240 else
241 omap_read_buf8(mtd, buf, len % 4);
242 p = (u32 *) (buf + len % 4);
243 len -= len % 4;
59e9c5ae 244 }
59e9c5ae 245
246 /* configure and start prefetch transfer */
317379a9
SG
247 ret = gpmc_prefetch_enable(info->gpmc_cs,
248 PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
59e9c5ae 249 if (ret) {
250 /* PFPW engine is busy, use cpu copy method */
251 if (info->nand.options & NAND_BUSWIDTH_16)
252 omap_read_buf16(mtd, buf, len);
253 else
254 omap_read_buf8(mtd, buf, len);
255 } else {
2c01946c 256 p = (u32 *) buf;
59e9c5ae 257 do {
2c01946c
SG
258 r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
259 r_count = r_count >> 2;
260 ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
59e9c5ae 261 p += r_count;
262 len -= r_count << 2;
263 } while (len);
59e9c5ae 264 /* disable and stop the PFPW engine */
948d38e7 265 gpmc_prefetch_reset(info->gpmc_cs);
59e9c5ae 266 }
267}
268
269/**
270 * omap_write_buf_pref - write buffer to NAND controller
271 * @mtd: MTD device structure
272 * @buf: data buffer
273 * @len: number of bytes to write
274 */
275static void omap_write_buf_pref(struct mtd_info *mtd,
276 const u_char *buf, int len)
277{
278 struct omap_nand_info *info = container_of(mtd,
279 struct omap_nand_info, mtd);
4e070376 280 uint32_t w_count = 0;
59e9c5ae 281 int i = 0, ret = 0;
2c01946c 282 u16 *p;
4e070376 283 unsigned long tim, limit;
59e9c5ae 284
285 /* take care of subpage writes */
286 if (len % 2 != 0) {
2c01946c 287 writeb(*buf, info->nand.IO_ADDR_W);
59e9c5ae 288 p = (u16 *)(buf + 1);
289 len--;
290 }
291
292 /* configure and start prefetch transfer */
317379a9
SG
293 ret = gpmc_prefetch_enable(info->gpmc_cs,
294 PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
59e9c5ae 295 if (ret) {
296 /* PFPW engine is busy, use cpu copy method */
297 if (info->nand.options & NAND_BUSWIDTH_16)
298 omap_write_buf16(mtd, buf, len);
299 else
300 omap_write_buf8(mtd, buf, len);
301 } else {
2c01946c
SG
302 p = (u16 *) buf;
303 while (len) {
304 w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
305 w_count = w_count >> 1;
59e9c5ae 306 for (i = 0; (i < w_count) && len; i++, len -= 2)
2c01946c 307 iowrite16(*p++, info->nand.IO_ADDR_W);
59e9c5ae 308 }
2c01946c 309 /* wait for data to flushed-out before reset the prefetch */
4e070376
SG
310 tim = 0;
311 limit = (loops_per_jiffy *
312 msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
313 while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
314 cpu_relax();
315
59e9c5ae 316 /* disable and stop the PFPW engine */
948d38e7 317 gpmc_prefetch_reset(info->gpmc_cs);
59e9c5ae 318 }
319}
320
dfe32893 321/*
322 * omap_nand_dma_cb: callback on the completion of dma transfer
323 * @lch: logical channel
324 * @ch_satuts: channel status
325 * @data: pointer to completion data structure
326 */
327static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
328{
329 complete((struct completion *) data);
330}
331
332/*
333 * omap_nand_dma_transfer: configer and start dma transfer
334 * @mtd: MTD device structure
335 * @addr: virtual address in RAM of source/destination
336 * @len: number of data bytes to be transferred
337 * @is_write: flag for read/write operation
338 */
339static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
340 unsigned int len, int is_write)
341{
342 struct omap_nand_info *info = container_of(mtd,
343 struct omap_nand_info, mtd);
dfe32893 344 enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
345 DMA_FROM_DEVICE;
346 dma_addr_t dma_addr;
347 int ret;
4e070376 348 unsigned long tim, limit;
dfe32893 349
317379a9
SG
350 /* The fifo depth is 64 bytes max.
351 * But configure the FIFO-threahold to 32 to get a sync at each frame
352 * and frame length is 32 bytes.
dfe32893 353 */
354 int buf_len = len >> 6;
355
356 if (addr >= high_memory) {
357 struct page *p1;
358
359 if (((size_t)addr & PAGE_MASK) !=
360 ((size_t)(addr + len - 1) & PAGE_MASK))
361 goto out_copy;
362 p1 = vmalloc_to_page(addr);
363 if (!p1)
364 goto out_copy;
365 addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
366 }
367
368 dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
369 if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
370 dev_err(&info->pdev->dev,
371 "Couldn't DMA map a %d byte buffer\n", len);
372 goto out_copy;
373 }
374
375 if (is_write) {
376 omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
377 info->phys_base, 0, 0);
378 omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
379 dma_addr, 0, 0);
380 omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
381 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
382 OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
383 } else {
384 omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
385 info->phys_base, 0, 0);
386 omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
387 dma_addr, 0, 0);
388 omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
389 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
390 OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
391 }
392 /* configure and start prefetch transfer */
317379a9
SG
393 ret = gpmc_prefetch_enable(info->gpmc_cs,
394 PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
dfe32893 395 if (ret)
4e070376 396 /* PFPW engine is busy, use cpu copy method */
dfe32893 397 goto out_copy;
398
399 init_completion(&info->comp);
400
401 omap_start_dma(info->dma_ch);
402
403 /* setup and start DMA using dma_addr */
404 wait_for_completion(&info->comp);
4e070376
SG
405 tim = 0;
406 limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
407 while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
408 cpu_relax();
dfe32893 409
dfe32893 410 /* disable and stop the PFPW engine */
f12f662f 411 gpmc_prefetch_reset(info->gpmc_cs);
dfe32893 412
413 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
414 return 0;
415
416out_copy:
417 if (info->nand.options & NAND_BUSWIDTH_16)
418 is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
419 : omap_write_buf16(mtd, (u_char *) addr, len);
420 else
421 is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
422 : omap_write_buf8(mtd, (u_char *) addr, len);
423 return 0;
424}
dfe32893 425
426/**
427 * omap_read_buf_dma_pref - read data from NAND controller into buffer
428 * @mtd: MTD device structure
429 * @buf: buffer to store date
430 * @len: number of bytes to read
431 */
432static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
433{
434 if (len <= mtd->oobsize)
435 omap_read_buf_pref(mtd, buf, len);
436 else
437 /* start transfer in DMA mode */
438 omap_nand_dma_transfer(mtd, buf, len, 0x0);
439}
440
441/**
442 * omap_write_buf_dma_pref - write buffer to NAND controller
443 * @mtd: MTD device structure
444 * @buf: data buffer
445 * @len: number of bytes to write
446 */
447static void omap_write_buf_dma_pref(struct mtd_info *mtd,
448 const u_char *buf, int len)
449{
450 if (len <= mtd->oobsize)
451 omap_write_buf_pref(mtd, buf, len);
452 else
453 /* start transfer in DMA mode */
bdaefc41 454 omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
dfe32893 455}
456
4e070376
SG
/*
 * omap_nand_irq - GPMC irq handler
 * @this_irq: gpmc irq number
 * @dev: omap_nand_info structure pointer is passed here
 *
 * Moves up to one FIFO-threshold worth of data per interrupt between
 * info->buf and the NAND data port, advancing the cursor state set up
 * by omap_{read,write}_buf_irq_pref().  Bit 0x2 of the irq status
 * appears to be the terminal-count event -- NOTE(review): confirm
 * against the GPMC_IRQSTATUS bit layout in the OMAP TRM.
 */
static irqreturn_t omap_nand_irq(int this_irq, void *dev)
{
	struct omap_nand_info *info = (struct omap_nand_info *) dev;
	u32 bytes;
	u32 irq_stat;

	irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
	bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
	bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
	if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
		/* terminal count: whole transfer has been posted */
		if (irq_stat & 0x2)
			goto done;

		/* clamp to the bytes actually remaining in the caller buffer */
		if (info->buf_len && (info->buf_len < bytes))
			bytes = info->buf_len;
		else if (!info->buf_len)
			bytes = 0;
		iowrite32_rep(info->nand.IO_ADDR_W,
						(u32 *)info->buf, bytes >> 2);
		info->buf = info->buf + bytes;
		info->buf_len -= bytes;

	} else {
		/* read path: drain whatever the FIFO currently holds */
		ioread32_rep(info->nand.IO_ADDR_R,
						(u32 *)info->buf, bytes >> 2);
		info->buf = info->buf + bytes;

		if (irq_stat & 0x2)
			goto done;
	}
	/* ack this event and wait for the next FIFO threshold */
	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);

	return IRQ_HANDLED;

done:
	complete(&info->comp);
	/* disable irq */
	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);

	/* clear status */
	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);

	return IRQ_HANDLED;
}
506
/*
 * omap_read_buf_irq_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 *
 * Interrupt-driven prefetch read: the heavy lifting happens in
 * omap_nand_irq(); this function arms the engine and sleeps on the
 * completion.
 */
static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	int ret = 0;

	/* small (OOB-sized) transfers are cheaper via polled prefetch */
	if (len <= mtd->oobsize) {
		omap_read_buf_pref(mtd, buf, len);
		return;
	}

	info->iomode = OMAP_NAND_IO_READ;
	info->buf = buf;
	init_completion(&info->comp);

	/* configure and start prefetch transfer; threshold is halved for
	 * reads -- presumably to get an irq while data is still arriving,
	 * TODO confirm against the GPMC prefetch documentation */
	ret = gpmc_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	info->buf_len = len;
	/* enable irq */
	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
		(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));

	/* waiting for read to complete */
	wait_for_completion(&info->comp);

	/* disable and stop the PFPW engine */
	gpmc_prefetch_reset(info->gpmc_cs);
	return;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_read_buf16(mtd, buf, len);
	else
		omap_read_buf8(mtd, buf, len);
}
553
/*
 * omap_write_buf_irq_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 *
 * Interrupt-driven prefetch write, mirror of omap_read_buf_irq_pref():
 * omap_nand_irq() feeds the FIFO, this function arms the engine, sleeps
 * on the completion and then waits for the engine to drain.
 */
static void omap_write_buf_irq_pref(struct mtd_info *mtd,
				const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	int ret = 0;
	unsigned long tim, limit;

	/* small (OOB-sized) transfers are cheaper via polled prefetch */
	if (len <= mtd->oobsize) {
		omap_write_buf_pref(mtd, buf, len);
		return;
	}

	info->iomode = OMAP_NAND_IO_WRITE;
	info->buf = (u_char *) buf;
	init_completion(&info->comp);

	/* configure and start prefetch transfer : size=24 */
	ret = gpmc_prefetch_enable(info->gpmc_cs,
		(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	info->buf_len = len;
	/* enable irq */
	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
			(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));

	/* waiting for write to complete */
	wait_for_completion(&info->comp);
	/* wait for data to flushed-out before reset the prefetch */
	tim = 0;
	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
	while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
		cpu_relax();

	/* disable and stop the PFPW engine */
	gpmc_prefetch_reset(info->gpmc_cs);
	return;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_write_buf16(mtd, buf, len);
	else
		omap_write_buf8(mtd, buf, len);
}
607
67ce04bf
VS
608/**
609 * omap_verify_buf - Verify chip data against buffer
610 * @mtd: MTD device structure
611 * @buf: buffer containing the data to compare
612 * @len: number of bytes to compare
613 */
614static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
615{
616 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
617 mtd);
618 u16 *p = (u16 *) buf;
619
620 len >>= 1;
621 while (len--) {
622 if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
623 return -EFAULT;
624 }
625
626 return 0;
627}
628
629#ifdef CONFIG_MTD_NAND_OMAP_HWECC
67ce04bf
VS
630
631/**
632 * gen_true_ecc - This function will generate true ECC value
633 * @ecc_buf: buffer to store ecc code
634 *
635 * This generated true ECC value can be used when correcting
636 * data read from NAND flash memory core
637 */
638static void gen_true_ecc(u8 *ecc_buf)
639{
640 u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
641 ((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
642
643 ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
644 P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
645 ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
646 P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
647 ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
648 P1e(tmp) | P2048o(tmp) | P2048e(tmp));
649}
650
/**
 * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
 * @ecc_data1: ecc code from nand spare area
 * @ecc_data2: ecc code from hardware register obtained from hardware ecc
 * @page_data: page data
 *
 * This function compares two ECC's and indicates if there is an error.
 * If the error can be corrected it will be corrected to the buffer.
 * NOTE: both ECC buffers are destroyed in the process (expanded to
 * true ECC, inverted, then consumed bit by bit).
 */
static int omap_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
			    u8 *ecc_data2,	/* read from register */
			    u8 *page_data)
{
	uint	i;
	u8	tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
	u8	comp0_bit[8], comp1_bit[8], comp2_bit[8];
	u8	ecc_bit[24];
	u8	ecc_sum = 0;
	u8	find_bit = 0;
	uint	find_byte = 0;
	int	isEccFF;

	/* all-0xFF stored ECC means the page was never programmed */
	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);

	gen_true_ecc(ecc_data1);
	gen_true_ecc(ecc_data2);

	/* undo the inversion done by gen_true_ecc() */
	for (i = 0; i <= 2; i++) {
		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
	}

	/* explode each ECC byte into single-bit arrays (LSB first) */
	for (i = 0; i < 8; i++) {
		tmp0_bit[i] = *ecc_data1 % 2;
		*ecc_data1 = *ecc_data1 / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp1_bit[i] = *(ecc_data1 + 1) % 2;
		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp2_bit[i] = *(ecc_data1 + 2) % 2;
		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp0_bit[i] = *ecc_data2 % 2;
		*ecc_data2 = *ecc_data2 / 2;
	}

	for (i = 0; i < 8; i++) {
		comp1_bit[i] = *(ecc_data2 + 1) % 2;
		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp2_bit[i] = *(ecc_data2 + 2) % 2;
		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
	}

	/* build the 24-bit syndrome: XOR of stored vs computed parity,
	 * reordered so even/odd parity pairs sit next to each other */
	for (i = 0; i < 6; i++)
		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];

	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];

	/* Hamming property: a single-bit data error flips exactly one of
	 * each even/odd parity pair, giving a syndrome weight of 12 */
	for (i = 0; i < 24; i++)
		ecc_sum += ecc_bit[i];

	switch (ecc_sum) {
	case 0:
		/* Not reached because this function is not called if
		 *  ECC values are equal
		 */
		return 0;

	case 1:
		/* Uncorrectable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
		return -1;

	case 11:
		/* UN-Correctable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
		return -1;

	case 12:
		/* Correctable error: the odd-parity syndrome bits encode
		 * the failing byte offset and bit position directly */
		find_byte = (ecc_bit[23] << 8) +
			    (ecc_bit[21] << 7) +
			    (ecc_bit[19] << 6) +
			    (ecc_bit[17] << 5) +
			    (ecc_bit[15] << 4) +
			    (ecc_bit[13] << 3) +
			    (ecc_bit[11] << 2) +
			    (ecc_bit[9]  << 1) +
			    ecc_bit[7];

		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];

		DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
				"offset: %d, bit: %d\n", find_byte, find_bit);

		page_data[find_byte] ^= (1 << find_bit);

		return 0;
	default:
		if (isEccFF) {
			/* erased page with a clean computed ECC: no error */
			if (ecc_data2[0] == 0 &&
			    ecc_data2[1] == 0 &&
			    ecc_data2[2] == 0)
				return 0;
		}
		DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
		return -1;
	}
}
776
777/**
778 * omap_correct_data - Compares the ECC read with HW generated ECC
779 * @mtd: MTD device structure
780 * @dat: page data
781 * @read_ecc: ecc read from nand flash
782 * @calc_ecc: ecc read from HW ECC registers
783 *
784 * Compares the ecc read from nand spare area with ECC registers values
785 * and if ECC's mismached, it will call 'omap_compare_ecc' for error detection
786 * and correction.
787 */
788static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
789 u_char *read_ecc, u_char *calc_ecc)
790{
791 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
792 mtd);
793 int blockCnt = 0, i = 0, ret = 0;
794
795 /* Ex NAND_ECC_HW12_2048 */
796 if ((info->nand.ecc.mode == NAND_ECC_HW) &&
797 (info->nand.ecc.size == 2048))
798 blockCnt = 4;
799 else
800 blockCnt = 1;
801
802 for (i = 0; i < blockCnt; i++) {
803 if (memcmp(read_ecc, calc_ecc, 3) != 0) {
804 ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
805 if (ret < 0)
806 return ret;
807 }
808 read_ecc += 3;
809 calc_ecc += 3;
810 dat += 512;
811 }
812 return 0;
813}
814
815/**
816 * omap_calcuate_ecc - Generate non-inverted ECC bytes.
817 * @mtd: MTD device structure
818 * @dat: The pointer to data on which ecc is computed
819 * @ecc_code: The ecc_code buffer
820 *
821 * Using noninverted ECC can be considered ugly since writing a blank
822 * page ie. padding will clear the ECC bytes. This is no problem as long
823 * nobody is trying to write data on the seemingly unused page. Reading
824 * an erased page will produce an ECC mismatch between generated and read
825 * ECC bytes that has to be dealt with separately.
826 */
827static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
828 u_char *ecc_code)
829{
830 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
831 mtd);
2c01946c 832 return gpmc_calculate_ecc(info->gpmc_cs, dat, ecc_code);
67ce04bf
VS
833}
834
835/**
836 * omap_enable_hwecc - This function enables the hardware ecc functionality
837 * @mtd: MTD device structure
838 * @mode: Read/Write mode
839 */
840static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
841{
842 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
843 mtd);
844 struct nand_chip *chip = mtd->priv;
845 unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
67ce04bf 846
2c01946c 847 gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
67ce04bf 848}
2c01946c 849
67ce04bf
VS
850#endif
851
852/**
853 * omap_wait - wait until the command is done
854 * @mtd: MTD device structure
855 * @chip: NAND Chip structure
856 *
857 * Wait function is called during Program and erase operations and
858 * the way it is called from MTD layer, we should wait till the NAND
859 * chip is ready after the programming/erase operation has completed.
860 *
861 * Erase can take up to 400ms and program up to 20ms according to
862 * general NAND and SmartMedia specs
863 */
864static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
865{
866 struct nand_chip *this = mtd->priv;
867 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
868 mtd);
869 unsigned long timeo = jiffies;
c276aca4 870 int status = NAND_STATUS_FAIL, state = this->state;
67ce04bf
VS
871
872 if (state == FL_ERASING)
873 timeo += (HZ * 400) / 1000;
874 else
875 timeo += (HZ * 20) / 1000;
876
2c01946c
SG
877 gpmc_nand_write(info->gpmc_cs,
878 GPMC_NAND_COMMAND, (NAND_CMD_STATUS & 0xFF));
67ce04bf 879 while (time_before(jiffies, timeo)) {
2c01946c 880 status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
c276aca4 881 if (status & NAND_STATUS_READY)
67ce04bf 882 break;
c276aca4 883 cond_resched();
67ce04bf
VS
884 }
885 return status;
886}
887
/**
 * omap_dev_ready - calls the platform specific dev_ready function
 * @mtd: MTD device structure
 *
 * Polls bit 0x100 (wait-pin status change) of the GPMC irq status.
 * NOTE(review): the bounded-poll branch returns 0 ("not ready") when
 * the bit finally appears and never acks it -- looks inverted relative
 * to the fast path; confirm against the GPMC wait-pin semantics before
 * relying on this path.
 */
static int omap_dev_ready(struct mtd_info *mtd)
{
	unsigned int val = 0;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);

	val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
	if ((val & 0x100) == 0x100) {
		/* Clear IRQ Interrupt */
		val |= 0x100;
		val &= ~(0x0);
		gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val);
	} else {
		/* bounded busy-poll for the wait-pin event */
		unsigned int cnt = 0;
		while (cnt++ < 0x1FF) {
			if ((val & 0x100) == 0x100)
				return 0;
			val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
		}
	}

	return 1;
}
915
916static int __devinit omap_nand_probe(struct platform_device *pdev)
917{
918 struct omap_nand_info *info;
919 struct omap_nand_platform_data *pdata;
920 int err;
67ce04bf
VS
921
922 pdata = pdev->dev.platform_data;
923 if (pdata == NULL) {
924 dev_err(&pdev->dev, "platform data missing\n");
925 return -ENODEV;
926 }
927
928 info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
929 if (!info)
930 return -ENOMEM;
931
932 platform_set_drvdata(pdev, info);
933
934 spin_lock_init(&info->controller.lock);
935 init_waitqueue_head(&info->controller.wq);
936
937 info->pdev = pdev;
938
939 info->gpmc_cs = pdata->cs;
2f70a1e9 940 info->phys_base = pdata->phys_base;
67ce04bf
VS
941
942 info->mtd.priv = &info->nand;
943 info->mtd.name = dev_name(&pdev->dev);
944 info->mtd.owner = THIS_MODULE;
945
d5ce2b65 946 info->nand.options = pdata->devsize;
2f70a1e9 947 info->nand.options |= NAND_SKIP_BBTSCAN;
67ce04bf
VS
948
949 /* NAND write protect off */
2c01946c 950 gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);
67ce04bf
VS
951
952 if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
953 pdev->dev.driver->name)) {
954 err = -EBUSY;
2f70a1e9 955 goto out_free_info;
67ce04bf
VS
956 }
957
958 info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
959 if (!info->nand.IO_ADDR_R) {
960 err = -ENOMEM;
961 goto out_release_mem_region;
962 }
59e9c5ae 963
67ce04bf
VS
964 info->nand.controller = &info->controller;
965
966 info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
967 info->nand.cmd_ctrl = omap_hwcontrol;
968
67ce04bf
VS
969 /*
970 * If RDY/BSY line is connected to OMAP then use the omap ready
971 * funcrtion and the generic nand_wait function which reads the status
972 * register after monitoring the RDY/BSY line.Otherwise use a standard
973 * chip delay which is slightly more than tR (AC Timing) of the NAND
974 * device and read status register until you get a failure or success
975 */
976 if (pdata->dev_ready) {
977 info->nand.dev_ready = omap_dev_ready;
978 info->nand.chip_delay = 0;
979 } else {
980 info->nand.waitfunc = omap_wait;
981 info->nand.chip_delay = 50;
982 }
983
1b0b323c
SG
984 switch (pdata->xfer_type) {
985 case NAND_OMAP_PREFETCH_POLLED:
59e9c5ae 986 info->nand.read_buf = omap_read_buf_pref;
987 info->nand.write_buf = omap_write_buf_pref;
1b0b323c
SG
988 break;
989
990 case NAND_OMAP_POLLED:
59e9c5ae 991 if (info->nand.options & NAND_BUSWIDTH_16) {
992 info->nand.read_buf = omap_read_buf16;
993 info->nand.write_buf = omap_write_buf16;
994 } else {
995 info->nand.read_buf = omap_read_buf8;
996 info->nand.write_buf = omap_write_buf8;
997 }
1b0b323c
SG
998 break;
999
1000 case NAND_OMAP_PREFETCH_DMA:
1001 err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
1002 omap_nand_dma_cb, &info->comp, &info->dma_ch);
1003 if (err < 0) {
1004 info->dma_ch = -1;
1005 dev_err(&pdev->dev, "DMA request failed!\n");
1006 goto out_release_mem_region;
1007 } else {
1008 omap_set_dma_dest_burst_mode(info->dma_ch,
1009 OMAP_DMA_DATA_BURST_16);
1010 omap_set_dma_src_burst_mode(info->dma_ch,
1011 OMAP_DMA_DATA_BURST_16);
1012
1013 info->nand.read_buf = omap_read_buf_dma_pref;
1014 info->nand.write_buf = omap_write_buf_dma_pref;
1015 }
1016 break;
1017
4e070376
SG
1018 case NAND_OMAP_PREFETCH_IRQ:
1019 err = request_irq(pdata->gpmc_irq,
1020 omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
1021 if (err) {
1022 dev_err(&pdev->dev, "requesting irq(%d) error:%d",
1023 pdata->gpmc_irq, err);
1024 goto out_release_mem_region;
1025 } else {
1026 info->gpmc_irq = pdata->gpmc_irq;
1027 info->nand.read_buf = omap_read_buf_irq_pref;
1028 info->nand.write_buf = omap_write_buf_irq_pref;
1029 }
1030 break;
1031
1b0b323c
SG
1032 default:
1033 dev_err(&pdev->dev,
1034 "xfer_type(%d) not supported!\n", pdata->xfer_type);
1035 err = -EINVAL;
1036 goto out_release_mem_region;
59e9c5ae 1037 }
1b0b323c 1038
59e9c5ae 1039 info->nand.verify_buf = omap_verify_buf;
1040
67ce04bf
VS
1041#ifdef CONFIG_MTD_NAND_OMAP_HWECC
1042 info->nand.ecc.bytes = 3;
1043 info->nand.ecc.size = 512;
1044 info->nand.ecc.calculate = omap_calculate_ecc;
1045 info->nand.ecc.hwctl = omap_enable_hwecc;
1046 info->nand.ecc.correct = omap_correct_data;
1047 info->nand.ecc.mode = NAND_ECC_HW;
1048
67ce04bf
VS
1049#else
1050 info->nand.ecc.mode = NAND_ECC_SOFT;
1051#endif
1052
1053 /* DIP switches on some boards change between 8 and 16 bit
1054 * bus widths for flash. Try the other width if the first try fails.
1055 */
1056 if (nand_scan(&info->mtd, 1)) {
1057 info->nand.options ^= NAND_BUSWIDTH_16;
1058 if (nand_scan(&info->mtd, 1)) {
1059 err = -ENXIO;
1060 goto out_release_mem_region;
1061 }
1062 }
1063
1b0b323c 1064
67ce04bf
VS
1065#ifdef CONFIG_MTD_PARTITIONS
1066 err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
1067 if (err > 0)
1068 add_mtd_partitions(&info->mtd, info->parts, err);
1069 else if (pdata->parts)
1070 add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
1071 else
1072#endif
1073 add_mtd_device(&info->mtd);
1074
1075 platform_set_drvdata(pdev, &info->mtd);
1076
1077 return 0;
1078
1079out_release_mem_region:
1080 release_mem_region(info->phys_base, NAND_IO_SIZE);
67ce04bf
VS
1081out_free_info:
1082 kfree(info);
1083
1084 return err;
1085}
1086
1087static int omap_nand_remove(struct platform_device *pdev)
1088{
1089 struct mtd_info *mtd = platform_get_drvdata(pdev);
f35b6eda
VS
1090 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1091 mtd);
67ce04bf
VS
1092
1093 platform_set_drvdata(pdev, NULL);
1b0b323c 1094 if (info->dma_ch != -1)
dfe32893 1095 omap_free_dma(info->dma_ch);
1096
4e070376
SG
1097 if (info->gpmc_irq)
1098 free_irq(info->gpmc_irq, info);
1099
67ce04bf
VS
1100 /* Release NAND device, its internal structures and partitions */
1101 nand_release(&info->mtd);
2c01946c 1102 iounmap(info->nand.IO_ADDR_R);
67ce04bf
VS
1103 kfree(&info->mtd);
1104 return 0;
1105}
1106
/* platform glue: matched against devices registered as DRIVER_NAME */
static struct platform_driver omap_nand_driver = {
	.probe		= omap_nand_probe,
	.remove		= omap_nand_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};
1115
1116static int __init omap_nand_init(void)
1117{
1b0b323c 1118 pr_info("%s driver initializing\n", DRIVER_NAME);
dfe32893 1119
67ce04bf
VS
1120 return platform_driver_register(&omap_nand_driver);
1121}
1122
1123static void __exit omap_nand_exit(void)
1124{
1125 platform_driver_unregister(&omap_nand_driver);
1126}
1127
1128module_init(omap_nand_init);
1129module_exit(omap_nand_exit);
1130
1131MODULE_ALIAS(DRIVER_NAME);
1132MODULE_LICENSE("GPL");
1133MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
This page took 0.164367 seconds and 5 git commands to generate.