mtd: nand: pxa3xx: Clean pxa_ecc_init() error handling
fe69af00 1/*
2 * drivers/mtd/nand/pxa3xx_nand.c
3 *
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
de484a38
EG
10 *
11 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
fe69af00 12 */
13
a88bdbb5 14#include <linux/kernel.h>
fe69af00 15#include <linux/module.h>
16#include <linux/interrupt.h>
17#include <linux/platform_device.h>
18#include <linux/dma-mapping.h>
19#include <linux/delay.h>
20#include <linux/clk.h>
21#include <linux/mtd/mtd.h>
22#include <linux/mtd/nand.h>
23#include <linux/mtd/partitions.h>
a1c06ee1
DW
24#include <linux/io.h>
25#include <linux/irq.h>
5a0e3ad6 26#include <linux/slab.h>
1e7ba630
DM
27#include <linux/of.h>
28#include <linux/of_device.h>
776f265e 29#include <linux/of_mtd.h>
fe69af00 30
f4db2e3a
EG
31#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
32#define ARCH_HAS_DMA
33#endif
34
35#ifdef ARCH_HAS_DMA
afb5b5c9 36#include <mach/dma.h>
f4db2e3a
EG
37#endif
38
293b2da1 39#include <linux/platform_data/mtd-nand-pxa3xx.h>
fe69af00 40
41#define CHIP_DELAY_TIMEOUT (2 * HZ/10)
f8155a40 42#define NAND_STOP_DELAY (2 * HZ/50)
4eb2da89 43#define PAGE_CHUNK_SIZE (2048)
fe69af00 44
62e8b851
EG
45/*
46 * Define a buffer size for the initial command that detects the flash device:
47 * STATUS, READID and PARAM. The largest of these is the PARAM command,
48 * needing 256 bytes.
49 */
50#define INIT_BUFFER_SIZE 256
51
fe69af00 52/* registers and bit definitions */
53#define NDCR (0x00) /* Control register */
54#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
55#define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
56#define NDSR (0x14) /* Status Register */
57#define NDPCR (0x18) /* Page Count Register */
58#define NDBDR0 (0x1C) /* Bad Block Register 0 */
59#define NDBDR1 (0x20) /* Bad Block Register 1 */
43bcfd2b 60#define NDECCCTRL (0x28) /* ECC control */
fe69af00 61#define NDDB (0x40) /* Data Buffer */
62#define NDCB0 (0x48) /* Command Buffer0 */
63#define NDCB1 (0x4C) /* Command Buffer1 */
64#define NDCB2 (0x50) /* Command Buffer2 */
65
66#define NDCR_SPARE_EN (0x1 << 31)
67#define NDCR_ECC_EN (0x1 << 30)
68#define NDCR_DMA_EN (0x1 << 29)
69#define NDCR_ND_RUN (0x1 << 28)
70#define NDCR_DWIDTH_C (0x1 << 27)
71#define NDCR_DWIDTH_M (0x1 << 26)
72#define NDCR_PAGE_SZ (0x1 << 24)
73#define NDCR_NCSX (0x1 << 23)
74#define NDCR_ND_MODE (0x3 << 21)
75#define NDCR_NAND_MODE (0x0)
76#define NDCR_CLR_PG_CNT (0x1 << 20)
f8155a40 77#define NDCR_STOP_ON_UNCOR (0x1 << 19)
fe69af00 78#define NDCR_RD_ID_CNT_MASK (0x7 << 16)
79#define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
80
81#define NDCR_RA_START (0x1 << 15)
82#define NDCR_PG_PER_BLK (0x1 << 14)
83#define NDCR_ND_ARB_EN (0x1 << 12)
f8155a40 84#define NDCR_INT_MASK (0xFFF)
fe69af00 85
86#define NDSR_MASK (0xfff)
87f5336e
EG
87#define NDSR_ERR_CNT_OFF (16)
88#define NDSR_ERR_CNT_MASK (0x1f)
89#define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
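/*
 * For example, an NDSR value of 0x000a0000 makes NDSR_ERR_CNT() report
 * 10 correctable bit errors; the field is only meaningful on the
 * Armada 370/XP (NFCv2) variant when BCH ECC is enabled.
 */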
f8155a40
LW
90#define NDSR_RDY (0x1 << 12)
91#define NDSR_FLASH_RDY (0x1 << 11)
fe69af00 92#define NDSR_CS0_PAGED (0x1 << 10)
93#define NDSR_CS1_PAGED (0x1 << 9)
94#define NDSR_CS0_CMDD (0x1 << 8)
95#define NDSR_CS1_CMDD (0x1 << 7)
96#define NDSR_CS0_BBD (0x1 << 6)
97#define NDSR_CS1_BBD (0x1 << 5)
87f5336e
EG
98#define NDSR_UNCORERR (0x1 << 4)
99#define NDSR_CORERR (0x1 << 3)
fe69af00 100#define NDSR_WRDREQ (0x1 << 2)
101#define NDSR_RDDREQ (0x1 << 1)
102#define NDSR_WRCMDREQ (0x1)
103
41a63430 104#define NDCB0_LEN_OVRD (0x1 << 28)
4eb2da89 105#define NDCB0_ST_ROW_EN (0x1 << 26)
fe69af00 106#define NDCB0_AUTO_RS (0x1 << 25)
107#define NDCB0_CSEL (0x1 << 24)
70ed8523
EG
108#define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
109#define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
fe69af00 110#define NDCB0_CMD_TYPE_MASK (0x7 << 21)
111#define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
112#define NDCB0_NC (0x1 << 20)
113#define NDCB0_DBC (0x1 << 19)
114#define NDCB0_ADDR_CYC_MASK (0x7 << 16)
115#define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
116#define NDCB0_CMD2_MASK (0xff << 8)
117#define NDCB0_CMD1_MASK (0xff)
118#define NDCB0_ADDR_CYC_SHIFT (16)
119
70ed8523
EG
120#define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
121#define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
122#define EXT_CMD_TYPE_READ 4 /* Read */
123#define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
124#define EXT_CMD_TYPE_FINAL 3 /* Final command */
125#define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
126#define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
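/*
 * The extended command types above are placed in NDCB0[31:29] via
 * NDCB0_EXT_CMD_TYPE() and are only used by the NFCv2 (Armada 370/XP)
 * command path when the page is larger than the 2 KiB FIFO chunk.
 */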
127
fe69af00 128/* macros for registers read/write */
129#define nand_writel(info, off, val) \
130 __raw_writel((val), (info)->mmio_base + (off))
131
132#define nand_readl(info, off) \
133 __raw_readl((info)->mmio_base + (off))
134
135/* error code and state */
136enum {
137 ERR_NONE = 0,
138 ERR_DMABUSERR = -1,
139 ERR_SENDCMD = -2,
87f5336e 140 ERR_UNCORERR = -3,
fe69af00 141 ERR_BBERR = -4,
87f5336e 142 ERR_CORERR = -5,
fe69af00 143};
144
145enum {
f8155a40 146 STATE_IDLE = 0,
d456882b 147 STATE_PREPARED,
fe69af00 148 STATE_CMD_HANDLE,
149 STATE_DMA_READING,
150 STATE_DMA_WRITING,
151 STATE_DMA_DONE,
152 STATE_PIO_READING,
153 STATE_PIO_WRITING,
f8155a40
LW
154 STATE_CMD_DONE,
155 STATE_READY,
fe69af00 156};
157
c0f3b864
EG
158enum pxa3xx_nand_variant {
159 PXA3XX_NAND_VARIANT_PXA,
160 PXA3XX_NAND_VARIANT_ARMADA370,
161};
162
d456882b
LW
163struct pxa3xx_nand_host {
164 struct nand_chip chip;
d456882b
LW
165 struct mtd_info *mtd;
166 void *info_data;
167
168 /* page size of attached chip */
d456882b 169 int use_ecc;
f3c8cfc2 170 int cs;
fe69af00 171
d456882b
LW
172 /* calculated from pxa3xx_nand_flash data */
173 unsigned int col_addr_cycles;
174 unsigned int row_addr_cycles;
175 size_t read_id_bytes;
176
d456882b
LW
177};
178
179struct pxa3xx_nand_info {
401e67e2 180 struct nand_hw_control controller;
fe69af00 181 struct platform_device *pdev;
fe69af00 182
183 struct clk *clk;
184 void __iomem *mmio_base;
8638fac8 185 unsigned long mmio_phys;
55d9fd6e 186 struct completion cmd_complete, dev_ready;
fe69af00 187
188 unsigned int buf_start;
189 unsigned int buf_count;
62e8b851 190 unsigned int buf_size;
fa543bef
EG
191 unsigned int data_buff_pos;
192 unsigned int oob_buff_pos;
fe69af00 193
194 /* DMA information */
195 int drcmr_dat;
196 int drcmr_cmd;
197
198 unsigned char *data_buff;
18c81b18 199 unsigned char *oob_buff;
fe69af00 200 dma_addr_t data_buff_phys;
fe69af00 201 int data_dma_ch;
202 struct pxa_dma_desc *data_desc;
203 dma_addr_t data_desc_addr;
204
f3c8cfc2 205 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
fe69af00 206 unsigned int state;
207
c0f3b864
EG
208 /*
209 * This driver supports NFCv1 (as found in PXA SoC)
210 * and NFCv2 (as found in Armada 370/XP SoC).
211 */
212 enum pxa3xx_nand_variant variant;
213
f3c8cfc2 214 int cs;
fe69af00 215 int use_ecc; /* use HW ECC ? */
43bcfd2b 216 int ecc_bch; /* using BCH ECC? */
fe69af00 217 int use_dma; /* use DMA ? */
5bb653e8 218 int use_spare; /* use spare ? */
55d9fd6e 219 int need_wait;
fe69af00 220
2128b08c 221 unsigned int data_size; /* data to be read from FIFO */
70ed8523 222 unsigned int chunk_size; /* split commands chunk size */
d456882b 223 unsigned int oob_size;
43bcfd2b
EG
224 unsigned int spare_size;
225 unsigned int ecc_size;
87f5336e
EG
226 unsigned int ecc_err_cnt;
227 unsigned int max_bitflips;
fe69af00 228 int retcode;
fe69af00 229
48cf7efa
EG
230 /* cached register value */
231 uint32_t reg_ndcr;
232 uint32_t ndtr0cs0;
233 uint32_t ndtr1cs0;
234
fe69af00 235 /* generated NDCBx register values */
236 uint32_t ndcb0;
237 uint32_t ndcb1;
238 uint32_t ndcb2;
3a1a344a 239 uint32_t ndcb3;
fe69af00 240};
241
90ab5ee9 242static bool use_dma = 1;
fe69af00 243module_param(use_dma, bool, 0444);
25985edc 244MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
fe69af00 245
c1f82478 246static struct pxa3xx_nand_timing timing[] = {
227a886c
LW
247 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
248 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
249 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
250 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
d3490dfd
HZ
251};
252
c1f82478 253static struct pxa3xx_nand_flash builtin_flash_types[] = {
4332c116
LW
254{ "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing[0] },
255{ "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing[1] },
256{ "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing[1] },
257{ "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing[1] },
258{ "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing[2] },
259{ "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing[2] },
260{ "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing[2] },
261{ "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing[2] },
262{ "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] },
d3490dfd
HZ
263};
264
776f265e
EG
265static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
266static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
267
268static struct nand_bbt_descr bbt_main_descr = {
269 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
270 | NAND_BBT_2BIT | NAND_BBT_VERSION,
271 .offs = 8,
272 .len = 6,
273 .veroffs = 14,
274 .maxblocks = 8, /* Last 8 blocks in each chip */
275 .pattern = bbt_pattern
276};
277
278static struct nand_bbt_descr bbt_mirror_descr = {
279 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
280 | NAND_BBT_2BIT | NAND_BBT_VERSION,
281 .offs = 8,
282 .len = 6,
283 .veroffs = 14,
284 .maxblocks = 8, /* Last 8 blocks in each chip */
285 .pattern = bbt_mirror_pattern
286};
287
3db227b6
RG
288static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
289 .eccbytes = 32,
290 .eccpos = {
291 32, 33, 34, 35, 36, 37, 38, 39,
292 40, 41, 42, 43, 44, 45, 46, 47,
293 48, 49, 50, 51, 52, 53, 54, 55,
294 56, 57, 58, 59, 60, 61, 62, 63},
295 .oobfree = { {2, 30} }
296};
297
70ed8523
EG
298static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
299 .eccbytes = 64,
300 .eccpos = {
301 32, 33, 34, 35, 36, 37, 38, 39,
302 40, 41, 42, 43, 44, 45, 46, 47,
303 48, 49, 50, 51, 52, 53, 54, 55,
304 56, 57, 58, 59, 60, 61, 62, 63,
305 96, 97, 98, 99, 100, 101, 102, 103,
306 104, 105, 106, 107, 108, 109, 110, 111,
307 112, 113, 114, 115, 116, 117, 118, 119,
308 120, 121, 122, 123, 124, 125, 126, 127},
309 /* Bootrom looks in bytes 0 & 5 for bad blocks */
310 .oobfree = { {6, 26}, { 64, 32} }
311};
312
313static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
314 .eccbytes = 128,
315 .eccpos = {
316 32, 33, 34, 35, 36, 37, 38, 39,
317 40, 41, 42, 43, 44, 45, 46, 47,
318 48, 49, 50, 51, 52, 53, 54, 55,
319 56, 57, 58, 59, 60, 61, 62, 63},
320 .oobfree = { }
321};
322
227a886c
LW
323/* Define a default flash type setting that serves only for flash detection */
324#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
325
fe69af00 326#define NDTR0_tCH(c) (min((c), 7) << 19)
327#define NDTR0_tCS(c) (min((c), 7) << 16)
328#define NDTR0_tWH(c) (min((c), 7) << 11)
329#define NDTR0_tWP(c) (min((c), 7) << 8)
330#define NDTR0_tRH(c) (min((c), 7) << 3)
331#define NDTR0_tRP(c) (min((c), 7) << 0)
332
333#define NDTR1_tR(c) (min((c), 65535) << 16)
334#define NDTR1_tWHR(c) (min((c), 15) << 4)
335#define NDTR1_tAR(c) (min((c), 15) << 0)
336
337/* convert nano-seconds to nand flash controller clock cycles */
93b352fc 338#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
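/*
 * ns2cycle() truncates towards zero: with, say, a 156 MHz NAND clock,
 * ns2cycle(40, 156000000) = (40 * 156) / 1000 = 6 cycles. The NDTR0/NDTR1
 * field macros above then clamp each value to the width of its field.
 */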
fe69af00 339
17754ad6 340static const struct of_device_id pxa3xx_nand_dt_ids[] = {
c7e9c7e7
EG
341 {
342 .compatible = "marvell,pxa3xx-nand",
343 .data = (void *)PXA3XX_NAND_VARIANT_PXA,
344 },
1963ff97
EG
345 {
346 .compatible = "marvell,armada370-nand",
347 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
348 },
c7e9c7e7
EG
349 {}
350};
351MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
352
353static enum pxa3xx_nand_variant
354pxa3xx_nand_get_variant(struct platform_device *pdev)
355{
356 const struct of_device_id *of_id =
357 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
358 if (!of_id)
359 return PXA3XX_NAND_VARIANT_PXA;
360 return (enum pxa3xx_nand_variant)of_id->data;
361}
362
d456882b 363static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
7dad482e 364 const struct pxa3xx_nand_timing *t)
fe69af00 365{
d456882b 366 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 367 unsigned long nand_clk = clk_get_rate(info->clk);
368 uint32_t ndtr0, ndtr1;
369
370 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
371 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
372 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
373 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
374 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
375 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
376
377 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
378 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
379 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
380
48cf7efa
EG
381 info->ndtr0cs0 = ndtr0;
382 info->ndtr1cs0 = ndtr1;
fe69af00 383 nand_writel(info, NDTR0CS0, ndtr0);
384 nand_writel(info, NDTR1CS0, ndtr1);
385}
386
6a3e4865
EG
387/*
388 * Set the data and OOB size, depending on the selected
389 * spare and ECC configuration.
390 * Only applicable to READ0, READOOB and PAGEPROG commands.
391 */
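/*
 * For example, on a read with the spare area enabled but hardware ECC
 * disabled, data_size ends up as mtd->writesize and oob_size as
 * spare_size + ecc_size, so the raw ECC bytes are transferred as well.
 */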
fa543bef
EG
392static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
393 struct mtd_info *mtd)
fe69af00 394{
48cf7efa 395 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
9d8b1043 396
fa543bef 397 info->data_size = mtd->writesize;
43bcfd2b 398 if (!oob_enable)
9d8b1043 399 return;
9d8b1043 400
43bcfd2b
EG
401 info->oob_size = info->spare_size;
402 if (!info->use_ecc)
403 info->oob_size += info->ecc_size;
18c81b18
LW
404}
405
f8155a40
LW
406/**
 407 * NOTE: ND_RUN must be set first and the command buffer written
 408 * afterwards, otherwise the controller does not start.
 409 * We enable all the interrupts at the same time and let
 410 * pxa3xx_nand_irq() handle all the logic.
411 */
412static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
413{
414 uint32_t ndcr;
415
48cf7efa 416 ndcr = info->reg_ndcr;
cd9d1182 417
43bcfd2b 418 if (info->use_ecc) {
cd9d1182 419 ndcr |= NDCR_ECC_EN;
43bcfd2b
EG
420 if (info->ecc_bch)
421 nand_writel(info, NDECCCTRL, 0x1);
422 } else {
cd9d1182 423 ndcr &= ~NDCR_ECC_EN;
43bcfd2b
EG
424 if (info->ecc_bch)
425 nand_writel(info, NDECCCTRL, 0x0);
426 }
cd9d1182
EG
427
428 if (info->use_dma)
429 ndcr |= NDCR_DMA_EN;
430 else
431 ndcr &= ~NDCR_DMA_EN;
432
5bb653e8
EG
433 if (info->use_spare)
434 ndcr |= NDCR_SPARE_EN;
435 else
436 ndcr &= ~NDCR_SPARE_EN;
437
f8155a40
LW
438 ndcr |= NDCR_ND_RUN;
439
440 /* clear status bits and run */
441 nand_writel(info, NDCR, 0);
442 nand_writel(info, NDSR, NDSR_MASK);
443 nand_writel(info, NDCR, ndcr);
444}
445
446static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
447{
448 uint32_t ndcr;
449 int timeout = NAND_STOP_DELAY;
450
451 /* wait RUN bit in NDCR become 0 */
452 ndcr = nand_readl(info, NDCR);
453 while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
454 ndcr = nand_readl(info, NDCR);
455 udelay(1);
456 }
457
458 if (timeout <= 0) {
459 ndcr &= ~NDCR_ND_RUN;
460 nand_writel(info, NDCR, ndcr);
461 }
462 /* clear status bits */
463 nand_writel(info, NDSR, NDSR_MASK);
464}
465
57ff88f0
EG
466static void __maybe_unused
467enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
fe69af00 468{
469 uint32_t ndcr;
470
471 ndcr = nand_readl(info, NDCR);
472 nand_writel(info, NDCR, ndcr & ~int_mask);
473}
474
475static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
476{
477 uint32_t ndcr;
478
479 ndcr = nand_readl(info, NDCR);
480 nand_writel(info, NDCR, ndcr | int_mask);
481}
482
f8155a40 483static void handle_data_pio(struct pxa3xx_nand_info *info)
fe69af00 484{
70ed8523 485 unsigned int do_bytes = min(info->data_size, info->chunk_size);
fa543bef 486
fe69af00 487 switch (info->state) {
488 case STATE_PIO_WRITING:
fa543bef
EG
489 __raw_writesl(info->mmio_base + NDDB,
490 info->data_buff + info->data_buff_pos,
491 DIV_ROUND_UP(do_bytes, 4));
492
9d8b1043 493 if (info->oob_size > 0)
fa543bef
EG
494 __raw_writesl(info->mmio_base + NDDB,
495 info->oob_buff + info->oob_buff_pos,
496 DIV_ROUND_UP(info->oob_size, 4));
fe69af00 497 break;
498 case STATE_PIO_READING:
fa543bef
EG
499 __raw_readsl(info->mmio_base + NDDB,
500 info->data_buff + info->data_buff_pos,
501 DIV_ROUND_UP(do_bytes, 4));
502
9d8b1043 503 if (info->oob_size > 0)
fa543bef
EG
504 __raw_readsl(info->mmio_base + NDDB,
505 info->oob_buff + info->oob_buff_pos,
506 DIV_ROUND_UP(info->oob_size, 4));
fe69af00 507 break;
508 default:
da675b4e 509 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
fe69af00 510 info->state);
f8155a40 511 BUG();
fe69af00 512 }
fa543bef
EG
513
514 /* Update buffer pointers for multi-page read/write */
515 info->data_buff_pos += do_bytes;
516 info->oob_buff_pos += info->oob_size;
517 info->data_size -= do_bytes;
fe69af00 518}
519
f4db2e3a 520#ifdef ARCH_HAS_DMA
f8155a40 521static void start_data_dma(struct pxa3xx_nand_info *info)
fe69af00 522{
523 struct pxa_dma_desc *desc = info->data_desc;
9d8b1043 524 int dma_len = ALIGN(info->data_size + info->oob_size, 32);
fe69af00 525
526 desc->ddadr = DDADR_STOP;
527 desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;
528
f8155a40
LW
529 switch (info->state) {
530 case STATE_DMA_WRITING:
fe69af00 531 desc->dsadr = info->data_buff_phys;
8638fac8 532 desc->dtadr = info->mmio_phys + NDDB;
fe69af00 533 desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
f8155a40
LW
534 break;
535 case STATE_DMA_READING:
fe69af00 536 desc->dtadr = info->data_buff_phys;
8638fac8 537 desc->dsadr = info->mmio_phys + NDDB;
fe69af00 538 desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
f8155a40
LW
539 break;
540 default:
da675b4e 541 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
f8155a40
LW
542 info->state);
543 BUG();
fe69af00 544 }
545
546 DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
547 DDADR(info->data_dma_ch) = info->data_desc_addr;
548 DCSR(info->data_dma_ch) |= DCSR_RUN;
549}
550
551static void pxa3xx_nand_data_dma_irq(int channel, void *data)
552{
553 struct pxa3xx_nand_info *info = data;
554 uint32_t dcsr;
555
556 dcsr = DCSR(channel);
557 DCSR(channel) = dcsr;
558
559 if (dcsr & DCSR_BUSERR) {
560 info->retcode = ERR_DMABUSERR;
fe69af00 561 }
562
f8155a40
LW
563 info->state = STATE_DMA_DONE;
564 enable_int(info, NDCR_INT_MASK);
565 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
fe69af00 566}
f4db2e3a
EG
567#else
568static void start_data_dma(struct pxa3xx_nand_info *info)
569{}
570#endif
fe69af00 571
572static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
573{
574 struct pxa3xx_nand_info *info = devid;
55d9fd6e 575 unsigned int status, is_completed = 0, is_ready = 0;
f3c8cfc2
LW
576 unsigned int ready, cmd_done;
577
578 if (info->cs == 0) {
579 ready = NDSR_FLASH_RDY;
580 cmd_done = NDSR_CS0_CMDD;
581 } else {
582 ready = NDSR_RDY;
583 cmd_done = NDSR_CS1_CMDD;
584 }
fe69af00 585
586 status = nand_readl(info, NDSR);
587
87f5336e
EG
588 if (status & NDSR_UNCORERR)
589 info->retcode = ERR_UNCORERR;
590 if (status & NDSR_CORERR) {
591 info->retcode = ERR_CORERR;
592 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
593 info->ecc_bch)
594 info->ecc_err_cnt = NDSR_ERR_CNT(status);
595 else
596 info->ecc_err_cnt = 1;
597
598 /*
599 * Each chunk composing a page is corrected independently,
600 * and we need to store maximum number of corrected bitflips
601 * to return it to the MTD layer in ecc.read_page().
602 */
603 info->max_bitflips = max_t(unsigned int,
604 info->max_bitflips,
605 info->ecc_err_cnt);
606 }
f8155a40
LW
607 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
 608 /* decide whether to use DMA to transfer the data */
fe69af00 609 if (info->use_dma) {
f8155a40
LW
610 disable_int(info, NDCR_INT_MASK);
611 info->state = (status & NDSR_RDDREQ) ?
612 STATE_DMA_READING : STATE_DMA_WRITING;
613 start_data_dma(info);
614 goto NORMAL_IRQ_EXIT;
fe69af00 615 } else {
f8155a40
LW
616 info->state = (status & NDSR_RDDREQ) ?
617 STATE_PIO_READING : STATE_PIO_WRITING;
618 handle_data_pio(info);
fe69af00 619 }
fe69af00 620 }
f3c8cfc2 621 if (status & cmd_done) {
f8155a40
LW
622 info->state = STATE_CMD_DONE;
623 is_completed = 1;
fe69af00 624 }
f3c8cfc2 625 if (status & ready) {
f8155a40 626 info->state = STATE_READY;
55d9fd6e 627 is_ready = 1;
401e67e2 628 }
fe69af00 629
f8155a40
LW
630 if (status & NDSR_WRCMDREQ) {
631 nand_writel(info, NDSR, NDSR_WRCMDREQ);
632 status &= ~NDSR_WRCMDREQ;
633 info->state = STATE_CMD_HANDLE;
3a1a344a
EG
634
635 /*
636 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
 637 * must be loaded by writing either 12 or 16 bytes
 638 * directly to NDCB0, four bytes at a time.
639 *
640 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
641 * but each NDCBx register can be read.
642 */
f8155a40
LW
643 nand_writel(info, NDCB0, info->ndcb0);
644 nand_writel(info, NDCB0, info->ndcb1);
645 nand_writel(info, NDCB0, info->ndcb2);
3a1a344a
EG
646
647 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
648 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
649 nand_writel(info, NDCB0, info->ndcb3);
fe69af00 650 }
651
f8155a40
LW
652 /* clear NDSR to let the controller exit the IRQ */
653 nand_writel(info, NDSR, status);
654 if (is_completed)
655 complete(&info->cmd_complete);
55d9fd6e
EG
656 if (is_ready)
657 complete(&info->dev_ready);
f8155a40
LW
658NORMAL_IRQ_EXIT:
659 return IRQ_HANDLED;
fe69af00 660}
661
fe69af00 662static inline int is_buf_blank(uint8_t *buf, size_t len)
663{
664 for (; len > 0; len--)
665 if (*buf++ != 0xff)
666 return 0;
667 return 1;
668}
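/*
 * is_buf_blank() is used both to skip programming all-0xff pages and to
 * avoid reporting uncorrectable ECC errors on erased pages, for which the
 * controller computes an ECC of 0 that never matches the OOB contents.
 */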
669
86beebae
EG
670static void set_command_address(struct pxa3xx_nand_info *info,
671 unsigned int page_size, uint16_t column, int page_addr)
672{
673 /* small page addr setting */
674 if (page_size < PAGE_CHUNK_SIZE) {
675 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
676 | (column & 0xFF);
677
678 info->ndcb2 = 0;
679 } else {
680 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
681 | (column & 0xFFFF);
682
683 if (page_addr & 0xFF0000)
684 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
685 else
686 info->ndcb2 = 0;
687 }
688}
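/*
 * For a large-page device this packs the address as
 * NDCB1 = page_addr[15:0] << 16 | column[15:0] and NDCB2 = page_addr[23:16],
 * matching the column and row address cycles programmed into NDCB0.
 */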
689
c39ff03a 690static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
fe69af00 691{
39f83d15
EG
692 struct pxa3xx_nand_host *host = info->host[info->cs];
693 struct mtd_info *mtd = host->mtd;
694
4eb2da89 695 /* reset data and oob column point to handle data */
401e67e2
LW
696 info->buf_start = 0;
697 info->buf_count = 0;
4eb2da89 698 info->oob_size = 0;
fa543bef
EG
699 info->data_buff_pos = 0;
700 info->oob_buff_pos = 0;
4eb2da89 701 info->use_ecc = 0;
5bb653e8 702 info->use_spare = 1;
4eb2da89 703 info->retcode = ERR_NONE;
87f5336e 704 info->ecc_err_cnt = 0;
f0e6a32e 705 info->ndcb3 = 0;
d20d0a6c 706 info->need_wait = 0;
fe69af00 707
708 switch (command) {
4eb2da89
LW
709 case NAND_CMD_READ0:
710 case NAND_CMD_PAGEPROG:
711 info->use_ecc = 1;
fe69af00 712 case NAND_CMD_READOOB:
fa543bef 713 pxa3xx_set_datasize(info, mtd);
fe69af00 714 break;
41a63430
EG
715 case NAND_CMD_PARAM:
716 info->use_spare = 0;
717 break;
4eb2da89
LW
718 default:
719 info->ndcb1 = 0;
720 info->ndcb2 = 0;
721 break;
722 }
39f83d15
EG
723
724 /*
725 * If we are about to issue a read command, or about to set
726 * the write address, then clean the data buffer.
727 */
728 if (command == NAND_CMD_READ0 ||
729 command == NAND_CMD_READOOB ||
730 command == NAND_CMD_SEQIN) {
731
732 info->buf_count = mtd->writesize + mtd->oobsize;
733 memset(info->data_buff, 0xFF, info->buf_count);
734 }
735
c39ff03a
EG
736}
737
738static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
70ed8523 739 int ext_cmd_type, uint16_t column, int page_addr)
c39ff03a
EG
740{
741 int addr_cycle, exec_cmd;
742 struct pxa3xx_nand_host *host;
743 struct mtd_info *mtd;
744
745 host = info->host[info->cs];
746 mtd = host->mtd;
747 addr_cycle = 0;
748 exec_cmd = 1;
749
750 if (info->cs != 0)
751 info->ndcb0 = NDCB0_CSEL;
752 else
753 info->ndcb0 = 0;
754
755 if (command == NAND_CMD_SEQIN)
756 exec_cmd = 0;
4eb2da89 757
d456882b
LW
758 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
759 + host->col_addr_cycles);
fe69af00 760
4eb2da89
LW
761 switch (command) {
762 case NAND_CMD_READOOB:
fe69af00 763 case NAND_CMD_READ0:
ec82135a
EG
764 info->buf_start = column;
765 info->ndcb0 |= NDCB0_CMD_TYPE(0)
766 | addr_cycle
767 | NAND_CMD_READ0;
768
4eb2da89 769 if (command == NAND_CMD_READOOB)
ec82135a 770 info->buf_start += mtd->writesize;
4eb2da89 771
70ed8523
EG
772 /*
773 * Multiple page read needs an 'extended command type' field,
774 * which is either naked-read or last-read according to the
775 * state.
776 */
777 if (mtd->writesize == PAGE_CHUNK_SIZE) {
ec82135a 778 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
70ed8523
EG
779 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
780 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
781 | NDCB0_LEN_OVRD
782 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
783 info->ndcb3 = info->chunk_size +
784 info->oob_size;
785 }
fe69af00 786
01d9947e 787 set_command_address(info, mtd->writesize, column, page_addr);
01d9947e
EG
788 break;
789
fe69af00 790 case NAND_CMD_SEQIN:
4eb2da89 791
e7f9a6a4
EG
792 info->buf_start = column;
793 set_command_address(info, mtd->writesize, 0, page_addr);
535cb57a
EG
794
795 /*
796 * Multiple page programming needs to execute the initial
797 * SEQIN command that sets the page address.
798 */
799 if (mtd->writesize > PAGE_CHUNK_SIZE) {
800 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
801 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
802 | addr_cycle
803 | command;
804 /* No data transfer in this case */
805 info->data_size = 0;
806 exec_cmd = 1;
807 }
fe69af00 808 break;
4eb2da89 809
fe69af00 810 case NAND_CMD_PAGEPROG:
4eb2da89
LW
811 if (is_buf_blank(info->data_buff,
812 (mtd->writesize + mtd->oobsize))) {
813 exec_cmd = 0;
814 break;
815 }
fe69af00 816
535cb57a
EG
817 /* Second command setting for large pages */
818 if (mtd->writesize > PAGE_CHUNK_SIZE) {
819 /*
820 * Multiple page write uses the 'extended command'
821 * field. This can be used to issue a command dispatch
822 * or a naked-write depending on the current stage.
823 */
824 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
825 | NDCB0_LEN_OVRD
826 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
827 info->ndcb3 = info->chunk_size +
828 info->oob_size;
829
830 /*
831 * This is the command dispatch that completes a chunked
832 * page program operation.
833 */
834 if (info->data_size == 0) {
835 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
836 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
837 | command;
838 info->ndcb1 = 0;
839 info->ndcb2 = 0;
840 info->ndcb3 = 0;
841 }
842 } else {
843 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
844 | NDCB0_AUTO_RS
845 | NDCB0_ST_ROW_EN
846 | NDCB0_DBC
847 | (NAND_CMD_PAGEPROG << 8)
848 | NAND_CMD_SEQIN
849 | addr_cycle;
850 }
fe69af00 851 break;
4eb2da89 852
ce0268f6 853 case NAND_CMD_PARAM:
ce0268f6
EG
854 info->buf_count = 256;
855 info->ndcb0 |= NDCB0_CMD_TYPE(0)
856 | NDCB0_ADDR_CYC(1)
41a63430 857 | NDCB0_LEN_OVRD
ec82135a 858 | command;
ce0268f6 859 info->ndcb1 = (column & 0xFF);
41a63430 860 info->ndcb3 = 256;
ce0268f6
EG
861 info->data_size = 256;
862 break;
863
fe69af00 864 case NAND_CMD_READID:
d456882b 865 info->buf_count = host->read_id_bytes;
4eb2da89
LW
866 info->ndcb0 |= NDCB0_CMD_TYPE(3)
867 | NDCB0_ADDR_CYC(1)
ec82135a 868 | command;
d14231f1 869 info->ndcb1 = (column & 0xFF);
4eb2da89
LW
870
871 info->data_size = 8;
872 break;
fe69af00 873 case NAND_CMD_STATUS:
4eb2da89
LW
874 info->buf_count = 1;
875 info->ndcb0 |= NDCB0_CMD_TYPE(4)
876 | NDCB0_ADDR_CYC(1)
ec82135a 877 | command;
4eb2da89
LW
878
879 info->data_size = 8;
880 break;
881
882 case NAND_CMD_ERASE1:
4eb2da89
LW
883 info->ndcb0 |= NDCB0_CMD_TYPE(2)
884 | NDCB0_AUTO_RS
885 | NDCB0_ADDR_CYC(3)
886 | NDCB0_DBC
ec82135a
EG
887 | (NAND_CMD_ERASE2 << 8)
888 | NAND_CMD_ERASE1;
4eb2da89
LW
889 info->ndcb1 = page_addr;
890 info->ndcb2 = 0;
891
fe69af00 892 break;
893 case NAND_CMD_RESET:
4eb2da89 894 info->ndcb0 |= NDCB0_CMD_TYPE(5)
ec82135a 895 | command;
4eb2da89
LW
896
897 break;
898
899 case NAND_CMD_ERASE2:
900 exec_cmd = 0;
fe69af00 901 break;
4eb2da89 902
fe69af00 903 default:
4eb2da89 904 exec_cmd = 0;
da675b4e
LW
905 dev_err(&info->pdev->dev, "non-supported command %x\n",
906 command);
fe69af00 907 break;
908 }
909
4eb2da89
LW
910 return exec_cmd;
911}
912
5cbbdc6a
EG
913static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
914 int column, int page_addr)
4eb2da89 915{
d456882b
LW
916 struct pxa3xx_nand_host *host = mtd->priv;
917 struct pxa3xx_nand_info *info = host->info_data;
4eb2da89
LW
918 int ret, exec_cmd;
919
920 /*
 921 * if this is a x16 device, then convert the input
922 * "byte" address into a "word" address appropriate
923 * for indexing a word-oriented device
924 */
48cf7efa 925 if (info->reg_ndcr & NDCR_DWIDTH_M)
4eb2da89
LW
926 column /= 2;
927
f3c8cfc2
LW
928 /*
 929 * There may be different NAND chips hooked to
 930 * different chip selects, so check whether the
 931 * chip select has changed; if so, reset the timing.
932 */
933 if (info->cs != host->cs) {
934 info->cs = host->cs;
48cf7efa
EG
935 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
936 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
f3c8cfc2
LW
937 }
938
c39ff03a
EG
939 prepare_start_command(info, command);
940
d456882b 941 info->state = STATE_PREPARED;
70ed8523
EG
942 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
943
f8155a40
LW
944 if (exec_cmd) {
945 init_completion(&info->cmd_complete);
55d9fd6e
EG
946 init_completion(&info->dev_ready);
947 info->need_wait = 1;
f8155a40
LW
948 pxa3xx_nand_start(info);
949
950 ret = wait_for_completion_timeout(&info->cmd_complete,
951 CHIP_DELAY_TIMEOUT);
952 if (!ret) {
da675b4e 953 dev_err(&info->pdev->dev, "Wait time out!!!\n");
f8155a40
LW
954 /* Stop State Machine for next command cycle */
955 pxa3xx_nand_stop(info);
956 }
f8155a40 957 }
d456882b 958 info->state = STATE_IDLE;
f8155a40
LW
959}
960
5cbbdc6a
EG
961static void nand_cmdfunc_extended(struct mtd_info *mtd,
962 const unsigned command,
963 int column, int page_addr)
70ed8523
EG
964{
965 struct pxa3xx_nand_host *host = mtd->priv;
966 struct pxa3xx_nand_info *info = host->info_data;
967 int ret, exec_cmd, ext_cmd_type;
968
969 /*
970 * if this is a x16 device then convert the input
971 * "byte" address into a "word" address appropriate
972 * for indexing a word-oriented device
973 */
974 if (info->reg_ndcr & NDCR_DWIDTH_M)
975 column /= 2;
976
977 /*
 978 * There may be different NAND chips hooked to
 979 * different chip selects, so check whether the
 980 * chip select has changed; if so, reset the timing.
981 */
982 if (info->cs != host->cs) {
983 info->cs = host->cs;
984 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
985 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
986 }
987
988 /* Select the extended command for the first command */
989 switch (command) {
990 case NAND_CMD_READ0:
991 case NAND_CMD_READOOB:
992 ext_cmd_type = EXT_CMD_TYPE_MONO;
993 break;
535cb57a
EG
994 case NAND_CMD_SEQIN:
995 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
996 break;
997 case NAND_CMD_PAGEPROG:
998 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
999 break;
70ed8523
EG
1000 default:
1001 ext_cmd_type = 0;
535cb57a 1002 break;
70ed8523
EG
1003 }
1004
1005 prepare_start_command(info, command);
1006
1007 /*
1008 * Prepare the "is ready" completion before starting a command
 1009 * transaction sequence. If the command is not executed, the
 1010 * completion is completed immediately; see below.
1011 *
1012 * We can do that inside the loop because the command variable
1013 * is invariant and thus so is the exec_cmd.
1014 */
1015 info->need_wait = 1;
1016 init_completion(&info->dev_ready);
1017 do {
1018 info->state = STATE_PREPARED;
1019 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1020 column, page_addr);
1021 if (!exec_cmd) {
1022 info->need_wait = 0;
1023 complete(&info->dev_ready);
1024 break;
1025 }
1026
1027 init_completion(&info->cmd_complete);
1028 pxa3xx_nand_start(info);
1029
1030 ret = wait_for_completion_timeout(&info->cmd_complete,
1031 CHIP_DELAY_TIMEOUT);
1032 if (!ret) {
1033 dev_err(&info->pdev->dev, "Wait time out!!!\n");
1034 /* Stop State Machine for next command cycle */
1035 pxa3xx_nand_stop(info);
1036 break;
1037 }
1038
1039 /* Check if the sequence is complete */
535cb57a
EG
1040 if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
1041 break;
1042
1043 /*
 1044 * After a split program command sequence has issued
1045 * the command dispatch, the command sequence is complete.
1046 */
1047 if (info->data_size == 0 &&
1048 command == NAND_CMD_PAGEPROG &&
1049 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
70ed8523
EG
1050 break;
1051
1052 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1053 /* Last read: issue a 'last naked read' */
1054 if (info->data_size == info->chunk_size)
1055 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1056 else
1057 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
535cb57a
EG
1058
1059 /*
 1060 * If a split program command has no more data to transfer,
 1061 * the command dispatch must be issued to complete it.
1062 */
1063 } else if (command == NAND_CMD_PAGEPROG &&
1064 info->data_size == 0) {
1065 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
70ed8523
EG
1066 }
1067 } while (1);
1068
1069 info->state = STATE_IDLE;
1070}
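/*
 * On NFCv2 a multi-chunk read thus starts as a monolithic read and is
 * continued with naked reads of chunk_size + oob_size bytes, the final
 * chunk being issued as a "last naked read". A multi-chunk program starts
 * with a command dispatch for SEQIN (setting the page address), transfers
 * the data with naked writes, and ends with a final command dispatch for
 * PAGEPROG.
 */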
1071
fdbad98d 1072static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1fbb938d 1073 struct nand_chip *chip, const uint8_t *buf, int oob_required)
f8155a40
LW
1074{
1075 chip->write_buf(mtd, buf, mtd->writesize);
1076 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
fdbad98d
JW
1077
1078 return 0;
f8155a40
LW
1079}
1080
1081static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1fbb938d
BN
1082 struct nand_chip *chip, uint8_t *buf, int oob_required,
1083 int page)
f8155a40 1084{
d456882b
LW
1085 struct pxa3xx_nand_host *host = mtd->priv;
1086 struct pxa3xx_nand_info *info = host->info_data;
f8155a40
LW
1087
1088 chip->read_buf(mtd, buf, mtd->writesize);
1089 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1090
87f5336e
EG
1091 if (info->retcode == ERR_CORERR && info->use_ecc) {
1092 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1093
1094 } else if (info->retcode == ERR_UNCORERR) {
f8155a40
LW
1095 /*
 1096 * For a blank page (all 0xff), the HW calculates its ECC as
 1097 * 0, which differs from the ECC information stored in the
87f5336e 1098 * OOB; ignore such uncorrectable errors.
f8155a40
LW
1099 */
1100 if (is_buf_blank(buf, mtd->writesize))
543e32d5
DM
1101 info->retcode = ERR_NONE;
1102 else
f8155a40 1103 mtd->ecc_stats.failed++;
fe69af00 1104 }
f8155a40 1105
87f5336e 1106 return info->max_bitflips;
fe69af00 1107}
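/*
 * Returning max_bitflips lets the MTD core compare the worst corrected
 * chunk of this page against mtd->bitflip_threshold and signal -EUCLEAN
 * to the caller when the block should be scrubbed.
 */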
1108
1109static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1110{
d456882b
LW
1111 struct pxa3xx_nand_host *host = mtd->priv;
1112 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1113 char retval = 0xFF;
1114
1115 if (info->buf_start < info->buf_count)
 1116 /* Has a new command just been sent? */
1117 retval = info->data_buff[info->buf_start++];
1118
1119 return retval;
1120}
1121
1122static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1123{
d456882b
LW
1124 struct pxa3xx_nand_host *host = mtd->priv;
1125 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1126 u16 retval = 0xFFFF;
1127
1128 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1129 retval = *((u16 *)(info->data_buff+info->buf_start));
1130 info->buf_start += 2;
1131 }
1132 return retval;
1133}
1134
1135static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1136{
d456882b
LW
1137 struct pxa3xx_nand_host *host = mtd->priv;
1138 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1139 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1140
1141 memcpy(buf, info->data_buff + info->buf_start, real_len);
1142 info->buf_start += real_len;
1143}
1144
1145static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1146 const uint8_t *buf, int len)
1147{
d456882b
LW
1148 struct pxa3xx_nand_host *host = mtd->priv;
1149 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1150 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1151
1152 memcpy(info->data_buff + info->buf_start, buf, real_len);
1153 info->buf_start += real_len;
1154}
1155
fe69af00 1156static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
1157{
1158 return;
1159}
1160
1161static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1162{
d456882b
LW
1163 struct pxa3xx_nand_host *host = mtd->priv;
1164 struct pxa3xx_nand_info *info = host->info_data;
55d9fd6e
EG
1165 int ret;
1166
1167 if (info->need_wait) {
1168 ret = wait_for_completion_timeout(&info->dev_ready,
1169 CHIP_DELAY_TIMEOUT);
1170 info->need_wait = 0;
1171 if (!ret) {
1172 dev_err(&info->pdev->dev, "Ready time out!!!\n");
1173 return NAND_STATUS_FAIL;
1174 }
1175 }
fe69af00 1176
 1177 /* the cmdfunc path has already waited for the command to complete */
1178 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1179 if (info->retcode == ERR_NONE)
1180 return 0;
55d9fd6e
EG
1181 else
1182 return NAND_STATUS_FAIL;
fe69af00 1183 }
1184
55d9fd6e 1185 return NAND_STATUS_READY;
fe69af00 1186}
1187
fe69af00 1188static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
c8c17c88 1189 const struct pxa3xx_nand_flash *f)
fe69af00 1190{
1191 struct platform_device *pdev = info->pdev;
453810b7 1192 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
f3c8cfc2 1193 struct pxa3xx_nand_host *host = info->host[info->cs];
f8155a40 1194 uint32_t ndcr = 0x0; /* enable all interrupts */
fe69af00 1195
da675b4e
LW
1196 if (f->page_size != 2048 && f->page_size != 512) {
1197 dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
fe69af00 1198 return -EINVAL;
da675b4e 1199 }
fe69af00 1200
da675b4e
LW
1201 if (f->flash_width != 16 && f->flash_width != 8) {
1202 dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
fe69af00 1203 return -EINVAL;
da675b4e 1204 }
fe69af00 1205
1206 /* calculate flash information */
d456882b 1207 host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
fe69af00 1208
1209 /* calculate addressing information */
d456882b 1210 host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
fe69af00 1211
1212 if (f->num_blocks * f->page_per_block > 65536)
d456882b 1213 host->row_addr_cycles = 3;
fe69af00 1214 else
d456882b 1215 host->row_addr_cycles = 2;
fe69af00 1216
1217 ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
d456882b 1218 ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
fe69af00 1219 ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
1220 ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
1221 ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
1222 ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
1223
d456882b 1224 ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
fe69af00 1225 ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1226
48cf7efa 1227 info->reg_ndcr = ndcr;
fe69af00 1228
d456882b 1229 pxa3xx_nand_set_timing(host, f->timing);
fe69af00 1230 return 0;
1231}
1232
f271049e
MR
1233static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1234{
f3c8cfc2
LW
1235 /*
 1236 * Chip select 0 is hard-coded here, because keep_config is not
 1237 * supported when more than one chip is attached to the controller.
1238 */
1239 struct pxa3xx_nand_host *host = info->host[0];
f271049e 1240 uint32_t ndcr = nand_readl(info, NDCR);
f271049e 1241
d456882b 1242 if (ndcr & NDCR_PAGE_SZ) {
2128b08c 1243 /* Controller's FIFO size */
70ed8523 1244 info->chunk_size = 2048;
d456882b
LW
1245 host->read_id_bytes = 4;
1246 } else {
70ed8523 1247 info->chunk_size = 512;
d456882b
LW
1248 host->read_id_bytes = 2;
1249 }
1250
70ed8523 1251 /* Set an initial chunk size */
48cf7efa
EG
1252 info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1253 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1254 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
f271049e
MR
1255 return 0;
1256}
1257
f4db2e3a 1258#ifdef ARCH_HAS_DMA
fe69af00 1259static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1260{
1261 struct platform_device *pdev = info->pdev;
62e8b851 1262 int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);
fe69af00 1263
1264 if (use_dma == 0) {
62e8b851 1265 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
fe69af00 1266 if (info->data_buff == NULL)
1267 return -ENOMEM;
1268 return 0;
1269 }
1270
62e8b851 1271 info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
fe69af00 1272 &info->data_buff_phys, GFP_KERNEL);
1273 if (info->data_buff == NULL) {
1274 dev_err(&pdev->dev, "failed to allocate dma buffer\n");
1275 return -ENOMEM;
1276 }
1277
fe69af00 1278 info->data_desc = (void *)info->data_buff + data_desc_offset;
1279 info->data_desc_addr = info->data_buff_phys + data_desc_offset;
1280
1281 info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
1282 pxa3xx_nand_data_dma_irq, info);
1283 if (info->data_dma_ch < 0) {
1284 dev_err(&pdev->dev, "failed to request data dma\n");
62e8b851 1285 dma_free_coherent(&pdev->dev, info->buf_size,
fe69af00 1286 info->data_buff, info->data_buff_phys);
1287 return info->data_dma_ch;
1288 }
1289
95b26563
EG
1290 /*
1291 * Now that DMA buffers are allocated we turn on
1292 * DMA proper for I/O operations.
1293 */
1294 info->use_dma = 1;
fe69af00 1295 return 0;
1296}
1297
498b6145
EG
1298static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1299{
1300 struct platform_device *pdev = info->pdev;
15b540c7 1301 if (info->use_dma) {
498b6145 1302 pxa_free_dma(info->data_dma_ch);
62e8b851 1303 dma_free_coherent(&pdev->dev, info->buf_size,
498b6145
EG
1304 info->data_buff, info->data_buff_phys);
1305 } else {
1306 kfree(info->data_buff);
1307 }
1308}
f4db2e3a
EG
1309#else
1310static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1311{
62e8b851 1312 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
f4db2e3a
EG
1313 if (info->data_buff == NULL)
1314 return -ENOMEM;
1315 return 0;
1316}
1317
1318static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1319{
1320 kfree(info->data_buff);
1321}
1322#endif
498b6145 1323
401e67e2
LW
1324static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
1325{
f3c8cfc2 1326 struct mtd_info *mtd;
2d79ab16 1327 struct nand_chip *chip;
d456882b 1328 int ret;
2d79ab16 1329
f3c8cfc2 1330 mtd = info->host[info->cs]->mtd;
2d79ab16
EG
1331 chip = mtd->priv;
1332
401e67e2 1333 /* try the default timing first */
d456882b
LW
1334 ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
1335 if (ret)
1336 return ret;
1337
2d79ab16 1338 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
56704d85
EG
1339 ret = chip->waitfunc(mtd, chip);
1340 if (ret & NAND_STATUS_FAIL)
1341 return -ENODEV;
d456882b 1342
56704d85 1343 return 0;
401e67e2 1344}
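/*
 * Sensing programs the default timing, issues a RESET and checks the
 * resulting status; a failure here is what makes pxa3xx_nand_scan()
 * report that no chip is present on this chip select.
 */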
fe69af00 1345
43bcfd2b
EG
1346static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1347 struct nand_ecc_ctrl *ecc,
30b2afc8 1348 int strength, int ecc_stepsize, int page_size)
43bcfd2b 1349{
30b2afc8 1350 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
70ed8523 1351 info->chunk_size = 2048;
43bcfd2b
EG
1352 info->spare_size = 40;
1353 info->ecc_size = 24;
1354 ecc->mode = NAND_ECC_HW;
1355 ecc->size = 512;
1356 ecc->strength = 1;
43bcfd2b 1357
30b2afc8 1358 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
70ed8523 1359 info->chunk_size = 512;
43bcfd2b
EG
1360 info->spare_size = 8;
1361 info->ecc_size = 8;
1362 ecc->mode = NAND_ECC_HW;
1363 ecc->size = 512;
1364 ecc->strength = 1;
43bcfd2b 1365
6033a949
BN
1366 /*
1367 * Required ECC: 4-bit correction per 512 bytes
1368 * Select: 16-bit correction per 2048 bytes
1369 */
3db227b6
RG
1370 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1371 info->ecc_bch = 1;
1372 info->chunk_size = 2048;
1373 info->spare_size = 32;
1374 info->ecc_size = 32;
1375 ecc->mode = NAND_ECC_HW;
1376 ecc->size = info->chunk_size;
1377 ecc->layout = &ecc_layout_2KB_bch4bit;
1378 ecc->strength = 16;
3db227b6 1379
30b2afc8 1380 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
70ed8523
EG
1381 info->ecc_bch = 1;
1382 info->chunk_size = 2048;
1383 info->spare_size = 32;
1384 info->ecc_size = 32;
1385 ecc->mode = NAND_ECC_HW;
1386 ecc->size = info->chunk_size;
1387 ecc->layout = &ecc_layout_4KB_bch4bit;
1388 ecc->strength = 16;
70ed8523 1389
6033a949
BN
1390 /*
1391 * Required ECC: 8-bit correction per 512 bytes
1392 * Select: 16-bit correction per 1024 bytes
1393 */
1394 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
70ed8523
EG
1395 info->ecc_bch = 1;
1396 info->chunk_size = 1024;
1397 info->spare_size = 0;
1398 info->ecc_size = 32;
1399 ecc->mode = NAND_ECC_HW;
1400 ecc->size = info->chunk_size;
1401 ecc->layout = &ecc_layout_4KB_bch8bit;
1402 ecc->strength = 16;
eee0166d
EG
1403 } else {
1404 dev_err(&info->pdev->dev,
1405 "ECC strength %d at page size %d is not supported\n",
1406 strength, page_size);
1407 return -ENODEV;
70ed8523 1408 }
eee0166d
EG
1409
1410 dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
1411 ecc->strength, ecc->size);
43bcfd2b
EG
1412 return 0;
1413}
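/*
 * pxa_ecc_init() either fills in a supported (strength, step, page size)
 * combination and returns 0, or returns -ENODEV; pxa3xx_nand_scan()
 * propagates that error instead of continuing with an unusable ECC setup.
 */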
1414
401e67e2 1415static int pxa3xx_nand_scan(struct mtd_info *mtd)
fe69af00 1416{
d456882b
LW
1417 struct pxa3xx_nand_host *host = mtd->priv;
1418 struct pxa3xx_nand_info *info = host->info_data;
401e67e2 1419 struct platform_device *pdev = info->pdev;
453810b7 1420 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
0fab028b 1421 struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
401e67e2
LW
1422 const struct pxa3xx_nand_flash *f = NULL;
1423 struct nand_chip *chip = mtd->priv;
1424 uint32_t id = -1;
4332c116 1425 uint64_t chipsize;
401e67e2 1426 int i, ret, num;
30b2afc8 1427 uint16_t ecc_strength, ecc_step;
401e67e2
LW
1428
1429 if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
4332c116 1430 goto KEEP_CONFIG;
401e67e2
LW
1431
1432 ret = pxa3xx_nand_sensing(info);
d456882b 1433 if (ret) {
f3c8cfc2
LW
1434 dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
1435 info->cs);
401e67e2 1436
d456882b 1437 return ret;
401e67e2
LW
1438 }
1439
1440 chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
1441 id = *((uint16_t *)(info->data_buff));
1442 if (id != 0)
da675b4e 1443 dev_info(&info->pdev->dev, "Detected a flash id %x\n", id);
401e67e2 1444 else {
da675b4e
LW
1445 dev_warn(&info->pdev->dev,
1446 "Read out ID 0, potential timing set wrong!!\n");
401e67e2
LW
1447
1448 return -EINVAL;
1449 }
1450
1451 num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1;
1452 for (i = 0; i < num; i++) {
1453 if (i < pdata->num_flash)
1454 f = pdata->flash + i;
1455 else
1456 f = &builtin_flash_types[i - pdata->num_flash + 1];
1457
1458 /* find the chip in default list */
4332c116 1459 if (f->chip_id == id)
401e67e2 1460 break;
401e67e2
LW
1461 }
1462
4332c116 1463 if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
da675b4e 1464 dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");
401e67e2
LW
1465
1466 return -EINVAL;
1467 }
1468
d456882b
LW
1469 ret = pxa3xx_nand_config_flash(info, f);
1470 if (ret) {
1471 dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
1472 return ret;
1473 }
1474
4332c116 1475 pxa3xx_flash_ids[0].name = f->name;
68aa352d 1476 pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
4332c116
LW
1477 pxa3xx_flash_ids[0].pagesize = f->page_size;
1478 chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
1479 pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
1480 pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
1481 if (f->flash_width == 16)
1482 pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
0fab028b
LW
1483 pxa3xx_flash_ids[1].name = NULL;
1484 def = pxa3xx_flash_ids;
4332c116 1485KEEP_CONFIG:
48cf7efa 1486 if (info->reg_ndcr & NDCR_DWIDTH_M)
d456882b
LW
1487 chip->options |= NAND_BUSWIDTH_16;
1488
43bcfd2b
EG
1489 /* Device detection must be done with ECC disabled */
1490 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1491 nand_writel(info, NDECCCTRL, 0x0);
1492
0fab028b 1493 if (nand_scan_ident(mtd, 1, def))
4332c116 1494 return -ENODEV;
776f265e
EG
1495
1496 if (pdata->flash_bbt) {
1497 /*
1498 * We'll use a bad block table stored in-flash and don't
1499 * allow writing the bad block marker to the flash.
1500 */
1501 chip->bbt_options |= NAND_BBT_USE_FLASH |
1502 NAND_BBT_NO_OOB_BBM;
1503 chip->bbt_td = &bbt_main_descr;
1504 chip->bbt_md = &bbt_mirror_descr;
1505 }
1506
5cbbdc6a
EG
1507 /*
 1508 * If the page size is bigger than the FIFO size, check that
 1509 * we are given the right variant and then switch to the extended
 1510 * (aka split) command handling.
1511 */
1512 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1513 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1514 chip->cmdfunc = nand_cmdfunc_extended;
1515 } else {
1516 dev_err(&info->pdev->dev,
1517 "unsupported page size on this variant\n");
1518 return -ENODEV;
1519 }
1520 }
1521
30b2afc8
EG
1522 ecc_strength = chip->ecc_strength_ds;
1523 ecc_step = chip->ecc_step_ds;
1524
1525 /* Set default ECC strength requirements on non-ONFI devices */
1526 if (ecc_strength < 1 && ecc_step < 1) {
1527 ecc_strength = 1;
1528 ecc_step = 512;
1529 }
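	/*
	 * ecc_strength_ds/ecc_step_ds are filled in by nand_scan_ident(),
	 * e.g. from the ONFI parameter page; chips that advertise nothing
	 * fall back to 1-bit ECC per 512 bytes here.
	 */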
1530
1531 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1532 ecc_step, mtd->writesize);
eee0166d
EG
1533 if (ret)
1534 return ret;
43bcfd2b 1535
4332c116 1536 /* calculate addressing information */
d456882b
LW
1537 if (mtd->writesize >= 2048)
1538 host->col_addr_cycles = 2;
1539 else
1540 host->col_addr_cycles = 1;
1541
62e8b851
EG
1542 /* release the initial buffer */
1543 kfree(info->data_buff);
1544
1545 /* allocate the real data + oob buffer */
1546 info->buf_size = mtd->writesize + mtd->oobsize;
1547 ret = pxa3xx_nand_init_buff(info);
1548 if (ret)
1549 return ret;
4332c116 1550 info->oob_buff = info->data_buff + mtd->writesize;
62e8b851 1551
4332c116 1552 if ((mtd->size >> chip->page_shift) > 65536)
d456882b 1553 host->row_addr_cycles = 3;
4332c116 1554 else
d456882b 1555 host->row_addr_cycles = 2;
401e67e2 1556 return nand_scan_tail(mtd);
fe69af00 1557}
1558
d456882b 1559static int alloc_nand_resource(struct platform_device *pdev)
fe69af00 1560{
f3c8cfc2 1561 struct pxa3xx_nand_platform_data *pdata;
fe69af00 1562 struct pxa3xx_nand_info *info;
d456882b 1563 struct pxa3xx_nand_host *host;
6e308f87 1564 struct nand_chip *chip = NULL;
fe69af00 1565 struct mtd_info *mtd;
1566 struct resource *r;
f3c8cfc2 1567 int ret, irq, cs;
fe69af00 1568
453810b7 1569 pdata = dev_get_platdata(&pdev->dev);
4c073cd2
EG
1570 info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1571 sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1572 if (!info)
d456882b 1573 return -ENOMEM;
fe69af00 1574
fe69af00 1575 info->pdev = pdev;
c7e9c7e7 1576 info->variant = pxa3xx_nand_get_variant(pdev);
f3c8cfc2
LW
1577 for (cs = 0; cs < pdata->num_cs; cs++) {
1578 mtd = (struct mtd_info *)((unsigned int)&info[1] +
1579 (sizeof(*mtd) + sizeof(*host)) * cs);
1580 chip = (struct nand_chip *)(&mtd[1]);
1581 host = (struct pxa3xx_nand_host *)chip;
1582 info->host[cs] = host;
1583 host->mtd = mtd;
1584 host->cs = cs;
1585 host->info_data = info;
1586 mtd->priv = host;
1587 mtd->owner = THIS_MODULE;
1588
1589 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1590 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1591 chip->controller = &info->controller;
1592 chip->waitfunc = pxa3xx_nand_waitfunc;
1593 chip->select_chip = pxa3xx_nand_select_chip;
f3c8cfc2
LW
1594 chip->read_word = pxa3xx_nand_read_word;
1595 chip->read_byte = pxa3xx_nand_read_byte;
1596 chip->read_buf = pxa3xx_nand_read_buf;
1597 chip->write_buf = pxa3xx_nand_write_buf;
664c7f5e 1598 chip->options |= NAND_NO_SUBPAGE_WRITE;
5cbbdc6a 1599 chip->cmdfunc = nand_cmdfunc;
f3c8cfc2 1600 }
401e67e2
LW
1601
1602 spin_lock_init(&chip->controller->lock);
1603 init_waitqueue_head(&chip->controller->wq);
9ca7944d 1604 info->clk = devm_clk_get(&pdev->dev, NULL);
fe69af00 1605 if (IS_ERR(info->clk)) {
1606 dev_err(&pdev->dev, "failed to get nand clock\n");
4c073cd2 1607 return PTR_ERR(info->clk);
fe69af00 1608 }
1f8eaff2
EG
1609 ret = clk_prepare_enable(info->clk);
1610 if (ret < 0)
1611 return ret;
fe69af00 1612
6b45c1ee
EG
1613 if (use_dma) {
1614 /*
1615 * This is a dirty hack to make this driver work from
1616 * devicetree bindings. It can be removed once we have
 1617 * a proper DMA controller framework for DT.
1618 */
1619 if (pdev->dev.of_node &&
1620 of_machine_is_compatible("marvell,pxa3xx")) {
1621 info->drcmr_dat = 97;
1622 info->drcmr_cmd = 99;
1623 } else {
1624 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1625 if (r == NULL) {
1626 dev_err(&pdev->dev,
1627 "no resource defined for data DMA\n");
1628 ret = -ENXIO;
1629 goto fail_disable_clk;
1630 }
1631 info->drcmr_dat = r->start;
1632
1633 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1634 if (r == NULL) {
1635 dev_err(&pdev->dev,
1636 "no resource defined for cmd DMA\n");
1637 ret = -ENXIO;
1638 goto fail_disable_clk;
1639 }
1640 info->drcmr_cmd = r->start;
1e7ba630 1641 }
fe69af00 1642 }
fe69af00 1643
1644 irq = platform_get_irq(pdev, 0);
1645 if (irq < 0) {
1646 dev_err(&pdev->dev, "no IRQ resource defined\n");
1647 ret = -ENXIO;
9ca7944d 1648 goto fail_disable_clk;
fe69af00 1649 }
1650
1651 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
0ddd846f
EG
1652 info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1653 if (IS_ERR(info->mmio_base)) {
1654 ret = PTR_ERR(info->mmio_base);
9ca7944d 1655 goto fail_disable_clk;
fe69af00 1656 }
8638fac8 1657 info->mmio_phys = r->start;
fe69af00 1658
62e8b851
EG
1659 /* Allocate a buffer to allow flash detection */
1660 info->buf_size = INIT_BUFFER_SIZE;
1661 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1662 if (info->data_buff == NULL) {
1663 ret = -ENOMEM;
9ca7944d 1664 goto fail_disable_clk;
62e8b851 1665 }
fe69af00 1666
346e1259
HZ
1667 /* initialize all interrupts to be disabled */
1668 disable_int(info, NDSR_MASK);
1669
b1eb234f 1670 ret = request_irq(irq, pxa3xx_nand_irq, 0, pdev->name, info);
fe69af00 1671 if (ret < 0) {
1672 dev_err(&pdev->dev, "failed to request IRQ\n");
1673 goto fail_free_buf;
1674 }
1675
e353a20a 1676 platform_set_drvdata(pdev, info);
fe69af00 1677
d456882b 1678 return 0;
fe69af00 1679
fe69af00 1680fail_free_buf:
401e67e2 1681 free_irq(irq, info);
62e8b851 1682 kfree(info->data_buff);
9ca7944d 1683fail_disable_clk:
fb32061f 1684 clk_disable_unprepare(info->clk);
d456882b 1685 return ret;
fe69af00 1686}
1687
1688static int pxa3xx_nand_remove(struct platform_device *pdev)
1689{
e353a20a 1690 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
f3c8cfc2 1691 struct pxa3xx_nand_platform_data *pdata;
f3c8cfc2 1692 int irq, cs;
fe69af00 1693
d456882b
LW
1694 if (!info)
1695 return 0;
1696
453810b7 1697 pdata = dev_get_platdata(&pdev->dev);
fe69af00 1698
dbf5986a
HZ
1699 irq = platform_get_irq(pdev, 0);
1700 if (irq >= 0)
1701 free_irq(irq, info);
498b6145 1702 pxa3xx_nand_free_buff(info);
82a72d10 1703
fb32061f 1704 clk_disable_unprepare(info->clk);
82a72d10 1705
f3c8cfc2
LW
1706 for (cs = 0; cs < pdata->num_cs; cs++)
1707 nand_release(info->host[cs]->mtd);
fe69af00 1708 return 0;
1709}
1710
1e7ba630
DM
1711static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1712{
1713 struct pxa3xx_nand_platform_data *pdata;
1714 struct device_node *np = pdev->dev.of_node;
1715 const struct of_device_id *of_id =
1716 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1717
1718 if (!of_id)
1719 return 0;
1720
1721 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1722 if (!pdata)
1723 return -ENOMEM;
1724
1725 if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1726 pdata->enable_arbiter = 1;
1727 if (of_get_property(np, "marvell,nand-keep-config", NULL))
1728 pdata->keep_config = 1;
1729 of_property_read_u32(np, "num-cs", &pdata->num_cs);
776f265e 1730 pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1e7ba630
DM
1731
1732 pdev->dev.platform_data = pdata;
1733
1734 return 0;
1735}
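/*
 * An illustrative device tree node using the properties parsed above
 * (register address and interrupt number are only an example, not
 * authoritative):
 *
 *	nand@43100000 {
 *		compatible = "marvell,pxa3xx-nand";
 *		reg = <0x43100000 90>;
 *		interrupts = <45>;
 *		num-cs = <1>;
 *		marvell,nand-enable-arbiter;
 *		marvell,nand-keep-config;
 *		nand-on-flash-bbt;
 *	};
 */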
1e7ba630 1736
e353a20a
LW
1737static int pxa3xx_nand_probe(struct platform_device *pdev)
1738{
1739 struct pxa3xx_nand_platform_data *pdata;
1e7ba630 1740 struct mtd_part_parser_data ppdata = {};
e353a20a 1741 struct pxa3xx_nand_info *info;
f3c8cfc2 1742 int ret, cs, probe_success;
e353a20a 1743
f4db2e3a
EG
1744#ifndef ARCH_HAS_DMA
1745 if (use_dma) {
1746 use_dma = 0;
1747 dev_warn(&pdev->dev,
1748 "This platform can't do DMA on this device\n");
1749 }
1750#endif
1e7ba630
DM
1751 ret = pxa3xx_nand_probe_dt(pdev);
1752 if (ret)
1753 return ret;
1754
453810b7 1755 pdata = dev_get_platdata(&pdev->dev);
e353a20a
LW
1756 if (!pdata) {
1757 dev_err(&pdev->dev, "no platform data defined\n");
1758 return -ENODEV;
1759 }
1760
d456882b
LW
1761 ret = alloc_nand_resource(pdev);
1762 if (ret) {
1763 dev_err(&pdev->dev, "alloc nand resource failed\n");
1764 return ret;
1765 }
e353a20a 1766
d456882b 1767 info = platform_get_drvdata(pdev);
f3c8cfc2
LW
1768 probe_success = 0;
1769 for (cs = 0; cs < pdata->num_cs; cs++) {
b7655bcb 1770 struct mtd_info *mtd = info->host[cs]->mtd;
f455578d 1771
18a84e93
EG
1772 /*
1773 * The mtd name matches the one used in 'mtdparts' kernel
 1774 * parameter. This name cannot be changed, otherwise the
 1775 * user's mtd partition configuration would get broken.
1776 */
1777 mtd->name = "pxa3xx_nand-0";
f3c8cfc2 1778 info->cs = cs;
b7655bcb 1779 ret = pxa3xx_nand_scan(mtd);
f3c8cfc2
LW
1780 if (ret) {
1781 dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1782 cs);
1783 continue;
1784 }
1785
1e7ba630 1786 ppdata.of_node = pdev->dev.of_node;
b7655bcb 1787 ret = mtd_device_parse_register(mtd, NULL,
1e7ba630 1788 &ppdata, pdata->parts[cs],
42d7fbe2 1789 pdata->nr_parts[cs]);
f3c8cfc2
LW
1790 if (!ret)
1791 probe_success = 1;
1792 }
1793
1794 if (!probe_success) {
e353a20a
LW
1795 pxa3xx_nand_remove(pdev);
1796 return -ENODEV;
1797 }
1798
f3c8cfc2 1799 return 0;
e353a20a
LW
1800}
1801
fe69af00 1802#ifdef CONFIG_PM
1803static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1804{
e353a20a 1805 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
f3c8cfc2
LW
1806 struct pxa3xx_nand_platform_data *pdata;
1807 struct mtd_info *mtd;
1808 int cs;
fe69af00 1809
453810b7 1810 pdata = dev_get_platdata(&pdev->dev);
f8155a40 1811 if (info->state) {
fe69af00 1812 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1813 return -EAGAIN;
1814 }
1815
f3c8cfc2
LW
1816 for (cs = 0; cs < pdata->num_cs; cs++) {
1817 mtd = info->host[cs]->mtd;
3fe4bae8 1818 mtd_suspend(mtd);
f3c8cfc2
LW
1819 }
1820
fe69af00 1821 return 0;
1822}
1823
1824static int pxa3xx_nand_resume(struct platform_device *pdev)
1825{
e353a20a 1826 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
f3c8cfc2
LW
1827 struct pxa3xx_nand_platform_data *pdata;
1828 struct mtd_info *mtd;
1829 int cs;
051fc41c 1830
453810b7 1831 pdata = dev_get_platdata(&pdev->dev);
051fc41c
LW
1832 /* We don't want to handle interrupt without calling mtd routine */
1833 disable_int(info, NDCR_INT_MASK);
fe69af00 1834
f3c8cfc2
LW
1835 /*
 1836 * Directly set the chip select to an invalid value,
 1837 * so that the driver resets the timing according to the
 1838 * current chip select at the beginning of cmdfunc.
1839 */
1840 info->cs = 0xff;
fe69af00 1841
051fc41c
LW
1842 /*
1843 * As the spec says, the NDSR would be updated to 0x1800 when
1844 * doing the nand_clk disable/enable.
 1845 * To prevent it from corrupting the driver's state machine,
 1846 * clear all status bits before resuming.
1847 */
1848 nand_writel(info, NDSR, NDSR_MASK);
f3c8cfc2
LW
1849 for (cs = 0; cs < pdata->num_cs; cs++) {
1850 mtd = info->host[cs]->mtd;
ead995f8 1851 mtd_resume(mtd);
f3c8cfc2
LW
1852 }
1853
18c81b18 1854 return 0;
fe69af00 1855}
1856#else
1857#define pxa3xx_nand_suspend NULL
1858#define pxa3xx_nand_resume NULL
1859#endif
1860
1861static struct platform_driver pxa3xx_nand_driver = {
1862 .driver = {
1863 .name = "pxa3xx-nand",
5576bc7b 1864 .of_match_table = pxa3xx_nand_dt_ids,
fe69af00 1865 },
1866 .probe = pxa3xx_nand_probe,
1867 .remove = pxa3xx_nand_remove,
1868 .suspend = pxa3xx_nand_suspend,
1869 .resume = pxa3xx_nand_resume,
1870};
1871
f99640de 1872module_platform_driver(pxa3xx_nand_driver);
fe69af00 1873
1874MODULE_LICENSE("GPL");
1875MODULE_DESCRIPTION("PXA3xx NAND controller driver");