/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
 */
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/delay.h>
20 #include <linux/clk.h>
21 #include <linux/mtd/mtd.h>
22 #include <linux/mtd/nand.h>
23 #include <linux/mtd/partitions.h>
25 #include <linux/irq.h>
26 #include <linux/slab.h>
28 #include <linux/of_device.h>
29 #include <linux/of_mtd.h>
31 #if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
39 #include <linux/platform_data/mtd-nand-pxa3xx.h>
41 #define NAND_DEV_READY_TIMEOUT 50
42 #define CHIP_DELAY_TIMEOUT (2 * HZ/10)
43 #define NAND_STOP_DELAY (2 * HZ/50)
44 #define PAGE_CHUNK_SIZE (2048)
/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM. The largest of these is the PARAM command,
 * needing 256 bytes.
 */
51 #define INIT_BUFFER_SIZE 256
53 /* registers and bit definitions */
54 #define NDCR (0x00) /* Control register */
55 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
56 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
57 #define NDSR (0x14) /* Status Register */
58 #define NDPCR (0x18) /* Page Count Register */
59 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
60 #define NDBDR1 (0x20) /* Bad Block Register 1 */
61 #define NDECCCTRL (0x28) /* ECC control */
62 #define NDDB (0x40) /* Data Buffer */
63 #define NDCB0 (0x48) /* Command Buffer0 */
64 #define NDCB1 (0x4C) /* Command Buffer1 */
65 #define NDCB2 (0x50) /* Command Buffer2 */
67 #define NDCR_SPARE_EN (0x1 << 31)
68 #define NDCR_ECC_EN (0x1 << 30)
69 #define NDCR_DMA_EN (0x1 << 29)
70 #define NDCR_ND_RUN (0x1 << 28)
71 #define NDCR_DWIDTH_C (0x1 << 27)
72 #define NDCR_DWIDTH_M (0x1 << 26)
73 #define NDCR_PAGE_SZ (0x1 << 24)
74 #define NDCR_NCSX (0x1 << 23)
75 #define NDCR_ND_MODE (0x3 << 21)
76 #define NDCR_NAND_MODE (0x0)
77 #define NDCR_CLR_PG_CNT (0x1 << 20)
78 #define NDCR_STOP_ON_UNCOR (0x1 << 19)
79 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
80 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
82 #define NDCR_RA_START (0x1 << 15)
83 #define NDCR_PG_PER_BLK (0x1 << 14)
84 #define NDCR_ND_ARB_EN (0x1 << 12)
85 #define NDCR_INT_MASK (0xFFF)
87 #define NDSR_MASK (0xfff)
88 #define NDSR_RDY (0x1 << 12)
89 #define NDSR_FLASH_RDY (0x1 << 11)
90 #define NDSR_CS0_PAGED (0x1 << 10)
91 #define NDSR_CS1_PAGED (0x1 << 9)
92 #define NDSR_CS0_CMDD (0x1 << 8)
93 #define NDSR_CS1_CMDD (0x1 << 7)
94 #define NDSR_CS0_BBD (0x1 << 6)
95 #define NDSR_CS1_BBD (0x1 << 5)
96 #define NDSR_DBERR (0x1 << 4)
97 #define NDSR_SBERR (0x1 << 3)
98 #define NDSR_WRDREQ (0x1 << 2)
99 #define NDSR_RDDREQ (0x1 << 1)
100 #define NDSR_WRCMDREQ (0x1)
102 #define NDCB0_LEN_OVRD (0x1 << 28)
103 #define NDCB0_ST_ROW_EN (0x1 << 26)
104 #define NDCB0_AUTO_RS (0x1 << 25)
105 #define NDCB0_CSEL (0x1 << 24)
106 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
107 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
108 #define NDCB0_NC (0x1 << 20)
109 #define NDCB0_DBC (0x1 << 19)
110 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
111 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
112 #define NDCB0_CMD2_MASK (0xff << 8)
113 #define NDCB0_CMD1_MASK (0xff)
114 #define NDCB0_ADDR_CYC_SHIFT (16)
116 /* macros for registers read/write */
117 #define nand_writel(info, off, val) \
118 __raw_writel((val), (info)->mmio_base + (off))
120 #define nand_readl(info, off) \
121 __raw_readl((info)->mmio_base + (off))
123 /* error code and state */
/*
 * Controller generations supported by this driver:
 * NFCv1 in PXA SoCs, NFCv2 in Armada 370/XP SoCs.
 */
enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};
151 struct pxa3xx_nand_host
{
152 struct nand_chip chip
;
153 struct mtd_info
*mtd
;
156 /* page size of attached chip */
160 /* calculated from pxa3xx_nand_flash data */
161 unsigned int col_addr_cycles
;
162 unsigned int row_addr_cycles
;
163 size_t read_id_bytes
;
167 struct pxa3xx_nand_info
{
168 struct nand_hw_control controller
;
169 struct platform_device
*pdev
;
172 void __iomem
*mmio_base
;
173 unsigned long mmio_phys
;
174 struct completion cmd_complete
, dev_ready
;
176 unsigned int buf_start
;
177 unsigned int buf_count
;
178 unsigned int buf_size
;
180 /* DMA information */
184 unsigned char *data_buff
;
185 unsigned char *oob_buff
;
186 dma_addr_t data_buff_phys
;
188 struct pxa_dma_desc
*data_desc
;
189 dma_addr_t data_desc_addr
;
191 struct pxa3xx_nand_host
*host
[NUM_CHIP_SELECT
];
195 * This driver supports NFCv1 (as found in PXA SoC)
196 * and NFCv2 (as found in Armada 370/XP SoC).
198 enum pxa3xx_nand_variant variant
;
201 int use_ecc
; /* use HW ECC ? */
202 int ecc_bch
; /* using BCH ECC? */
203 int use_dma
; /* use DMA ? */
204 int use_spare
; /* use spare ? */
207 unsigned int fifo_size
; /* max. data size in the FIFO */
208 unsigned int data_size
; /* data to be read from FIFO */
209 unsigned int oob_size
;
210 unsigned int spare_size
;
211 unsigned int ecc_size
;
214 /* cached register value */
219 /* generated NDCBx register values */
226 static bool use_dma
= 1;
227 module_param(use_dma
, bool, 0444);
228 MODULE_PARM_DESC(use_dma
, "enable DMA for data transferring to/from NAND HW");
230 static struct pxa3xx_nand_timing timing
[] = {
231 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
232 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
233 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
234 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
237 static struct pxa3xx_nand_flash builtin_flash_types
[] = {
238 { "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing
[0] },
239 { "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing
[1] },
240 { "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing
[1] },
241 { "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing
[1] },
242 { "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing
[2] },
243 { "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing
[2] },
244 { "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing
[2] },
245 { "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing
[2] },
246 { "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing
[3] },
249 static u8 bbt_pattern
[] = {'M', 'V', 'B', 'b', 't', '0' };
250 static u8 bbt_mirror_pattern
[] = {'1', 't', 'b', 'B', 'V', 'M' };
252 static struct nand_bbt_descr bbt_main_descr
= {
253 .options
= NAND_BBT_LASTBLOCK
| NAND_BBT_CREATE
| NAND_BBT_WRITE
254 | NAND_BBT_2BIT
| NAND_BBT_VERSION
,
258 .maxblocks
= 8, /* Last 8 blocks in each chip */
259 .pattern
= bbt_pattern
262 static struct nand_bbt_descr bbt_mirror_descr
= {
263 .options
= NAND_BBT_LASTBLOCK
| NAND_BBT_CREATE
| NAND_BBT_WRITE
264 | NAND_BBT_2BIT
| NAND_BBT_VERSION
,
268 .maxblocks
= 8, /* Last 8 blocks in each chip */
269 .pattern
= bbt_mirror_pattern
272 /* Define a default flash type setting serve as flash detecting only */
273 #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
275 #define NDTR0_tCH(c) (min((c), 7) << 19)
276 #define NDTR0_tCS(c) (min((c), 7) << 16)
277 #define NDTR0_tWH(c) (min((c), 7) << 11)
278 #define NDTR0_tWP(c) (min((c), 7) << 8)
279 #define NDTR0_tRH(c) (min((c), 7) << 3)
280 #define NDTR0_tRP(c) (min((c), 7) << 0)
282 #define NDTR1_tR(c) (min((c), 65535) << 16)
283 #define NDTR1_tWHR(c) (min((c), 15) << 4)
284 #define NDTR1_tAR(c) (min((c), 15) << 0)
286 /* convert nano-seconds to nand flash controller clock cycles */
287 #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
289 static struct of_device_id pxa3xx_nand_dt_ids
[] = {
291 .compatible
= "marvell,pxa3xx-nand",
292 .data
= (void *)PXA3XX_NAND_VARIANT_PXA
,
296 MODULE_DEVICE_TABLE(of
, pxa3xx_nand_dt_ids
);
298 static enum pxa3xx_nand_variant
299 pxa3xx_nand_get_variant(struct platform_device
*pdev
)
301 const struct of_device_id
*of_id
=
302 of_match_device(pxa3xx_nand_dt_ids
, &pdev
->dev
);
304 return PXA3XX_NAND_VARIANT_PXA
;
305 return (enum pxa3xx_nand_variant
)of_id
->data
;
308 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host
*host
,
309 const struct pxa3xx_nand_timing
*t
)
311 struct pxa3xx_nand_info
*info
= host
->info_data
;
312 unsigned long nand_clk
= clk_get_rate(info
->clk
);
313 uint32_t ndtr0
, ndtr1
;
315 ndtr0
= NDTR0_tCH(ns2cycle(t
->tCH
, nand_clk
)) |
316 NDTR0_tCS(ns2cycle(t
->tCS
, nand_clk
)) |
317 NDTR0_tWH(ns2cycle(t
->tWH
, nand_clk
)) |
318 NDTR0_tWP(ns2cycle(t
->tWP
, nand_clk
)) |
319 NDTR0_tRH(ns2cycle(t
->tRH
, nand_clk
)) |
320 NDTR0_tRP(ns2cycle(t
->tRP
, nand_clk
));
322 ndtr1
= NDTR1_tR(ns2cycle(t
->tR
, nand_clk
)) |
323 NDTR1_tWHR(ns2cycle(t
->tWHR
, nand_clk
)) |
324 NDTR1_tAR(ns2cycle(t
->tAR
, nand_clk
));
326 info
->ndtr0cs0
= ndtr0
;
327 info
->ndtr1cs0
= ndtr1
;
328 nand_writel(info
, NDTR0CS0
, ndtr0
);
329 nand_writel(info
, NDTR1CS0
, ndtr1
);
/*
 * Set the data and OOB size, depending on the selected
 * spare and ECC configuration.
 * Only applicable to READ0, READOOB and PAGEPROG commands.
 */
337 static void pxa3xx_set_datasize(struct pxa3xx_nand_info
*info
)
339 int oob_enable
= info
->reg_ndcr
& NDCR_SPARE_EN
;
341 info
->data_size
= info
->fifo_size
;
345 info
->oob_size
= info
->spare_size
;
347 info
->oob_size
+= info
->ecc_size
;
/*
 * NOTE: it is a must to set ND_RUN firstly, then write
 * command buffer, otherwise, it does not work.
 * We enable all the interrupt at the same time, and
 * let pxa3xx_nand_irq to handle all logic.
 */
356 static void pxa3xx_nand_start(struct pxa3xx_nand_info
*info
)
360 ndcr
= info
->reg_ndcr
;
365 nand_writel(info
, NDECCCTRL
, 0x1);
367 ndcr
&= ~NDCR_ECC_EN
;
369 nand_writel(info
, NDECCCTRL
, 0x0);
375 ndcr
&= ~NDCR_DMA_EN
;
378 ndcr
|= NDCR_SPARE_EN
;
380 ndcr
&= ~NDCR_SPARE_EN
;
384 /* clear status bits and run */
385 nand_writel(info
, NDCR
, 0);
386 nand_writel(info
, NDSR
, NDSR_MASK
);
387 nand_writel(info
, NDCR
, ndcr
);
390 static void pxa3xx_nand_stop(struct pxa3xx_nand_info
*info
)
393 int timeout
= NAND_STOP_DELAY
;
395 /* wait RUN bit in NDCR become 0 */
396 ndcr
= nand_readl(info
, NDCR
);
397 while ((ndcr
& NDCR_ND_RUN
) && (timeout
-- > 0)) {
398 ndcr
= nand_readl(info
, NDCR
);
403 ndcr
&= ~NDCR_ND_RUN
;
404 nand_writel(info
, NDCR
, ndcr
);
406 /* clear status bits */
407 nand_writel(info
, NDSR
, NDSR_MASK
);
410 static void __maybe_unused
411 enable_int(struct pxa3xx_nand_info
*info
, uint32_t int_mask
)
415 ndcr
= nand_readl(info
, NDCR
);
416 nand_writel(info
, NDCR
, ndcr
& ~int_mask
);
419 static void disable_int(struct pxa3xx_nand_info
*info
, uint32_t int_mask
)
423 ndcr
= nand_readl(info
, NDCR
);
424 nand_writel(info
, NDCR
, ndcr
| int_mask
);
427 static void handle_data_pio(struct pxa3xx_nand_info
*info
)
429 switch (info
->state
) {
430 case STATE_PIO_WRITING
:
431 __raw_writesl(info
->mmio_base
+ NDDB
, info
->data_buff
,
432 DIV_ROUND_UP(info
->data_size
, 4));
433 if (info
->oob_size
> 0)
434 __raw_writesl(info
->mmio_base
+ NDDB
, info
->oob_buff
,
435 DIV_ROUND_UP(info
->oob_size
, 4));
437 case STATE_PIO_READING
:
438 __raw_readsl(info
->mmio_base
+ NDDB
, info
->data_buff
,
439 DIV_ROUND_UP(info
->data_size
, 4));
440 if (info
->oob_size
> 0)
441 __raw_readsl(info
->mmio_base
+ NDDB
, info
->oob_buff
,
442 DIV_ROUND_UP(info
->oob_size
, 4));
445 dev_err(&info
->pdev
->dev
, "%s: invalid state %d\n", __func__
,
452 static void start_data_dma(struct pxa3xx_nand_info
*info
)
454 struct pxa_dma_desc
*desc
= info
->data_desc
;
455 int dma_len
= ALIGN(info
->data_size
+ info
->oob_size
, 32);
457 desc
->ddadr
= DDADR_STOP
;
458 desc
->dcmd
= DCMD_ENDIRQEN
| DCMD_WIDTH4
| DCMD_BURST32
| dma_len
;
460 switch (info
->state
) {
461 case STATE_DMA_WRITING
:
462 desc
->dsadr
= info
->data_buff_phys
;
463 desc
->dtadr
= info
->mmio_phys
+ NDDB
;
464 desc
->dcmd
|= DCMD_INCSRCADDR
| DCMD_FLOWTRG
;
466 case STATE_DMA_READING
:
467 desc
->dtadr
= info
->data_buff_phys
;
468 desc
->dsadr
= info
->mmio_phys
+ NDDB
;
469 desc
->dcmd
|= DCMD_INCTRGADDR
| DCMD_FLOWSRC
;
472 dev_err(&info
->pdev
->dev
, "%s: invalid state %d\n", __func__
,
477 DRCMR(info
->drcmr_dat
) = DRCMR_MAPVLD
| info
->data_dma_ch
;
478 DDADR(info
->data_dma_ch
) = info
->data_desc_addr
;
479 DCSR(info
->data_dma_ch
) |= DCSR_RUN
;
482 static void pxa3xx_nand_data_dma_irq(int channel
, void *data
)
484 struct pxa3xx_nand_info
*info
= data
;
487 dcsr
= DCSR(channel
);
488 DCSR(channel
) = dcsr
;
490 if (dcsr
& DCSR_BUSERR
) {
491 info
->retcode
= ERR_DMABUSERR
;
494 info
->state
= STATE_DMA_DONE
;
495 enable_int(info
, NDCR_INT_MASK
);
496 nand_writel(info
, NDSR
, NDSR_WRDREQ
| NDSR_RDDREQ
);
/* No-op stub used when the platform has no PXA DMA support. */
static void start_data_dma(struct pxa3xx_nand_info *info)
{}
503 static irqreturn_t
pxa3xx_nand_irq(int irq
, void *devid
)
505 struct pxa3xx_nand_info
*info
= devid
;
506 unsigned int status
, is_completed
= 0, is_ready
= 0;
507 unsigned int ready
, cmd_done
;
510 ready
= NDSR_FLASH_RDY
;
511 cmd_done
= NDSR_CS0_CMDD
;
514 cmd_done
= NDSR_CS1_CMDD
;
517 status
= nand_readl(info
, NDSR
);
519 if (status
& NDSR_DBERR
)
520 info
->retcode
= ERR_DBERR
;
521 if (status
& NDSR_SBERR
)
522 info
->retcode
= ERR_SBERR
;
523 if (status
& (NDSR_RDDREQ
| NDSR_WRDREQ
)) {
524 /* whether use dma to transfer data */
526 disable_int(info
, NDCR_INT_MASK
);
527 info
->state
= (status
& NDSR_RDDREQ
) ?
528 STATE_DMA_READING
: STATE_DMA_WRITING
;
529 start_data_dma(info
);
530 goto NORMAL_IRQ_EXIT
;
532 info
->state
= (status
& NDSR_RDDREQ
) ?
533 STATE_PIO_READING
: STATE_PIO_WRITING
;
534 handle_data_pio(info
);
537 if (status
& cmd_done
) {
538 info
->state
= STATE_CMD_DONE
;
541 if (status
& ready
) {
542 info
->state
= STATE_READY
;
546 if (status
& NDSR_WRCMDREQ
) {
547 nand_writel(info
, NDSR
, NDSR_WRCMDREQ
);
548 status
&= ~NDSR_WRCMDREQ
;
549 info
->state
= STATE_CMD_HANDLE
;
552 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
553 * must be loaded by writing directly either 12 or 16
554 * bytes directly to NDCB0, four bytes at a time.
556 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
557 * but each NDCBx register can be read.
559 nand_writel(info
, NDCB0
, info
->ndcb0
);
560 nand_writel(info
, NDCB0
, info
->ndcb1
);
561 nand_writel(info
, NDCB0
, info
->ndcb2
);
563 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
564 if (info
->variant
== PXA3XX_NAND_VARIANT_ARMADA370
)
565 nand_writel(info
, NDCB0
, info
->ndcb3
);
568 /* clear NDSR to let the controller exit the IRQ */
569 nand_writel(info
, NDSR
, status
);
571 complete(&info
->cmd_complete
);
573 complete(&info
->dev_ready
);
/* Return 1 iff every byte of buf[0..len) is 0xff (erased flash content). */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}
586 static void set_command_address(struct pxa3xx_nand_info
*info
,
587 unsigned int page_size
, uint16_t column
, int page_addr
)
589 /* small page addr setting */
590 if (page_size
< PAGE_CHUNK_SIZE
) {
591 info
->ndcb1
= ((page_addr
& 0xFFFFFF) << 8)
596 info
->ndcb1
= ((page_addr
& 0xFFFF) << 16)
599 if (page_addr
& 0xFF0000)
600 info
->ndcb2
= (page_addr
& 0xFF0000) >> 16;
606 static void prepare_start_command(struct pxa3xx_nand_info
*info
, int command
)
608 /* reset data and oob column point to handle data */
614 info
->retcode
= ERR_NONE
;
619 case NAND_CMD_PAGEPROG
:
621 case NAND_CMD_READOOB
:
622 pxa3xx_set_datasize(info
);
634 static int prepare_set_command(struct pxa3xx_nand_info
*info
, int command
,
635 uint16_t column
, int page_addr
)
637 int addr_cycle
, exec_cmd
;
638 struct pxa3xx_nand_host
*host
;
639 struct mtd_info
*mtd
;
641 host
= info
->host
[info
->cs
];
647 info
->ndcb0
= NDCB0_CSEL
;
651 if (command
== NAND_CMD_SEQIN
)
654 addr_cycle
= NDCB0_ADDR_CYC(host
->row_addr_cycles
655 + host
->col_addr_cycles
);
658 case NAND_CMD_READOOB
:
660 info
->buf_start
= column
;
661 info
->ndcb0
|= NDCB0_CMD_TYPE(0)
665 if (command
== NAND_CMD_READOOB
)
666 info
->buf_start
+= mtd
->writesize
;
668 /* Second command setting for large pages */
669 if (mtd
->writesize
>= PAGE_CHUNK_SIZE
)
670 info
->ndcb0
|= NDCB0_DBC
| (NAND_CMD_READSTART
<< 8);
672 set_command_address(info
, mtd
->writesize
, column
, page_addr
);
673 info
->buf_count
= mtd
->writesize
+ mtd
->oobsize
;
674 memset(info
->data_buff
, 0xFF, info
->buf_count
);
679 set_command_address(info
, mtd
->writesize
, column
, page_addr
);
680 info
->buf_count
= mtd
->writesize
+ mtd
->oobsize
;
681 memset(info
->data_buff
, 0xFF, info
->buf_count
);
685 case NAND_CMD_PAGEPROG
:
686 if (is_buf_blank(info
->data_buff
,
687 (mtd
->writesize
+ mtd
->oobsize
))) {
692 info
->ndcb0
|= NDCB0_CMD_TYPE(0x1)
696 | (NAND_CMD_PAGEPROG
<< 8)
702 info
->buf_count
= 256;
703 info
->ndcb0
|= NDCB0_CMD_TYPE(0)
707 info
->ndcb1
= (column
& 0xFF);
709 info
->data_size
= 256;
712 case NAND_CMD_READID
:
713 info
->buf_count
= host
->read_id_bytes
;
714 info
->ndcb0
|= NDCB0_CMD_TYPE(3)
717 info
->ndcb1
= (column
& 0xFF);
721 case NAND_CMD_STATUS
:
723 info
->ndcb0
|= NDCB0_CMD_TYPE(4)
730 case NAND_CMD_ERASE1
:
731 info
->ndcb0
|= NDCB0_CMD_TYPE(2)
735 | (NAND_CMD_ERASE2
<< 8)
737 info
->ndcb1
= page_addr
;
742 info
->ndcb0
|= NDCB0_CMD_TYPE(5)
747 case NAND_CMD_ERASE2
:
753 dev_err(&info
->pdev
->dev
, "non-supported command %x\n",
761 static void pxa3xx_nand_cmdfunc(struct mtd_info
*mtd
, unsigned command
,
762 int column
, int page_addr
)
764 struct pxa3xx_nand_host
*host
= mtd
->priv
;
765 struct pxa3xx_nand_info
*info
= host
->info_data
;
769 * if this is a x16 device ,then convert the input
770 * "byte" address into a "word" address appropriate
771 * for indexing a word-oriented device
773 if (info
->reg_ndcr
& NDCR_DWIDTH_M
)
777 * There may be different NAND chip hooked to
778 * different chip select, so check whether
779 * chip select has been changed, if yes, reset the timing
781 if (info
->cs
!= host
->cs
) {
783 nand_writel(info
, NDTR0CS0
, info
->ndtr0cs0
);
784 nand_writel(info
, NDTR1CS0
, info
->ndtr1cs0
);
787 prepare_start_command(info
, command
);
789 info
->state
= STATE_PREPARED
;
790 exec_cmd
= prepare_set_command(info
, command
, column
, page_addr
);
792 init_completion(&info
->cmd_complete
);
793 init_completion(&info
->dev_ready
);
795 pxa3xx_nand_start(info
);
797 ret
= wait_for_completion_timeout(&info
->cmd_complete
,
800 dev_err(&info
->pdev
->dev
, "Wait time out!!!\n");
801 /* Stop State Machine for next command cycle */
802 pxa3xx_nand_stop(info
);
805 info
->state
= STATE_IDLE
;
808 static int pxa3xx_nand_write_page_hwecc(struct mtd_info
*mtd
,
809 struct nand_chip
*chip
, const uint8_t *buf
, int oob_required
)
811 chip
->write_buf(mtd
, buf
, mtd
->writesize
);
812 chip
->write_buf(mtd
, chip
->oob_poi
, mtd
->oobsize
);
817 static int pxa3xx_nand_read_page_hwecc(struct mtd_info
*mtd
,
818 struct nand_chip
*chip
, uint8_t *buf
, int oob_required
,
821 struct pxa3xx_nand_host
*host
= mtd
->priv
;
822 struct pxa3xx_nand_info
*info
= host
->info_data
;
823 int max_bitflips
= 0;
825 chip
->read_buf(mtd
, buf
, mtd
->writesize
);
826 chip
->read_buf(mtd
, chip
->oob_poi
, mtd
->oobsize
);
828 if (info
->retcode
== ERR_SBERR
) {
829 switch (info
->use_ecc
) {
832 mtd
->ecc_stats
.corrected
++;
838 } else if (info
->retcode
== ERR_DBERR
) {
840 * for blank page (all 0xff), HW will calculate its ECC as
841 * 0, which is different from the ECC information within
842 * OOB, ignore such double bit errors
844 if (is_buf_blank(buf
, mtd
->writesize
))
845 info
->retcode
= ERR_NONE
;
847 mtd
->ecc_stats
.failed
++;
853 static uint8_t pxa3xx_nand_read_byte(struct mtd_info
*mtd
)
855 struct pxa3xx_nand_host
*host
= mtd
->priv
;
856 struct pxa3xx_nand_info
*info
= host
->info_data
;
859 if (info
->buf_start
< info
->buf_count
)
860 /* Has just send a new command? */
861 retval
= info
->data_buff
[info
->buf_start
++];
866 static u16
pxa3xx_nand_read_word(struct mtd_info
*mtd
)
868 struct pxa3xx_nand_host
*host
= mtd
->priv
;
869 struct pxa3xx_nand_info
*info
= host
->info_data
;
872 if (!(info
->buf_start
& 0x01) && info
->buf_start
< info
->buf_count
) {
873 retval
= *((u16
*)(info
->data_buff
+info
->buf_start
));
874 info
->buf_start
+= 2;
879 static void pxa3xx_nand_read_buf(struct mtd_info
*mtd
, uint8_t *buf
, int len
)
881 struct pxa3xx_nand_host
*host
= mtd
->priv
;
882 struct pxa3xx_nand_info
*info
= host
->info_data
;
883 int real_len
= min_t(size_t, len
, info
->buf_count
- info
->buf_start
);
885 memcpy(buf
, info
->data_buff
+ info
->buf_start
, real_len
);
886 info
->buf_start
+= real_len
;
889 static void pxa3xx_nand_write_buf(struct mtd_info
*mtd
,
890 const uint8_t *buf
, int len
)
892 struct pxa3xx_nand_host
*host
= mtd
->priv
;
893 struct pxa3xx_nand_info
*info
= host
->info_data
;
894 int real_len
= min_t(size_t, len
, info
->buf_count
- info
->buf_start
);
896 memcpy(info
->data_buff
+ info
->buf_start
, buf
, real_len
);
897 info
->buf_start
+= real_len
;
/* No-op: chip select handling happens in cmdfunc via info->cs / host->cs. */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}
905 static int pxa3xx_nand_waitfunc(struct mtd_info
*mtd
, struct nand_chip
*this)
907 struct pxa3xx_nand_host
*host
= mtd
->priv
;
908 struct pxa3xx_nand_info
*info
= host
->info_data
;
911 if (info
->need_wait
) {
912 ret
= wait_for_completion_timeout(&info
->dev_ready
,
916 dev_err(&info
->pdev
->dev
, "Ready time out!!!\n");
917 return NAND_STATUS_FAIL
;
921 /* pxa3xx_nand_send_command has waited for command complete */
922 if (this->state
== FL_WRITING
|| this->state
== FL_ERASING
) {
923 if (info
->retcode
== ERR_NONE
)
926 return NAND_STATUS_FAIL
;
929 return NAND_STATUS_READY
;
932 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info
*info
,
933 const struct pxa3xx_nand_flash
*f
)
935 struct platform_device
*pdev
= info
->pdev
;
936 struct pxa3xx_nand_platform_data
*pdata
= dev_get_platdata(&pdev
->dev
);
937 struct pxa3xx_nand_host
*host
= info
->host
[info
->cs
];
938 uint32_t ndcr
= 0x0; /* enable all interrupts */
940 if (f
->page_size
!= 2048 && f
->page_size
!= 512) {
941 dev_err(&pdev
->dev
, "Current only support 2048 and 512 size\n");
945 if (f
->flash_width
!= 16 && f
->flash_width
!= 8) {
946 dev_err(&pdev
->dev
, "Only support 8bit and 16 bit!\n");
950 /* calculate flash information */
951 host
->read_id_bytes
= (f
->page_size
== 2048) ? 4 : 2;
953 /* calculate addressing information */
954 host
->col_addr_cycles
= (f
->page_size
== 2048) ? 2 : 1;
956 if (f
->num_blocks
* f
->page_per_block
> 65536)
957 host
->row_addr_cycles
= 3;
959 host
->row_addr_cycles
= 2;
961 ndcr
|= (pdata
->enable_arbiter
) ? NDCR_ND_ARB_EN
: 0;
962 ndcr
|= (host
->col_addr_cycles
== 2) ? NDCR_RA_START
: 0;
963 ndcr
|= (f
->page_per_block
== 64) ? NDCR_PG_PER_BLK
: 0;
964 ndcr
|= (f
->page_size
== 2048) ? NDCR_PAGE_SZ
: 0;
965 ndcr
|= (f
->flash_width
== 16) ? NDCR_DWIDTH_M
: 0;
966 ndcr
|= (f
->dfc_width
== 16) ? NDCR_DWIDTH_C
: 0;
968 ndcr
|= NDCR_RD_ID_CNT(host
->read_id_bytes
);
969 ndcr
|= NDCR_SPARE_EN
; /* enable spare by default */
971 info
->reg_ndcr
= ndcr
;
973 pxa3xx_nand_set_timing(host
, f
->timing
);
977 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info
*info
)
980 * We set 0 by hard coding here, for we don't support keep_config
981 * when there is more than one chip attached to the controller
983 struct pxa3xx_nand_host
*host
= info
->host
[0];
984 uint32_t ndcr
= nand_readl(info
, NDCR
);
986 if (ndcr
& NDCR_PAGE_SZ
) {
987 /* Controller's FIFO size */
988 info
->fifo_size
= 2048;
989 host
->read_id_bytes
= 4;
991 info
->fifo_size
= 512;
992 host
->read_id_bytes
= 2;
995 info
->reg_ndcr
= ndcr
& ~NDCR_INT_MASK
;
996 info
->ndtr0cs0
= nand_readl(info
, NDTR0CS0
);
997 info
->ndtr1cs0
= nand_readl(info
, NDTR1CS0
);
1002 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info
*info
)
1004 struct platform_device
*pdev
= info
->pdev
;
1005 int data_desc_offset
= info
->buf_size
- sizeof(struct pxa_dma_desc
);
1008 info
->data_buff
= kmalloc(info
->buf_size
, GFP_KERNEL
);
1009 if (info
->data_buff
== NULL
)
1014 info
->data_buff
= dma_alloc_coherent(&pdev
->dev
, info
->buf_size
,
1015 &info
->data_buff_phys
, GFP_KERNEL
);
1016 if (info
->data_buff
== NULL
) {
1017 dev_err(&pdev
->dev
, "failed to allocate dma buffer\n");
1021 info
->data_desc
= (void *)info
->data_buff
+ data_desc_offset
;
1022 info
->data_desc_addr
= info
->data_buff_phys
+ data_desc_offset
;
1024 info
->data_dma_ch
= pxa_request_dma("nand-data", DMA_PRIO_LOW
,
1025 pxa3xx_nand_data_dma_irq
, info
);
1026 if (info
->data_dma_ch
< 0) {
1027 dev_err(&pdev
->dev
, "failed to request data dma\n");
1028 dma_free_coherent(&pdev
->dev
, info
->buf_size
,
1029 info
->data_buff
, info
->data_buff_phys
);
1030 return info
->data_dma_ch
;
1034 * Now that DMA buffers are allocated we turn on
1035 * DMA proper for I/O operations.
1041 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info
*info
)
1043 struct platform_device
*pdev
= info
->pdev
;
1044 if (info
->use_dma
) {
1045 pxa_free_dma(info
->data_dma_ch
);
1046 dma_free_coherent(&pdev
->dev
, info
->buf_size
,
1047 info
->data_buff
, info
->data_buff_phys
);
1049 kfree(info
->data_buff
);
1053 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info
*info
)
1055 info
->data_buff
= kmalloc(info
->buf_size
, GFP_KERNEL
);
1056 if (info
->data_buff
== NULL
)
1061 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info
*info
)
1063 kfree(info
->data_buff
);
1067 static int pxa3xx_nand_sensing(struct pxa3xx_nand_info
*info
)
1069 struct mtd_info
*mtd
;
1070 struct nand_chip
*chip
;
1073 mtd
= info
->host
[info
->cs
]->mtd
;
1076 /* use the common timing to make a try */
1077 ret
= pxa3xx_nand_config_flash(info
, &builtin_flash_types
[0]);
1081 chip
->cmdfunc(mtd
, NAND_CMD_RESET
, 0, 0);
1082 ret
= chip
->waitfunc(mtd
, chip
);
1083 if (ret
& NAND_STATUS_FAIL
)
1089 static int pxa_ecc_init(struct pxa3xx_nand_info
*info
,
1090 struct nand_ecc_ctrl
*ecc
,
1091 int strength
, int page_size
)
1094 * We don't use strength here as the PXA variant
1095 * is used with non-ONFI compliant devices.
1097 if (page_size
== 2048) {
1098 info
->spare_size
= 40;
1099 info
->ecc_size
= 24;
1100 ecc
->mode
= NAND_ECC_HW
;
1105 } else if (page_size
== 512) {
1106 info
->spare_size
= 8;
1108 ecc
->mode
= NAND_ECC_HW
;
/* NFCv2 (Armada 370/XP) ECC configuration. */
static int armada370_ecc_init(struct pxa3xx_nand_info *info,
			      struct nand_ecc_ctrl *ecc,
			      int strength, int page_size)
{
	/* Unimplemented yet */
	return 0;
}
1124 static int pxa3xx_nand_scan(struct mtd_info
*mtd
)
1126 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1127 struct pxa3xx_nand_info
*info
= host
->info_data
;
1128 struct platform_device
*pdev
= info
->pdev
;
1129 struct pxa3xx_nand_platform_data
*pdata
= dev_get_platdata(&pdev
->dev
);
1130 struct nand_flash_dev pxa3xx_flash_ids
[2], *def
= NULL
;
1131 const struct pxa3xx_nand_flash
*f
= NULL
;
1132 struct nand_chip
*chip
= mtd
->priv
;
1137 if (pdata
->keep_config
&& !pxa3xx_nand_detect_config(info
))
1140 ret
= pxa3xx_nand_sensing(info
);
1142 dev_info(&info
->pdev
->dev
, "There is no chip on cs %d!\n",
1148 chip
->cmdfunc(mtd
, NAND_CMD_READID
, 0, 0);
1149 id
= *((uint16_t *)(info
->data_buff
));
1151 dev_info(&info
->pdev
->dev
, "Detect a flash id %x\n", id
);
1153 dev_warn(&info
->pdev
->dev
,
1154 "Read out ID 0, potential timing set wrong!!\n");
1159 num
= ARRAY_SIZE(builtin_flash_types
) + pdata
->num_flash
- 1;
1160 for (i
= 0; i
< num
; i
++) {
1161 if (i
< pdata
->num_flash
)
1162 f
= pdata
->flash
+ i
;
1164 f
= &builtin_flash_types
[i
- pdata
->num_flash
+ 1];
1166 /* find the chip in default list */
1167 if (f
->chip_id
== id
)
1171 if (i
>= (ARRAY_SIZE(builtin_flash_types
) + pdata
->num_flash
- 1)) {
1172 dev_err(&info
->pdev
->dev
, "ERROR!! flash not defined!!!\n");
1177 ret
= pxa3xx_nand_config_flash(info
, f
);
1179 dev_err(&info
->pdev
->dev
, "ERROR! Configure failed\n");
1183 pxa3xx_flash_ids
[0].name
= f
->name
;
1184 pxa3xx_flash_ids
[0].dev_id
= (f
->chip_id
>> 8) & 0xffff;
1185 pxa3xx_flash_ids
[0].pagesize
= f
->page_size
;
1186 chipsize
= (uint64_t)f
->num_blocks
* f
->page_per_block
* f
->page_size
;
1187 pxa3xx_flash_ids
[0].chipsize
= chipsize
>> 20;
1188 pxa3xx_flash_ids
[0].erasesize
= f
->page_size
* f
->page_per_block
;
1189 if (f
->flash_width
== 16)
1190 pxa3xx_flash_ids
[0].options
= NAND_BUSWIDTH_16
;
1191 pxa3xx_flash_ids
[1].name
= NULL
;
1192 def
= pxa3xx_flash_ids
;
1194 if (info
->reg_ndcr
& NDCR_DWIDTH_M
)
1195 chip
->options
|= NAND_BUSWIDTH_16
;
1197 /* Device detection must be done with ECC disabled */
1198 if (info
->variant
== PXA3XX_NAND_VARIANT_ARMADA370
)
1199 nand_writel(info
, NDECCCTRL
, 0x0);
1201 if (nand_scan_ident(mtd
, 1, def
))
1204 if (pdata
->flash_bbt
) {
1206 * We'll use a bad block table stored in-flash and don't
1207 * allow writing the bad block marker to the flash.
1209 chip
->bbt_options
|= NAND_BBT_USE_FLASH
|
1210 NAND_BBT_NO_OOB_BBM
;
1211 chip
->bbt_td
= &bbt_main_descr
;
1212 chip
->bbt_md
= &bbt_mirror_descr
;
1215 if (info
->variant
== PXA3XX_NAND_VARIANT_ARMADA370
)
1216 ret
= armada370_ecc_init(info
, &chip
->ecc
,
1217 chip
->ecc_strength_ds
,
1220 ret
= pxa_ecc_init(info
, &chip
->ecc
,
1221 chip
->ecc_strength_ds
,
1224 dev_err(&info
->pdev
->dev
,
1225 "ECC strength %d at page size %d is not supported\n",
1226 chip
->ecc_strength_ds
, mtd
->writesize
);
1230 /* calculate addressing information */
1231 if (mtd
->writesize
>= 2048)
1232 host
->col_addr_cycles
= 2;
1234 host
->col_addr_cycles
= 1;
1236 /* release the initial buffer */
1237 kfree(info
->data_buff
);
1239 /* allocate the real data + oob buffer */
1240 info
->buf_size
= mtd
->writesize
+ mtd
->oobsize
;
1241 ret
= pxa3xx_nand_init_buff(info
);
1244 info
->oob_buff
= info
->data_buff
+ mtd
->writesize
;
1246 if ((mtd
->size
>> chip
->page_shift
) > 65536)
1247 host
->row_addr_cycles
= 3;
1249 host
->row_addr_cycles
= 2;
1250 return nand_scan_tail(mtd
);
1253 static int alloc_nand_resource(struct platform_device
*pdev
)
1255 struct pxa3xx_nand_platform_data
*pdata
;
1256 struct pxa3xx_nand_info
*info
;
1257 struct pxa3xx_nand_host
*host
;
1258 struct nand_chip
*chip
= NULL
;
1259 struct mtd_info
*mtd
;
1263 pdata
= dev_get_platdata(&pdev
->dev
);
1264 info
= devm_kzalloc(&pdev
->dev
, sizeof(*info
) + (sizeof(*mtd
) +
1265 sizeof(*host
)) * pdata
->num_cs
, GFP_KERNEL
);
1270 info
->variant
= pxa3xx_nand_get_variant(pdev
);
1271 for (cs
= 0; cs
< pdata
->num_cs
; cs
++) {
1272 mtd
= (struct mtd_info
*)((unsigned int)&info
[1] +
1273 (sizeof(*mtd
) + sizeof(*host
)) * cs
);
1274 chip
= (struct nand_chip
*)(&mtd
[1]);
1275 host
= (struct pxa3xx_nand_host
*)chip
;
1276 info
->host
[cs
] = host
;
1279 host
->info_data
= info
;
1281 mtd
->owner
= THIS_MODULE
;
1283 chip
->ecc
.read_page
= pxa3xx_nand_read_page_hwecc
;
1284 chip
->ecc
.write_page
= pxa3xx_nand_write_page_hwecc
;
1285 chip
->controller
= &info
->controller
;
1286 chip
->waitfunc
= pxa3xx_nand_waitfunc
;
1287 chip
->select_chip
= pxa3xx_nand_select_chip
;
1288 chip
->cmdfunc
= pxa3xx_nand_cmdfunc
;
1289 chip
->read_word
= pxa3xx_nand_read_word
;
1290 chip
->read_byte
= pxa3xx_nand_read_byte
;
1291 chip
->read_buf
= pxa3xx_nand_read_buf
;
1292 chip
->write_buf
= pxa3xx_nand_write_buf
;
1293 chip
->options
|= NAND_NO_SUBPAGE_WRITE
;
1296 spin_lock_init(&chip
->controller
->lock
);
1297 init_waitqueue_head(&chip
->controller
->wq
);
1298 info
->clk
= devm_clk_get(&pdev
->dev
, NULL
);
1299 if (IS_ERR(info
->clk
)) {
1300 dev_err(&pdev
->dev
, "failed to get nand clock\n");
1301 return PTR_ERR(info
->clk
);
1303 ret
= clk_prepare_enable(info
->clk
);
1309 * This is a dirty hack to make this driver work from
1310 * devicetree bindings. It can be removed once we have
1311 * a prober DMA controller framework for DT.
1313 if (pdev
->dev
.of_node
&&
1314 of_machine_is_compatible("marvell,pxa3xx")) {
1315 info
->drcmr_dat
= 97;
1316 info
->drcmr_cmd
= 99;
1318 r
= platform_get_resource(pdev
, IORESOURCE_DMA
, 0);
1321 "no resource defined for data DMA\n");
1323 goto fail_disable_clk
;
1325 info
->drcmr_dat
= r
->start
;
1327 r
= platform_get_resource(pdev
, IORESOURCE_DMA
, 1);
1330 "no resource defined for cmd DMA\n");
1332 goto fail_disable_clk
;
1334 info
->drcmr_cmd
= r
->start
;
1338 irq
= platform_get_irq(pdev
, 0);
1340 dev_err(&pdev
->dev
, "no IRQ resource defined\n");
1342 goto fail_disable_clk
;
1345 r
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1346 info
->mmio_base
= devm_ioremap_resource(&pdev
->dev
, r
);
1347 if (IS_ERR(info
->mmio_base
)) {
1348 ret
= PTR_ERR(info
->mmio_base
);
1349 goto fail_disable_clk
;
1351 info
->mmio_phys
= r
->start
;
1353 /* Allocate a buffer to allow flash detection */
1354 info
->buf_size
= INIT_BUFFER_SIZE
;
1355 info
->data_buff
= kmalloc(info
->buf_size
, GFP_KERNEL
);
1356 if (info
->data_buff
== NULL
) {
1358 goto fail_disable_clk
;
1361 /* initialize all interrupts to be disabled */
1362 disable_int(info
, NDSR_MASK
);
1364 ret
= request_irq(irq
, pxa3xx_nand_irq
, 0, pdev
->name
, info
);
1366 dev_err(&pdev
->dev
, "failed to request IRQ\n");
1370 platform_set_drvdata(pdev
, info
);
1375 free_irq(irq
, info
);
1376 kfree(info
->data_buff
);
1378 clk_disable_unprepare(info
->clk
);
1382 static int pxa3xx_nand_remove(struct platform_device
*pdev
)
1384 struct pxa3xx_nand_info
*info
= platform_get_drvdata(pdev
);
1385 struct pxa3xx_nand_platform_data
*pdata
;
1391 pdata
= dev_get_platdata(&pdev
->dev
);
1393 irq
= platform_get_irq(pdev
, 0);
1395 free_irq(irq
, info
);
1396 pxa3xx_nand_free_buff(info
);
1398 clk_disable_unprepare(info
->clk
);
1400 for (cs
= 0; cs
< pdata
->num_cs
; cs
++)
1401 nand_release(info
->host
[cs
]->mtd
);
1405 static int pxa3xx_nand_probe_dt(struct platform_device
*pdev
)
1407 struct pxa3xx_nand_platform_data
*pdata
;
1408 struct device_node
*np
= pdev
->dev
.of_node
;
1409 const struct of_device_id
*of_id
=
1410 of_match_device(pxa3xx_nand_dt_ids
, &pdev
->dev
);
1415 pdata
= devm_kzalloc(&pdev
->dev
, sizeof(*pdata
), GFP_KERNEL
);
1419 if (of_get_property(np
, "marvell,nand-enable-arbiter", NULL
))
1420 pdata
->enable_arbiter
= 1;
1421 if (of_get_property(np
, "marvell,nand-keep-config", NULL
))
1422 pdata
->keep_config
= 1;
1423 of_property_read_u32(np
, "num-cs", &pdata
->num_cs
);
1424 pdata
->flash_bbt
= of_get_nand_on_flash_bbt(np
);
1426 pdev
->dev
.platform_data
= pdata
;
1431 static int pxa3xx_nand_probe(struct platform_device
*pdev
)
1433 struct pxa3xx_nand_platform_data
*pdata
;
1434 struct mtd_part_parser_data ppdata
= {};
1435 struct pxa3xx_nand_info
*info
;
1436 int ret
, cs
, probe_success
;
1438 #ifndef ARCH_HAS_DMA
1441 dev_warn(&pdev
->dev
,
1442 "This platform can't do DMA on this device\n");
1445 ret
= pxa3xx_nand_probe_dt(pdev
);
1449 pdata
= dev_get_platdata(&pdev
->dev
);
1451 dev_err(&pdev
->dev
, "no platform data defined\n");
1455 ret
= alloc_nand_resource(pdev
);
1457 dev_err(&pdev
->dev
, "alloc nand resource failed\n");
1461 info
= platform_get_drvdata(pdev
);
1463 for (cs
= 0; cs
< pdata
->num_cs
; cs
++) {
1464 struct mtd_info
*mtd
= info
->host
[cs
]->mtd
;
1467 * The mtd name matches the one used in 'mtdparts' kernel
1468 * parameter. This name cannot be changed or otherwise
1469 * user's mtd partitions configuration would get broken.
1471 mtd
->name
= "pxa3xx_nand-0";
1473 ret
= pxa3xx_nand_scan(mtd
);
1475 dev_warn(&pdev
->dev
, "failed to scan nand at cs %d\n",
1480 ppdata
.of_node
= pdev
->dev
.of_node
;
1481 ret
= mtd_device_parse_register(mtd
, NULL
,
1482 &ppdata
, pdata
->parts
[cs
],
1483 pdata
->nr_parts
[cs
]);
1488 if (!probe_success
) {
1489 pxa3xx_nand_remove(pdev
);
1497 static int pxa3xx_nand_suspend(struct platform_device
*pdev
, pm_message_t state
)
1499 struct pxa3xx_nand_info
*info
= platform_get_drvdata(pdev
);
1500 struct pxa3xx_nand_platform_data
*pdata
;
1501 struct mtd_info
*mtd
;
1504 pdata
= dev_get_platdata(&pdev
->dev
);
1506 dev_err(&pdev
->dev
, "driver busy, state = %d\n", info
->state
);
1510 for (cs
= 0; cs
< pdata
->num_cs
; cs
++) {
1511 mtd
= info
->host
[cs
]->mtd
;
1518 static int pxa3xx_nand_resume(struct platform_device
*pdev
)
1520 struct pxa3xx_nand_info
*info
= platform_get_drvdata(pdev
);
1521 struct pxa3xx_nand_platform_data
*pdata
;
1522 struct mtd_info
*mtd
;
1525 pdata
= dev_get_platdata(&pdev
->dev
);
1526 /* We don't want to handle interrupt without calling mtd routine */
1527 disable_int(info
, NDCR_INT_MASK
);
1530 * Directly set the chip select to a invalid value,
1531 * then the driver would reset the timing according
1532 * to current chip select at the beginning of cmdfunc
1537 * As the spec says, the NDSR would be updated to 0x1800 when
1538 * doing the nand_clk disable/enable.
1539 * To prevent it damaging state machine of the driver, clear
1540 * all status before resume
1542 nand_writel(info
, NDSR
, NDSR_MASK
);
1543 for (cs
= 0; cs
< pdata
->num_cs
; cs
++) {
1544 mtd
= info
->host
[cs
]->mtd
;
1551 #define pxa3xx_nand_suspend NULL
1552 #define pxa3xx_nand_resume NULL
1555 static struct platform_driver pxa3xx_nand_driver
= {
1557 .name
= "pxa3xx-nand",
1558 .of_match_table
= pxa3xx_nand_dt_ids
,
1560 .probe
= pxa3xx_nand_probe
,
1561 .remove
= pxa3xx_nand_remove
,
1562 .suspend
= pxa3xx_nand_suspend
,
1563 .resume
= pxa3xx_nand_resume
,
1566 module_platform_driver(pxa3xx_nand_driver
);
1568 MODULE_LICENSE("GPL");
1569 MODULE_DESCRIPTION("PXA3xx NAND controller driver");