2 * drivers/mtd/nand/pxa3xx_nand.c
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/dmaengine.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/dma/pxa-dma.h>
21 #include <linux/delay.h>
22 #include <linux/clk.h>
23 #include <linux/mtd/mtd.h>
24 #include <linux/mtd/nand.h>
25 #include <linux/mtd/partitions.h>
27 #include <linux/iopoll.h>
28 #include <linux/irq.h>
29 #include <linux/slab.h>
31 #include <linux/of_device.h>
32 #include <linux/of_mtd.h>
34 #if defined(CONFIG_ARM) && (defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP))
38 #include <linux/platform_data/mtd-nand-pxa3xx.h>
40 #define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
41 #define NAND_STOP_DELAY msecs_to_jiffies(40)
42 #define PAGE_CHUNK_SIZE (2048)
45 * Define a buffer size for the initial command that detects the flash device:
46 * STATUS, READID and PARAM.
47 * ONFI param page is 256 bytes, and there are three redundant copies
48 * to be read. JEDEC param page is 512 bytes, and there are also three
49 * redundant copies to be read.
50 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
52 #define INIT_BUFFER_SIZE 2048
54 /* registers and bit definitions */
55 #define NDCR (0x00) /* Control register */
56 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
57 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
58 #define NDSR (0x14) /* Status Register */
59 #define NDPCR (0x18) /* Page Count Register */
60 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
61 #define NDBDR1 (0x20) /* Bad Block Register 1 */
62 #define NDECCCTRL (0x28) /* ECC control */
63 #define NDDB (0x40) /* Data Buffer */
64 #define NDCB0 (0x48) /* Command Buffer0 */
65 #define NDCB1 (0x4C) /* Command Buffer1 */
66 #define NDCB2 (0x50) /* Command Buffer2 */
68 #define NDCR_SPARE_EN (0x1 << 31)
69 #define NDCR_ECC_EN (0x1 << 30)
70 #define NDCR_DMA_EN (0x1 << 29)
71 #define NDCR_ND_RUN (0x1 << 28)
72 #define NDCR_DWIDTH_C (0x1 << 27)
73 #define NDCR_DWIDTH_M (0x1 << 26)
74 #define NDCR_PAGE_SZ (0x1 << 24)
75 #define NDCR_NCSX (0x1 << 23)
76 #define NDCR_ND_MODE (0x3 << 21)
77 #define NDCR_NAND_MODE (0x0)
78 #define NDCR_CLR_PG_CNT (0x1 << 20)
79 #define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
80 #define NFCV2_NDCR_STOP_ON_UNCOR (0x1 << 19)
81 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
82 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
84 #define NDCR_RA_START (0x1 << 15)
85 #define NDCR_PG_PER_BLK (0x1 << 14)
86 #define NDCR_ND_ARB_EN (0x1 << 12)
87 #define NDCR_INT_MASK (0xFFF)
89 #define NDSR_MASK (0xfff)
/*
 * NDSR[20:16] holds the number of bit errors corrected during the last
 * read operation (BCH mode, NFCv2).  NDSR_ERR_CNT() extracts that field
 * from a raw NDSR value.
 *
 * Note: the macro argument is parenthesized in the expansion so that
 * passing a compound expression (e.g. a ternary) cannot change the
 * intended precedence.
 */
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
#define NDSR_ERR_CNT(sr)	(((sr) >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
93 #define NDSR_RDY (0x1 << 12)
94 #define NDSR_FLASH_RDY (0x1 << 11)
95 #define NDSR_CS0_PAGED (0x1 << 10)
96 #define NDSR_CS1_PAGED (0x1 << 9)
97 #define NDSR_CS0_CMDD (0x1 << 8)
98 #define NDSR_CS1_CMDD (0x1 << 7)
99 #define NDSR_CS0_BBD (0x1 << 6)
100 #define NDSR_CS1_BBD (0x1 << 5)
101 #define NDSR_UNCORERR (0x1 << 4)
102 #define NDSR_CORERR (0x1 << 3)
103 #define NDSR_WRDREQ (0x1 << 2)
104 #define NDSR_RDDREQ (0x1 << 1)
105 #define NDSR_WRCMDREQ (0x1)
107 #define NDCB0_LEN_OVRD (0x1 << 28)
108 #define NDCB0_ST_ROW_EN (0x1 << 26)
109 #define NDCB0_AUTO_RS (0x1 << 25)
110 #define NDCB0_CSEL (0x1 << 24)
111 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
112 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
113 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
114 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
115 #define NDCB0_NC (0x1 << 20)
116 #define NDCB0_DBC (0x1 << 19)
117 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
118 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
119 #define NDCB0_CMD2_MASK (0xff << 8)
120 #define NDCB0_CMD1_MASK (0xff)
121 #define NDCB0_ADDR_CYC_SHIFT (16)
123 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
124 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
125 #define EXT_CMD_TYPE_READ 4 /* Read */
126 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
127 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
128 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
129 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
132 * This should be large enough to read 'ONFI' and 'JEDEC'.
133 * Let's use 7 bytes, which is the maximum ID count supported
134 * by the controller (see NDCR_RD_ID_CNT_MASK).
136 #define READ_ID_BYTES 7
138 /* macros for registers read/write */
139 #define nand_writel(info, off, val) \
140 writel_relaxed((val), (info)->mmio_base + (off))
142 #define nand_readl(info, off) \
143 readl_relaxed((info)->mmio_base + (off))
145 /* error code and state */
168 enum pxa3xx_nand_variant
{
169 PXA3XX_NAND_VARIANT_PXA
,
170 PXA3XX_NAND_VARIANT_ARMADA370
,
173 struct pxa3xx_nand_host
{
174 struct nand_chip chip
;
175 struct mtd_info
*mtd
;
178 /* page size of attached chip */
182 /* calculated from pxa3xx_nand_flash data */
183 unsigned int col_addr_cycles
;
184 unsigned int row_addr_cycles
;
187 struct pxa3xx_nand_info
{
188 struct nand_hw_control controller
;
189 struct platform_device
*pdev
;
192 void __iomem
*mmio_base
;
193 unsigned long mmio_phys
;
194 struct completion cmd_complete
, dev_ready
;
196 unsigned int buf_start
;
197 unsigned int buf_count
;
198 unsigned int buf_size
;
199 unsigned int data_buff_pos
;
200 unsigned int oob_buff_pos
;
202 /* DMA information */
203 struct scatterlist sg
;
204 enum dma_data_direction dma_dir
;
205 struct dma_chan
*dma_chan
;
206 dma_cookie_t dma_cookie
;
210 unsigned char *data_buff
;
211 unsigned char *oob_buff
;
212 dma_addr_t data_buff_phys
;
215 struct pxa3xx_nand_host
*host
[NUM_CHIP_SELECT
];
219 * This driver supports NFCv1 (as found in PXA SoC)
220 * and NFCv2 (as found in Armada 370/XP SoC).
222 enum pxa3xx_nand_variant variant
;
225 int use_ecc
; /* use HW ECC ? */
226 int ecc_bch
; /* using BCH ECC? */
227 int use_dma
; /* use DMA ? */
228 int use_spare
; /* use spare ? */
231 unsigned int data_size
; /* data to be read from FIFO */
232 unsigned int chunk_size
; /* split commands chunk size */
233 unsigned int oob_size
;
234 unsigned int spare_size
;
235 unsigned int ecc_size
;
236 unsigned int ecc_err_cnt
;
237 unsigned int max_bitflips
;
240 /* cached register value */
245 /* generated NDCBx register values */
252 static bool use_dma
= 1;
253 module_param(use_dma
, bool, 0444);
254 MODULE_PARM_DESC(use_dma
, "enable DMA for data transferring to/from NAND HW");
256 struct pxa3xx_nand_timing
{
257 unsigned int tCH
; /* Enable signal hold time */
258 unsigned int tCS
; /* Enable signal setup time */
259 unsigned int tWH
; /* ND_nWE high duration */
260 unsigned int tWP
; /* ND_nWE pulse time */
261 unsigned int tRH
; /* ND_nRE high duration */
262 unsigned int tRP
; /* ND_nRE pulse width */
263 unsigned int tR
; /* ND_nWE high to ND_nRE low for read */
264 unsigned int tWHR
; /* ND_nWE high to ND_nRE low for status read */
265 unsigned int tAR
; /* ND_ALE low to ND_nRE low delay */
268 struct pxa3xx_nand_flash
{
271 unsigned int page_per_block
; /* Pages per block (PG_PER_BLK) */
272 unsigned int page_size
; /* Page size in bytes (PAGE_SZ) */
273 unsigned int flash_width
; /* Width of Flash memory (DWIDTH_M) */
274 unsigned int dfc_width
; /* Width of flash controller(DWIDTH_C) */
275 unsigned int num_blocks
; /* Number of physical blocks in Flash */
277 struct pxa3xx_nand_timing
*timing
; /* NAND Flash timing */
280 static struct pxa3xx_nand_timing timing
[] = {
281 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
282 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
283 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
284 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
287 static struct pxa3xx_nand_flash builtin_flash_types
[] = {
288 { "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing
[0] },
289 { "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing
[1] },
290 { "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing
[1] },
291 { "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing
[1] },
292 { "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing
[2] },
293 { "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing
[2] },
294 { "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing
[2] },
295 { "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing
[2] },
296 { "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing
[3] },
299 static u8 bbt_pattern
[] = {'M', 'V', 'B', 'b', 't', '0' };
300 static u8 bbt_mirror_pattern
[] = {'1', 't', 'b', 'B', 'V', 'M' };
302 static struct nand_bbt_descr bbt_main_descr
= {
303 .options
= NAND_BBT_LASTBLOCK
| NAND_BBT_CREATE
| NAND_BBT_WRITE
304 | NAND_BBT_2BIT
| NAND_BBT_VERSION
,
308 .maxblocks
= 8, /* Last 8 blocks in each chip */
309 .pattern
= bbt_pattern
312 static struct nand_bbt_descr bbt_mirror_descr
= {
313 .options
= NAND_BBT_LASTBLOCK
| NAND_BBT_CREATE
| NAND_BBT_WRITE
314 | NAND_BBT_2BIT
| NAND_BBT_VERSION
,
318 .maxblocks
= 8, /* Last 8 blocks in each chip */
319 .pattern
= bbt_mirror_pattern
322 static struct nand_ecclayout ecc_layout_2KB_bch4bit
= {
325 32, 33, 34, 35, 36, 37, 38, 39,
326 40, 41, 42, 43, 44, 45, 46, 47,
327 48, 49, 50, 51, 52, 53, 54, 55,
328 56, 57, 58, 59, 60, 61, 62, 63},
329 .oobfree
= { {2, 30} }
332 static struct nand_ecclayout ecc_layout_4KB_bch4bit
= {
335 32, 33, 34, 35, 36, 37, 38, 39,
336 40, 41, 42, 43, 44, 45, 46, 47,
337 48, 49, 50, 51, 52, 53, 54, 55,
338 56, 57, 58, 59, 60, 61, 62, 63,
339 96, 97, 98, 99, 100, 101, 102, 103,
340 104, 105, 106, 107, 108, 109, 110, 111,
341 112, 113, 114, 115, 116, 117, 118, 119,
342 120, 121, 122, 123, 124, 125, 126, 127},
343 /* Bootrom looks in bytes 0 & 5 for bad blocks */
344 .oobfree
= { {6, 26}, { 64, 32} }
347 static struct nand_ecclayout ecc_layout_4KB_bch8bit
= {
350 32, 33, 34, 35, 36, 37, 38, 39,
351 40, 41, 42, 43, 44, 45, 46, 47,
352 48, 49, 50, 51, 52, 53, 54, 55,
353 56, 57, 58, 59, 60, 61, 62, 63},
357 /* Define a default flash type setting serve as flash detecting only */
358 #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
/*
 * Pack a timing value, expressed in controller clock cycles, into its
 * NDTR0/NDTR1 register field.  Each value is clamped with min() to the
 * maximum its bit-field can hold (3-bit fields max 7, 4-bit fields max
 * 15, the 16-bit tR field max 65535) before being shifted into place.
 */
360 #define NDTR0_tCH(c) (min((c), 7) << 19)
361 #define NDTR0_tCS(c) (min((c), 7) << 16)
362 #define NDTR0_tWH(c) (min((c), 7) << 11)
363 #define NDTR0_tWP(c) (min((c), 7) << 8)
364 #define NDTR0_tRH(c) (min((c), 7) << 3)
365 #define NDTR0_tRP(c) (min((c), 7) << 0)
367 #define NDTR1_tR(c) (min((c), 65535) << 16)
368 #define NDTR1_tWHR(c) (min((c), 15) << 4)
369 #define NDTR1_tAR(c) (min((c), 15) << 0)
/*
 * Convert a delay in nanoseconds to NAND flash controller clock cycles,
 * given the controller clock rate in Hz.  clk/1000000 yields MHz
 * (cycles per microsecond); multiplying by ns and dividing by 1000
 * converts to cycles, truncating toward zero.
 *
 * Both parameters and the whole expansion are parenthesized so that
 * compound arguments (e.g. "a + b" for clk) expand with the intended
 * precedence.
 */
#define ns2cycle(ns, clk)	((int)((ns) * ((clk) / 1000000) / 1000))
374 static const struct of_device_id pxa3xx_nand_dt_ids
[] = {
376 .compatible
= "marvell,pxa3xx-nand",
377 .data
= (void *)PXA3XX_NAND_VARIANT_PXA
,
380 .compatible
= "marvell,armada370-nand",
381 .data
= (void *)PXA3XX_NAND_VARIANT_ARMADA370
,
385 MODULE_DEVICE_TABLE(of
, pxa3xx_nand_dt_ids
);
387 static enum pxa3xx_nand_variant
388 pxa3xx_nand_get_variant(struct platform_device
*pdev
)
390 const struct of_device_id
*of_id
=
391 of_match_device(pxa3xx_nand_dt_ids
, &pdev
->dev
);
393 return PXA3XX_NAND_VARIANT_PXA
;
394 return (enum pxa3xx_nand_variant
)of_id
->data
;
397 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host
*host
,
398 const struct pxa3xx_nand_timing
*t
)
400 struct pxa3xx_nand_info
*info
= host
->info_data
;
401 unsigned long nand_clk
= clk_get_rate(info
->clk
);
402 uint32_t ndtr0
, ndtr1
;
404 ndtr0
= NDTR0_tCH(ns2cycle(t
->tCH
, nand_clk
)) |
405 NDTR0_tCS(ns2cycle(t
->tCS
, nand_clk
)) |
406 NDTR0_tWH(ns2cycle(t
->tWH
, nand_clk
)) |
407 NDTR0_tWP(ns2cycle(t
->tWP
, nand_clk
)) |
408 NDTR0_tRH(ns2cycle(t
->tRH
, nand_clk
)) |
409 NDTR0_tRP(ns2cycle(t
->tRP
, nand_clk
));
411 ndtr1
= NDTR1_tR(ns2cycle(t
->tR
, nand_clk
)) |
412 NDTR1_tWHR(ns2cycle(t
->tWHR
, nand_clk
)) |
413 NDTR1_tAR(ns2cycle(t
->tAR
, nand_clk
));
415 info
->ndtr0cs0
= ndtr0
;
416 info
->ndtr1cs0
= ndtr1
;
417 nand_writel(info
, NDTR0CS0
, ndtr0
);
418 nand_writel(info
, NDTR1CS0
, ndtr1
);
422 * Set the data and OOB size, depending on the selected
423 * spare and ECC configuration.
424 * Only applicable to READ0, READOOB and PAGEPROG commands.
426 static void pxa3xx_set_datasize(struct pxa3xx_nand_info
*info
,
427 struct mtd_info
*mtd
)
429 int oob_enable
= info
->reg_ndcr
& NDCR_SPARE_EN
;
431 info
->data_size
= mtd
->writesize
;
435 info
->oob_size
= info
->spare_size
;
437 info
->oob_size
+= info
->ecc_size
;
441 * NOTE: it is a must to set ND_RUN firstly, then write
442 * command buffer, otherwise, it does not work.
443 * We enable all the interrupt at the same time, and
444 * let pxa3xx_nand_irq to handle all logic.
446 static void pxa3xx_nand_start(struct pxa3xx_nand_info
*info
)
450 ndcr
= info
->reg_ndcr
;
455 nand_writel(info
, NDECCCTRL
, 0x1);
457 ndcr
&= ~NDCR_ECC_EN
;
459 nand_writel(info
, NDECCCTRL
, 0x0);
465 ndcr
&= ~NDCR_DMA_EN
;
468 ndcr
|= NDCR_SPARE_EN
;
470 ndcr
&= ~NDCR_SPARE_EN
;
474 /* clear status bits and run */
475 nand_writel(info
, NDSR
, NDSR_MASK
);
476 nand_writel(info
, NDCR
, 0);
477 nand_writel(info
, NDCR
, ndcr
);
/*
 * Stop the controller state machine: poll for the ND_RUN bit in NDCR to
 * self-clear, force it clear if the poll times out, abort any in-flight
 * DMA transfer, and clear all pending status bits so the next command
 * starts from a clean slate.
 */
480 static void pxa3xx_nand_stop(struct pxa3xx_nand_info
*info
)
483 int timeout
= NAND_STOP_DELAY
;
485 /* wait RUN bit in NDCR become 0 */
486 ndcr
= nand_readl(info
, NDCR
);
487 while ((ndcr
& NDCR_ND_RUN
) && (timeout
-- > 0)) {
488 ndcr
= nand_readl(info
, NDCR
);
/* force ND_RUN clear in case the controller never stopped on its own */
493 ndcr
&= ~NDCR_ND_RUN
;
494 nand_writel(info
, NDCR
, ndcr
);
/* abort any DMA transfer still queued on the data channel */
497 dmaengine_terminate_all(info
->dma_chan
);
499 /* clear status bits */
500 nand_writel(info
, NDSR
, NDSR_MASK
);
/*
 * Enable the interrupt sources named in int_mask.  NDCR interrupt bits
 * are *mask* bits: a set bit disables the source, so enabling means
 * clearing the corresponding bits in NDCR.
 */
503 static void __maybe_unused
504 enable_int(struct pxa3xx_nand_info
*info
, uint32_t int_mask
)
508 ndcr
= nand_readl(info
, NDCR
);
509 nand_writel(info
, NDCR
, ndcr
& ~int_mask
);
/*
 * Disable the interrupt sources named in int_mask by setting their
 * mask bits in NDCR (the inverse of enable_int() above: set = masked).
 */
512 static void disable_int(struct pxa3xx_nand_info
*info
, uint32_t int_mask
)
516 ndcr
= nand_readl(info
, NDCR
);
517 nand_writel(info
, NDCR
, ndcr
| int_mask
);
520 static void drain_fifo(struct pxa3xx_nand_info
*info
, void *data
, int len
)
527 * According to the datasheet, when reading from NDDB
528 * with BCH enabled, after each 32 bytes reads, we
529 * have to make sure that the NDSR.RDDREQ bit is set.
531 * Drain the FIFO 8 32 bits reads at a time, and skip
532 * the polling on the last read.
535 readsl(info
->mmio_base
+ NDDB
, data
, 8);
537 ret
= readl_relaxed_poll_timeout(info
->mmio_base
+ NDSR
, val
,
538 val
& NDSR_RDDREQ
, 1000, 5000);
540 dev_err(&info
->pdev
->dev
,
541 "Timeout on RDDREQ while draining the FIFO\n");
550 readsl(info
->mmio_base
+ NDDB
, data
, len
);
553 static void handle_data_pio(struct pxa3xx_nand_info
*info
)
555 unsigned int do_bytes
= min(info
->data_size
, info
->chunk_size
);
557 switch (info
->state
) {
558 case STATE_PIO_WRITING
:
559 writesl(info
->mmio_base
+ NDDB
,
560 info
->data_buff
+ info
->data_buff_pos
,
561 DIV_ROUND_UP(do_bytes
, 4));
563 if (info
->oob_size
> 0)
564 writesl(info
->mmio_base
+ NDDB
,
565 info
->oob_buff
+ info
->oob_buff_pos
,
566 DIV_ROUND_UP(info
->oob_size
, 4));
568 case STATE_PIO_READING
:
570 info
->data_buff
+ info
->data_buff_pos
,
571 DIV_ROUND_UP(do_bytes
, 4));
573 if (info
->oob_size
> 0)
575 info
->oob_buff
+ info
->oob_buff_pos
,
576 DIV_ROUND_UP(info
->oob_size
, 4));
579 dev_err(&info
->pdev
->dev
, "%s: invalid state %d\n", __func__
,
584 /* Update buffer pointers for multi-page read/write */
585 info
->data_buff_pos
+= do_bytes
;
586 info
->oob_buff_pos
+= info
->oob_size
;
587 info
->data_size
-= do_bytes
;
590 static void pxa3xx_nand_data_dma_irq(void *data
)
592 struct pxa3xx_nand_info
*info
= data
;
593 struct dma_tx_state state
;
594 enum dma_status status
;
596 status
= dmaengine_tx_status(info
->dma_chan
, info
->dma_cookie
, &state
);
597 if (likely(status
== DMA_COMPLETE
)) {
598 info
->state
= STATE_DMA_DONE
;
600 dev_err(&info
->pdev
->dev
, "DMA error on data channel\n");
601 info
->retcode
= ERR_DMABUSERR
;
603 dma_unmap_sg(info
->dma_chan
->device
->dev
, &info
->sg
, 1, info
->dma_dir
);
605 nand_writel(info
, NDSR
, NDSR_WRDREQ
| NDSR_RDDREQ
);
606 enable_int(info
, NDCR_INT_MASK
);
609 static void start_data_dma(struct pxa3xx_nand_info
*info
)
611 enum dma_transfer_direction direction
;
612 struct dma_async_tx_descriptor
*tx
;
614 switch (info
->state
) {
615 case STATE_DMA_WRITING
:
616 info
->dma_dir
= DMA_TO_DEVICE
;
617 direction
= DMA_MEM_TO_DEV
;
619 case STATE_DMA_READING
:
620 info
->dma_dir
= DMA_FROM_DEVICE
;
621 direction
= DMA_DEV_TO_MEM
;
624 dev_err(&info
->pdev
->dev
, "%s: invalid state %d\n", __func__
,
628 info
->sg
.length
= info
->data_size
+
629 (info
->oob_size
? info
->spare_size
+ info
->ecc_size
: 0);
630 dma_map_sg(info
->dma_chan
->device
->dev
, &info
->sg
, 1, info
->dma_dir
);
632 tx
= dmaengine_prep_slave_sg(info
->dma_chan
, &info
->sg
, 1, direction
,
635 dev_err(&info
->pdev
->dev
, "prep_slave_sg() failed\n");
638 tx
->callback
= pxa3xx_nand_data_dma_irq
;
639 tx
->callback_param
= info
;
640 info
->dma_cookie
= dmaengine_submit(tx
);
641 dma_async_issue_pending(info
->dma_chan
);
642 dev_dbg(&info
->pdev
->dev
, "%s(dir=%d cookie=%x size=%u)\n",
643 __func__
, direction
, info
->dma_cookie
, info
->sg
.length
);
646 static irqreturn_t
pxa3xx_nand_irq_thread(int irq
, void *data
)
648 struct pxa3xx_nand_info
*info
= data
;
650 handle_data_pio(info
);
652 info
->state
= STATE_CMD_DONE
;
653 nand_writel(info
, NDSR
, NDSR_WRDREQ
| NDSR_RDDREQ
);
658 static irqreturn_t
pxa3xx_nand_irq(int irq
, void *devid
)
660 struct pxa3xx_nand_info
*info
= devid
;
661 unsigned int status
, is_completed
= 0, is_ready
= 0;
662 unsigned int ready
, cmd_done
;
663 irqreturn_t ret
= IRQ_HANDLED
;
666 ready
= NDSR_FLASH_RDY
;
667 cmd_done
= NDSR_CS0_CMDD
;
670 cmd_done
= NDSR_CS1_CMDD
;
673 status
= nand_readl(info
, NDSR
);
675 if (status
& NDSR_UNCORERR
)
676 info
->retcode
= ERR_UNCORERR
;
677 if (status
& NDSR_CORERR
) {
678 info
->retcode
= ERR_CORERR
;
679 if (info
->variant
== PXA3XX_NAND_VARIANT_ARMADA370
&&
681 info
->ecc_err_cnt
= NDSR_ERR_CNT(status
);
683 info
->ecc_err_cnt
= 1;
686 * Each chunk composing a page is corrected independently,
687 * and we need to store maximum number of corrected bitflips
688 * to return it to the MTD layer in ecc.read_page().
690 info
->max_bitflips
= max_t(unsigned int,
694 if (status
& (NDSR_RDDREQ
| NDSR_WRDREQ
)) {
695 /* whether use dma to transfer data */
697 disable_int(info
, NDCR_INT_MASK
);
698 info
->state
= (status
& NDSR_RDDREQ
) ?
699 STATE_DMA_READING
: STATE_DMA_WRITING
;
700 start_data_dma(info
);
701 goto NORMAL_IRQ_EXIT
;
703 info
->state
= (status
& NDSR_RDDREQ
) ?
704 STATE_PIO_READING
: STATE_PIO_WRITING
;
705 ret
= IRQ_WAKE_THREAD
;
706 goto NORMAL_IRQ_EXIT
;
709 if (status
& cmd_done
) {
710 info
->state
= STATE_CMD_DONE
;
713 if (status
& ready
) {
714 info
->state
= STATE_READY
;
719 * Clear all status bit before issuing the next command, which
720 * can and will alter the status bits and will deserve a new
721 * interrupt on its own. This lets the controller exit the IRQ
723 nand_writel(info
, NDSR
, status
);
725 if (status
& NDSR_WRCMDREQ
) {
726 status
&= ~NDSR_WRCMDREQ
;
727 info
->state
= STATE_CMD_HANDLE
;
730 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
731 * must be loaded by writing directly either 12 or 16
732 * bytes directly to NDCB0, four bytes at a time.
734 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
735 * but each NDCBx register can be read.
737 nand_writel(info
, NDCB0
, info
->ndcb0
);
738 nand_writel(info
, NDCB0
, info
->ndcb1
);
739 nand_writel(info
, NDCB0
, info
->ndcb2
);
741 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
742 if (info
->variant
== PXA3XX_NAND_VARIANT_ARMADA370
)
743 nand_writel(info
, NDCB0
, info
->ndcb3
);
747 complete(&info
->cmd_complete
);
749 complete(&info
->dev_ready
);
/*
 * Scan the buffer back to front; used by callers to detect an erased
 * (all-0xFF) page, e.g. to suppress spurious uncorrectable-ECC errors
 * on blank pages and to skip programming blank data.
 * NOTE(review): the loop body comparing each byte is elided in this
 * extraction — confirm against the full source.
 */
754 static inline int is_buf_blank(uint8_t *buf
, size_t len
)
756 for (; len
> 0; len
--)
762 static void set_command_address(struct pxa3xx_nand_info
*info
,
763 unsigned int page_size
, uint16_t column
, int page_addr
)
765 /* small page addr setting */
766 if (page_size
< PAGE_CHUNK_SIZE
) {
767 info
->ndcb1
= ((page_addr
& 0xFFFFFF) << 8)
772 info
->ndcb1
= ((page_addr
& 0xFFFF) << 16)
775 if (page_addr
& 0xFF0000)
776 info
->ndcb2
= (page_addr
& 0xFF0000) >> 16;
782 static void prepare_start_command(struct pxa3xx_nand_info
*info
, int command
)
784 struct pxa3xx_nand_host
*host
= info
->host
[info
->cs
];
785 struct mtd_info
*mtd
= host
->mtd
;
787 /* reset data and oob column point to handle data */
791 info
->data_buff_pos
= 0;
792 info
->oob_buff_pos
= 0;
795 info
->retcode
= ERR_NONE
;
796 info
->ecc_err_cnt
= 0;
802 case NAND_CMD_PAGEPROG
:
804 case NAND_CMD_READOOB
:
805 pxa3xx_set_datasize(info
, mtd
);
817 * If we are about to issue a read command, or about to set
818 * the write address, then clean the data buffer.
820 if (command
== NAND_CMD_READ0
||
821 command
== NAND_CMD_READOOB
||
822 command
== NAND_CMD_SEQIN
) {
824 info
->buf_count
= mtd
->writesize
+ mtd
->oobsize
;
825 memset(info
->data_buff
, 0xFF, info
->buf_count
);
830 static int prepare_set_command(struct pxa3xx_nand_info
*info
, int command
,
831 int ext_cmd_type
, uint16_t column
, int page_addr
)
833 int addr_cycle
, exec_cmd
;
834 struct pxa3xx_nand_host
*host
;
835 struct mtd_info
*mtd
;
837 host
= info
->host
[info
->cs
];
843 info
->ndcb0
= NDCB0_CSEL
;
847 if (command
== NAND_CMD_SEQIN
)
850 addr_cycle
= NDCB0_ADDR_CYC(host
->row_addr_cycles
851 + host
->col_addr_cycles
);
854 case NAND_CMD_READOOB
:
856 info
->buf_start
= column
;
857 info
->ndcb0
|= NDCB0_CMD_TYPE(0)
861 if (command
== NAND_CMD_READOOB
)
862 info
->buf_start
+= mtd
->writesize
;
865 * Multiple page read needs an 'extended command type' field,
866 * which is either naked-read or last-read according to the
869 if (mtd
->writesize
== PAGE_CHUNK_SIZE
) {
870 info
->ndcb0
|= NDCB0_DBC
| (NAND_CMD_READSTART
<< 8);
871 } else if (mtd
->writesize
> PAGE_CHUNK_SIZE
) {
872 info
->ndcb0
|= NDCB0_DBC
| (NAND_CMD_READSTART
<< 8)
874 | NDCB0_EXT_CMD_TYPE(ext_cmd_type
);
875 info
->ndcb3
= info
->chunk_size
+
879 set_command_address(info
, mtd
->writesize
, column
, page_addr
);
884 info
->buf_start
= column
;
885 set_command_address(info
, mtd
->writesize
, 0, page_addr
);
888 * Multiple page programming needs to execute the initial
889 * SEQIN command that sets the page address.
891 if (mtd
->writesize
> PAGE_CHUNK_SIZE
) {
892 info
->ndcb0
|= NDCB0_CMD_TYPE(0x1)
893 | NDCB0_EXT_CMD_TYPE(ext_cmd_type
)
896 /* No data transfer in this case */
902 case NAND_CMD_PAGEPROG
:
903 if (is_buf_blank(info
->data_buff
,
904 (mtd
->writesize
+ mtd
->oobsize
))) {
909 /* Second command setting for large pages */
910 if (mtd
->writesize
> PAGE_CHUNK_SIZE
) {
912 * Multiple page write uses the 'extended command'
913 * field. This can be used to issue a command dispatch
914 * or a naked-write depending on the current stage.
916 info
->ndcb0
|= NDCB0_CMD_TYPE(0x1)
918 | NDCB0_EXT_CMD_TYPE(ext_cmd_type
);
919 info
->ndcb3
= info
->chunk_size
+
923 * This is the command dispatch that completes a chunked
924 * page program operation.
926 if (info
->data_size
== 0) {
927 info
->ndcb0
= NDCB0_CMD_TYPE(0x1)
928 | NDCB0_EXT_CMD_TYPE(ext_cmd_type
)
935 info
->ndcb0
|= NDCB0_CMD_TYPE(0x1)
939 | (NAND_CMD_PAGEPROG
<< 8)
946 info
->buf_count
= INIT_BUFFER_SIZE
;
947 info
->ndcb0
|= NDCB0_CMD_TYPE(0)
951 info
->ndcb1
= (column
& 0xFF);
952 info
->ndcb3
= INIT_BUFFER_SIZE
;
953 info
->data_size
= INIT_BUFFER_SIZE
;
956 case NAND_CMD_READID
:
957 info
->buf_count
= READ_ID_BYTES
;
958 info
->ndcb0
|= NDCB0_CMD_TYPE(3)
961 info
->ndcb1
= (column
& 0xFF);
965 case NAND_CMD_STATUS
:
967 info
->ndcb0
|= NDCB0_CMD_TYPE(4)
974 case NAND_CMD_ERASE1
:
975 info
->ndcb0
|= NDCB0_CMD_TYPE(2)
979 | (NAND_CMD_ERASE2
<< 8)
981 info
->ndcb1
= page_addr
;
986 info
->ndcb0
|= NDCB0_CMD_TYPE(5)
991 case NAND_CMD_ERASE2
:
997 dev_err(&info
->pdev
->dev
, "non-supported command %x\n",
1005 static void nand_cmdfunc(struct mtd_info
*mtd
, unsigned command
,
1006 int column
, int page_addr
)
1008 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1009 struct pxa3xx_nand_info
*info
= host
->info_data
;
1013 * if this is a x16 device ,then convert the input
1014 * "byte" address into a "word" address appropriate
1015 * for indexing a word-oriented device
1017 if (info
->reg_ndcr
& NDCR_DWIDTH_M
)
1021 * There may be different NAND chip hooked to
1022 * different chip select, so check whether
1023 * chip select has been changed, if yes, reset the timing
1025 if (info
->cs
!= host
->cs
) {
1026 info
->cs
= host
->cs
;
1027 nand_writel(info
, NDTR0CS0
, info
->ndtr0cs0
);
1028 nand_writel(info
, NDTR1CS0
, info
->ndtr1cs0
);
1031 prepare_start_command(info
, command
);
1033 info
->state
= STATE_PREPARED
;
1034 exec_cmd
= prepare_set_command(info
, command
, 0, column
, page_addr
);
1037 init_completion(&info
->cmd_complete
);
1038 init_completion(&info
->dev_ready
);
1039 info
->need_wait
= 1;
1040 pxa3xx_nand_start(info
);
1042 if (!wait_for_completion_timeout(&info
->cmd_complete
,
1043 CHIP_DELAY_TIMEOUT
)) {
1044 dev_err(&info
->pdev
->dev
, "Wait time out!!!\n");
1045 /* Stop State Machine for next command cycle */
1046 pxa3xx_nand_stop(info
);
1049 info
->state
= STATE_IDLE
;
1052 static void nand_cmdfunc_extended(struct mtd_info
*mtd
,
1053 const unsigned command
,
1054 int column
, int page_addr
)
1056 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1057 struct pxa3xx_nand_info
*info
= host
->info_data
;
1058 int exec_cmd
, ext_cmd_type
;
1061 * if this is a x16 device then convert the input
1062 * "byte" address into a "word" address appropriate
1063 * for indexing a word-oriented device
1065 if (info
->reg_ndcr
& NDCR_DWIDTH_M
)
1069 * There may be different NAND chip hooked to
1070 * different chip select, so check whether
1071 * chip select has been changed, if yes, reset the timing
1073 if (info
->cs
!= host
->cs
) {
1074 info
->cs
= host
->cs
;
1075 nand_writel(info
, NDTR0CS0
, info
->ndtr0cs0
);
1076 nand_writel(info
, NDTR1CS0
, info
->ndtr1cs0
);
1079 /* Select the extended command for the first command */
1081 case NAND_CMD_READ0
:
1082 case NAND_CMD_READOOB
:
1083 ext_cmd_type
= EXT_CMD_TYPE_MONO
;
1085 case NAND_CMD_SEQIN
:
1086 ext_cmd_type
= EXT_CMD_TYPE_DISPATCH
;
1088 case NAND_CMD_PAGEPROG
:
1089 ext_cmd_type
= EXT_CMD_TYPE_NAKED_RW
;
1096 prepare_start_command(info
, command
);
1099 * Prepare the "is ready" completion before starting a command
1100 * transaction sequence. If the command is not executed the
1101 * completion will be completed, see below.
1103 * We can do that inside the loop because the command variable
1104 * is invariant and thus so is the exec_cmd.
1106 info
->need_wait
= 1;
1107 init_completion(&info
->dev_ready
);
1109 info
->state
= STATE_PREPARED
;
1110 exec_cmd
= prepare_set_command(info
, command
, ext_cmd_type
,
1113 info
->need_wait
= 0;
1114 complete(&info
->dev_ready
);
1118 init_completion(&info
->cmd_complete
);
1119 pxa3xx_nand_start(info
);
1121 if (!wait_for_completion_timeout(&info
->cmd_complete
,
1122 CHIP_DELAY_TIMEOUT
)) {
1123 dev_err(&info
->pdev
->dev
, "Wait time out!!!\n");
1124 /* Stop State Machine for next command cycle */
1125 pxa3xx_nand_stop(info
);
1129 /* Check if the sequence is complete */
1130 if (info
->data_size
== 0 && command
!= NAND_CMD_PAGEPROG
)
1134 * After a splitted program command sequence has issued
1135 * the command dispatch, the command sequence is complete.
1137 if (info
->data_size
== 0 &&
1138 command
== NAND_CMD_PAGEPROG
&&
1139 ext_cmd_type
== EXT_CMD_TYPE_DISPATCH
)
1142 if (command
== NAND_CMD_READ0
|| command
== NAND_CMD_READOOB
) {
1143 /* Last read: issue a 'last naked read' */
1144 if (info
->data_size
== info
->chunk_size
)
1145 ext_cmd_type
= EXT_CMD_TYPE_LAST_RW
;
1147 ext_cmd_type
= EXT_CMD_TYPE_NAKED_RW
;
1150 * If a splitted program command has no more data to transfer,
1151 * the command dispatch must be issued to complete.
1153 } else if (command
== NAND_CMD_PAGEPROG
&&
1154 info
->data_size
== 0) {
1155 ext_cmd_type
= EXT_CMD_TYPE_DISPATCH
;
1159 info
->state
= STATE_IDLE
;
1162 static int pxa3xx_nand_write_page_hwecc(struct mtd_info
*mtd
,
1163 struct nand_chip
*chip
, const uint8_t *buf
, int oob_required
,
1166 chip
->write_buf(mtd
, buf
, mtd
->writesize
);
1167 chip
->write_buf(mtd
, chip
->oob_poi
, mtd
->oobsize
);
1172 static int pxa3xx_nand_read_page_hwecc(struct mtd_info
*mtd
,
1173 struct nand_chip
*chip
, uint8_t *buf
, int oob_required
,
1176 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1177 struct pxa3xx_nand_info
*info
= host
->info_data
;
1179 chip
->read_buf(mtd
, buf
, mtd
->writesize
);
1180 chip
->read_buf(mtd
, chip
->oob_poi
, mtd
->oobsize
);
1182 if (info
->retcode
== ERR_CORERR
&& info
->use_ecc
) {
1183 mtd
->ecc_stats
.corrected
+= info
->ecc_err_cnt
;
1185 } else if (info
->retcode
== ERR_UNCORERR
) {
1187 * for blank page (all 0xff), HW will calculate its ECC as
1188 * 0, which is different from the ECC information within
1189 * OOB, ignore such uncorrectable errors
1191 if (is_buf_blank(buf
, mtd
->writesize
))
1192 info
->retcode
= ERR_NONE
;
1194 mtd
->ecc_stats
.failed
++;
1197 return info
->max_bitflips
;
1200 static uint8_t pxa3xx_nand_read_byte(struct mtd_info
*mtd
)
1202 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1203 struct pxa3xx_nand_info
*info
= host
->info_data
;
1206 if (info
->buf_start
< info
->buf_count
)
1207 /* Has just send a new command? */
1208 retval
= info
->data_buff
[info
->buf_start
++];
/*
 * mtd read_word hook: return the next 16-bit word from the driver's
 * internal data buffer and advance the read position by two bytes.
 * Returns 0xFFFF if the position is odd (misaligned) or already past
 * the end of the buffered data.
 */
1213 static u16
pxa3xx_nand_read_word(struct mtd_info
*mtd
)
1215 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1216 struct pxa3xx_nand_info
*info
= host
->info_data
;
1217 u16 retval
= 0xFFFF;
1219 if (!(info
->buf_start
& 0x01) && info
->buf_start
< info
->buf_count
) {
1220 retval
= *((u16
*)(info
->data_buff
+info
->buf_start
));
1221 info
->buf_start
+= 2;
/*
 * mtd read_buf hook: copy up to len bytes out of the driver's internal
 * data buffer into buf, clamped to the bytes remaining between the
 * current read position and buf_count, then advance the position.
 */
1226 static void pxa3xx_nand_read_buf(struct mtd_info
*mtd
, uint8_t *buf
, int len
)
1228 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1229 struct pxa3xx_nand_info
*info
= host
->info_data
;
1230 int real_len
= min_t(size_t, len
, info
->buf_count
- info
->buf_start
);
1232 memcpy(buf
, info
->data_buff
+ info
->buf_start
, real_len
);
1233 info
->buf_start
+= real_len
;
/*
 * mtd write_buf hook: copy up to len bytes from buf into the driver's
 * internal data buffer (mirror of pxa3xx_nand_read_buf above), clamped
 * to the space remaining before buf_count, then advance the position.
 */
1236 static void pxa3xx_nand_write_buf(struct mtd_info
*mtd
,
1237 const uint8_t *buf
, int len
)
1239 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1240 struct pxa3xx_nand_info
*info
= host
->info_data
;
1241 int real_len
= min_t(size_t, len
, info
->buf_count
- info
->buf_start
);
1243 memcpy(info
->data_buff
+ info
->buf_start
, buf
, real_len
);
1244 info
->buf_start
+= real_len
;
/*
 * mtd select_chip hook.  NOTE(review): the body is not visible in this
 * extraction; chip-select handling appears to be done via info->cs in
 * the cmdfunc paths instead, which would make this a no-op — confirm
 * against the full source.
 */
1247 static void pxa3xx_nand_select_chip(struct mtd_info
*mtd
, int chip
)
1252 static int pxa3xx_nand_waitfunc(struct mtd_info
*mtd
, struct nand_chip
*this)
1254 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1255 struct pxa3xx_nand_info
*info
= host
->info_data
;
1257 if (info
->need_wait
) {
1258 info
->need_wait
= 0;
1259 if (!wait_for_completion_timeout(&info
->dev_ready
,
1260 CHIP_DELAY_TIMEOUT
)) {
1261 dev_err(&info
->pdev
->dev
, "Ready time out!!!\n");
1262 return NAND_STATUS_FAIL
;
1266 /* pxa3xx_nand_send_command has waited for command complete */
1267 if (this->state
== FL_WRITING
|| this->state
== FL_ERASING
) {
1268 if (info
->retcode
== ERR_NONE
)
1271 return NAND_STATUS_FAIL
;
1274 return NAND_STATUS_READY
;
1277 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info
*info
,
1278 const struct pxa3xx_nand_flash
*f
)
1280 struct platform_device
*pdev
= info
->pdev
;
1281 struct pxa3xx_nand_platform_data
*pdata
= dev_get_platdata(&pdev
->dev
);
1282 struct pxa3xx_nand_host
*host
= info
->host
[info
->cs
];
1283 uint32_t ndcr
= 0x0; /* enable all interrupts */
1285 if (f
->page_size
!= 2048 && f
->page_size
!= 512) {
1286 dev_err(&pdev
->dev
, "Current only support 2048 and 512 size\n");
1290 if (f
->flash_width
!= 16 && f
->flash_width
!= 8) {
1291 dev_err(&pdev
->dev
, "Only support 8bit and 16 bit!\n");
1295 /* calculate addressing information */
1296 host
->col_addr_cycles
= (f
->page_size
== 2048) ? 2 : 1;
1298 if (f
->num_blocks
* f
->page_per_block
> 65536)
1299 host
->row_addr_cycles
= 3;
1301 host
->row_addr_cycles
= 2;
1303 ndcr
|= (pdata
->enable_arbiter
) ? NDCR_ND_ARB_EN
: 0;
1304 ndcr
|= (host
->col_addr_cycles
== 2) ? NDCR_RA_START
: 0;
1305 ndcr
|= (f
->page_per_block
== 64) ? NDCR_PG_PER_BLK
: 0;
1306 ndcr
|= (f
->page_size
== 2048) ? NDCR_PAGE_SZ
: 0;
1307 ndcr
|= (f
->flash_width
== 16) ? NDCR_DWIDTH_M
: 0;
1308 ndcr
|= (f
->dfc_width
== 16) ? NDCR_DWIDTH_C
: 0;
1310 ndcr
|= NDCR_RD_ID_CNT(READ_ID_BYTES
);
1311 ndcr
|= NDCR_SPARE_EN
; /* enable spare by default */
1313 info
->reg_ndcr
= ndcr
;
1315 pxa3xx_nand_set_timing(host
, f
->timing
);
1319 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info
*info
)
1321 uint32_t ndcr
= nand_readl(info
, NDCR
);
1323 /* Set an initial chunk size */
1324 info
->chunk_size
= ndcr
& NDCR_PAGE_SZ
? 2048 : 512;
1325 info
->reg_ndcr
= ndcr
&
1326 ~(NDCR_INT_MASK
| NDCR_ND_ARB_EN
| NFCV1_NDCR_ARB_CNTL
);
1327 info
->ndtr0cs0
= nand_readl(info
, NDTR0CS0
);
1328 info
->ndtr1cs0
= nand_readl(info
, NDTR1CS0
);
1332 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info
*info
)
1334 struct platform_device
*pdev
= info
->pdev
;
1335 struct dma_slave_config config
;
1336 dma_cap_mask_t mask
;
1337 struct pxad_param param
;
1340 info
->data_buff
= kmalloc(info
->buf_size
, GFP_KERNEL
);
1341 if (info
->data_buff
== NULL
)
1346 ret
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(32));
1350 sg_init_one(&info
->sg
, info
->data_buff
, info
->buf_size
);
1352 dma_cap_set(DMA_SLAVE
, mask
);
1353 param
.prio
= PXAD_PRIO_LOWEST
;
1354 param
.drcmr
= info
->drcmr_dat
;
1355 info
->dma_chan
= dma_request_slave_channel_compat(mask
, pxad_filter_fn
,
1358 if (!info
->dma_chan
) {
1359 dev_err(&pdev
->dev
, "unable to request data dma channel\n");
1363 memset(&config
, 0, sizeof(config
));
1364 config
.src_addr_width
= DMA_SLAVE_BUSWIDTH_4_BYTES
;
1365 config
.dst_addr_width
= DMA_SLAVE_BUSWIDTH_4_BYTES
;
1366 config
.src_addr
= info
->mmio_phys
+ NDDB
;
1367 config
.dst_addr
= info
->mmio_phys
+ NDDB
;
1368 config
.src_maxburst
= 32;
1369 config
.dst_maxburst
= 32;
1370 ret
= dmaengine_slave_config(info
->dma_chan
, &config
);
1372 dev_err(&info
->pdev
->dev
,
1373 "dma channel configuration failed: %d\n",
1379 * Now that DMA buffers are allocated we turn on
1380 * DMA proper for I/O operations.
1386 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info
*info
)
1388 if (info
->use_dma
) {
1389 dmaengine_terminate_all(info
->dma_chan
);
1390 dma_release_channel(info
->dma_chan
);
1392 kfree(info
->data_buff
);
1395 static int pxa3xx_nand_sensing(struct pxa3xx_nand_info
*info
)
1397 struct mtd_info
*mtd
;
1398 struct nand_chip
*chip
;
1401 mtd
= info
->host
[info
->cs
]->mtd
;
1404 /* use the common timing to make a try */
1405 ret
= pxa3xx_nand_config_flash(info
, &builtin_flash_types
[0]);
1409 chip
->cmdfunc(mtd
, NAND_CMD_RESET
, 0, 0);
1410 ret
= chip
->waitfunc(mtd
, chip
);
1411 if (ret
& NAND_STATUS_FAIL
)
1417 static int pxa_ecc_init(struct pxa3xx_nand_info
*info
,
1418 struct nand_ecc_ctrl
*ecc
,
1419 int strength
, int ecc_stepsize
, int page_size
)
1421 if (strength
== 1 && ecc_stepsize
== 512 && page_size
== 2048) {
1422 info
->chunk_size
= 2048;
1423 info
->spare_size
= 40;
1424 info
->ecc_size
= 24;
1425 ecc
->mode
= NAND_ECC_HW
;
1429 } else if (strength
== 1 && ecc_stepsize
== 512 && page_size
== 512) {
1430 info
->chunk_size
= 512;
1431 info
->spare_size
= 8;
1433 ecc
->mode
= NAND_ECC_HW
;
1438 * Required ECC: 4-bit correction per 512 bytes
1439 * Select: 16-bit correction per 2048 bytes
1441 } else if (strength
== 4 && ecc_stepsize
== 512 && page_size
== 2048) {
1443 info
->chunk_size
= 2048;
1444 info
->spare_size
= 32;
1445 info
->ecc_size
= 32;
1446 ecc
->mode
= NAND_ECC_HW
;
1447 ecc
->size
= info
->chunk_size
;
1448 ecc
->layout
= &ecc_layout_2KB_bch4bit
;
1451 } else if (strength
== 4 && ecc_stepsize
== 512 && page_size
== 4096) {
1453 info
->chunk_size
= 2048;
1454 info
->spare_size
= 32;
1455 info
->ecc_size
= 32;
1456 ecc
->mode
= NAND_ECC_HW
;
1457 ecc
->size
= info
->chunk_size
;
1458 ecc
->layout
= &ecc_layout_4KB_bch4bit
;
1462 * Required ECC: 8-bit correction per 512 bytes
1463 * Select: 16-bit correction per 1024 bytes
1465 } else if (strength
== 8 && ecc_stepsize
== 512 && page_size
== 4096) {
1467 info
->chunk_size
= 1024;
1468 info
->spare_size
= 0;
1469 info
->ecc_size
= 32;
1470 ecc
->mode
= NAND_ECC_HW
;
1471 ecc
->size
= info
->chunk_size
;
1472 ecc
->layout
= &ecc_layout_4KB_bch8bit
;
1475 dev_err(&info
->pdev
->dev
,
1476 "ECC strength %d at page size %d is not supported\n",
1477 strength
, page_size
);
1481 dev_info(&info
->pdev
->dev
, "ECC strength %d, ECC step size %d\n",
1482 ecc
->strength
, ecc
->size
);
1486 static int pxa3xx_nand_scan(struct mtd_info
*mtd
)
1488 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1489 struct pxa3xx_nand_info
*info
= host
->info_data
;
1490 struct platform_device
*pdev
= info
->pdev
;
1491 struct pxa3xx_nand_platform_data
*pdata
= dev_get_platdata(&pdev
->dev
);
1492 struct nand_flash_dev pxa3xx_flash_ids
[2], *def
= NULL
;
1493 const struct pxa3xx_nand_flash
*f
= NULL
;
1494 struct nand_chip
*chip
= mtd
->priv
;
1498 uint16_t ecc_strength
, ecc_step
;
1500 if (pdata
->keep_config
&& !pxa3xx_nand_detect_config(info
))
1503 /* Set a default chunk size */
1504 info
->chunk_size
= 512;
1506 ret
= pxa3xx_nand_sensing(info
);
1508 dev_info(&info
->pdev
->dev
, "There is no chip on cs %d!\n",
1514 chip
->cmdfunc(mtd
, NAND_CMD_READID
, 0, 0);
1515 id
= *((uint16_t *)(info
->data_buff
));
1517 dev_info(&info
->pdev
->dev
, "Detect a flash id %x\n", id
);
1519 dev_warn(&info
->pdev
->dev
,
1520 "Read out ID 0, potential timing set wrong!!\n");
1525 num
= ARRAY_SIZE(builtin_flash_types
) - 1;
1526 for (i
= 0; i
< num
; i
++) {
1527 f
= &builtin_flash_types
[i
+ 1];
1529 /* find the chip in default list */
1530 if (f
->chip_id
== id
)
1534 if (i
>= (ARRAY_SIZE(builtin_flash_types
) - 1)) {
1535 dev_err(&info
->pdev
->dev
, "ERROR!! flash not defined!!!\n");
1540 ret
= pxa3xx_nand_config_flash(info
, f
);
1542 dev_err(&info
->pdev
->dev
, "ERROR! Configure failed\n");
1546 memset(pxa3xx_flash_ids
, 0, sizeof(pxa3xx_flash_ids
));
1548 pxa3xx_flash_ids
[0].name
= f
->name
;
1549 pxa3xx_flash_ids
[0].dev_id
= (f
->chip_id
>> 8) & 0xffff;
1550 pxa3xx_flash_ids
[0].pagesize
= f
->page_size
;
1551 chipsize
= (uint64_t)f
->num_blocks
* f
->page_per_block
* f
->page_size
;
1552 pxa3xx_flash_ids
[0].chipsize
= chipsize
>> 20;
1553 pxa3xx_flash_ids
[0].erasesize
= f
->page_size
* f
->page_per_block
;
1554 if (f
->flash_width
== 16)
1555 pxa3xx_flash_ids
[0].options
= NAND_BUSWIDTH_16
;
1556 pxa3xx_flash_ids
[1].name
= NULL
;
1557 def
= pxa3xx_flash_ids
;
1559 info
->reg_ndcr
|= (pdata
->enable_arbiter
) ? NDCR_ND_ARB_EN
: 0;
1560 if (info
->reg_ndcr
& NDCR_DWIDTH_M
)
1561 chip
->options
|= NAND_BUSWIDTH_16
;
1563 /* Device detection must be done with ECC disabled */
1564 if (info
->variant
== PXA3XX_NAND_VARIANT_ARMADA370
)
1565 nand_writel(info
, NDECCCTRL
, 0x0);
1567 if (nand_scan_ident(mtd
, 1, def
))
1570 if (pdata
->flash_bbt
) {
1572 * We'll use a bad block table stored in-flash and don't
1573 * allow writing the bad block marker to the flash.
1575 chip
->bbt_options
|= NAND_BBT_USE_FLASH
|
1576 NAND_BBT_NO_OOB_BBM
;
1577 chip
->bbt_td
= &bbt_main_descr
;
1578 chip
->bbt_md
= &bbt_mirror_descr
;
1582 * If the page size is bigger than the FIFO size, let's check
1583 * we are given the right variant and then switch to the extended
1584 * (aka splitted) command handling,
1586 if (mtd
->writesize
> PAGE_CHUNK_SIZE
) {
1587 if (info
->variant
== PXA3XX_NAND_VARIANT_ARMADA370
) {
1588 chip
->cmdfunc
= nand_cmdfunc_extended
;
1590 dev_err(&info
->pdev
->dev
,
1591 "unsupported page size on this variant\n");
1596 if (pdata
->ecc_strength
&& pdata
->ecc_step_size
) {
1597 ecc_strength
= pdata
->ecc_strength
;
1598 ecc_step
= pdata
->ecc_step_size
;
1600 ecc_strength
= chip
->ecc_strength_ds
;
1601 ecc_step
= chip
->ecc_step_ds
;
1604 /* Set default ECC strength requirements on non-ONFI devices */
1605 if (ecc_strength
< 1 && ecc_step
< 1) {
1610 ret
= pxa_ecc_init(info
, &chip
->ecc
, ecc_strength
,
1611 ecc_step
, mtd
->writesize
);
1615 /* calculate addressing information */
1616 if (mtd
->writesize
>= 2048)
1617 host
->col_addr_cycles
= 2;
1619 host
->col_addr_cycles
= 1;
1621 /* release the initial buffer */
1622 kfree(info
->data_buff
);
1624 /* allocate the real data + oob buffer */
1625 info
->buf_size
= mtd
->writesize
+ mtd
->oobsize
;
1626 ret
= pxa3xx_nand_init_buff(info
);
1629 info
->oob_buff
= info
->data_buff
+ mtd
->writesize
;
1631 if ((mtd
->size
>> chip
->page_shift
) > 65536)
1632 host
->row_addr_cycles
= 3;
1634 host
->row_addr_cycles
= 2;
1635 return nand_scan_tail(mtd
);
1638 static int alloc_nand_resource(struct platform_device
*pdev
)
1640 struct pxa3xx_nand_platform_data
*pdata
;
1641 struct pxa3xx_nand_info
*info
;
1642 struct pxa3xx_nand_host
*host
;
1643 struct nand_chip
*chip
= NULL
;
1644 struct mtd_info
*mtd
;
1648 pdata
= dev_get_platdata(&pdev
->dev
);
1649 if (pdata
->num_cs
<= 0)
1651 info
= devm_kzalloc(&pdev
->dev
, sizeof(*info
) + (sizeof(*mtd
) +
1652 sizeof(*host
)) * pdata
->num_cs
, GFP_KERNEL
);
1657 info
->variant
= pxa3xx_nand_get_variant(pdev
);
1658 for (cs
= 0; cs
< pdata
->num_cs
; cs
++) {
1659 mtd
= (void *)&info
[1] + (sizeof(*mtd
) + sizeof(*host
)) * cs
;
1660 chip
= (struct nand_chip
*)(&mtd
[1]);
1661 host
= (struct pxa3xx_nand_host
*)chip
;
1662 info
->host
[cs
] = host
;
1665 host
->info_data
= info
;
1667 mtd
->dev
.parent
= &pdev
->dev
;
1669 chip
->ecc
.read_page
= pxa3xx_nand_read_page_hwecc
;
1670 chip
->ecc
.write_page
= pxa3xx_nand_write_page_hwecc
;
1671 chip
->controller
= &info
->controller
;
1672 chip
->waitfunc
= pxa3xx_nand_waitfunc
;
1673 chip
->select_chip
= pxa3xx_nand_select_chip
;
1674 chip
->read_word
= pxa3xx_nand_read_word
;
1675 chip
->read_byte
= pxa3xx_nand_read_byte
;
1676 chip
->read_buf
= pxa3xx_nand_read_buf
;
1677 chip
->write_buf
= pxa3xx_nand_write_buf
;
1678 chip
->options
|= NAND_NO_SUBPAGE_WRITE
;
1679 chip
->cmdfunc
= nand_cmdfunc
;
1682 spin_lock_init(&chip
->controller
->lock
);
1683 init_waitqueue_head(&chip
->controller
->wq
);
1684 info
->clk
= devm_clk_get(&pdev
->dev
, NULL
);
1685 if (IS_ERR(info
->clk
)) {
1686 dev_err(&pdev
->dev
, "failed to get nand clock\n");
1687 return PTR_ERR(info
->clk
);
1689 ret
= clk_prepare_enable(info
->clk
);
1694 r
= platform_get_resource(pdev
, IORESOURCE_DMA
, 0);
1697 "no resource defined for data DMA\n");
1699 goto fail_disable_clk
;
1701 info
->drcmr_dat
= r
->start
;
1703 r
= platform_get_resource(pdev
, IORESOURCE_DMA
, 1);
1706 "no resource defined for cmd DMA\n");
1708 goto fail_disable_clk
;
1710 info
->drcmr_cmd
= r
->start
;
1713 irq
= platform_get_irq(pdev
, 0);
1715 dev_err(&pdev
->dev
, "no IRQ resource defined\n");
1717 goto fail_disable_clk
;
1720 r
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1721 info
->mmio_base
= devm_ioremap_resource(&pdev
->dev
, r
);
1722 if (IS_ERR(info
->mmio_base
)) {
1723 ret
= PTR_ERR(info
->mmio_base
);
1724 goto fail_disable_clk
;
1726 info
->mmio_phys
= r
->start
;
1728 /* Allocate a buffer to allow flash detection */
1729 info
->buf_size
= INIT_BUFFER_SIZE
;
1730 info
->data_buff
= kmalloc(info
->buf_size
, GFP_KERNEL
);
1731 if (info
->data_buff
== NULL
) {
1733 goto fail_disable_clk
;
1736 /* initialize all interrupts to be disabled */
1737 disable_int(info
, NDSR_MASK
);
1739 ret
= request_threaded_irq(irq
, pxa3xx_nand_irq
,
1740 pxa3xx_nand_irq_thread
, IRQF_ONESHOT
,
1743 dev_err(&pdev
->dev
, "failed to request IRQ\n");
1747 platform_set_drvdata(pdev
, info
);
1752 free_irq(irq
, info
);
1753 kfree(info
->data_buff
);
1755 clk_disable_unprepare(info
->clk
);
1759 static int pxa3xx_nand_remove(struct platform_device
*pdev
)
1761 struct pxa3xx_nand_info
*info
= platform_get_drvdata(pdev
);
1762 struct pxa3xx_nand_platform_data
*pdata
;
1768 pdata
= dev_get_platdata(&pdev
->dev
);
1770 irq
= platform_get_irq(pdev
, 0);
1772 free_irq(irq
, info
);
1773 pxa3xx_nand_free_buff(info
);
1776 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
1777 * In order to prevent a lockup of the system bus, the DFI bus
1778 * arbitration is granted to SMC upon driver removal. This is done by
1779 * setting the x_ARB_CNTL bit, which also prevents the NAND to have
1780 * access to the bus anymore.
1782 nand_writel(info
, NDCR
,
1783 (nand_readl(info
, NDCR
) & ~NDCR_ND_ARB_EN
) |
1784 NFCV1_NDCR_ARB_CNTL
);
1785 clk_disable_unprepare(info
->clk
);
1787 for (cs
= 0; cs
< pdata
->num_cs
; cs
++)
1788 nand_release(info
->host
[cs
]->mtd
);
1792 static int pxa3xx_nand_probe_dt(struct platform_device
*pdev
)
1794 struct pxa3xx_nand_platform_data
*pdata
;
1795 struct device_node
*np
= pdev
->dev
.of_node
;
1796 const struct of_device_id
*of_id
=
1797 of_match_device(pxa3xx_nand_dt_ids
, &pdev
->dev
);
1802 pdata
= devm_kzalloc(&pdev
->dev
, sizeof(*pdata
), GFP_KERNEL
);
1806 if (of_get_property(np
, "marvell,nand-enable-arbiter", NULL
))
1807 pdata
->enable_arbiter
= 1;
1808 if (of_get_property(np
, "marvell,nand-keep-config", NULL
))
1809 pdata
->keep_config
= 1;
1810 of_property_read_u32(np
, "num-cs", &pdata
->num_cs
);
1811 pdata
->flash_bbt
= of_get_nand_on_flash_bbt(np
);
1813 pdata
->ecc_strength
= of_get_nand_ecc_strength(np
);
1814 if (pdata
->ecc_strength
< 0)
1815 pdata
->ecc_strength
= 0;
1817 pdata
->ecc_step_size
= of_get_nand_ecc_step_size(np
);
1818 if (pdata
->ecc_step_size
< 0)
1819 pdata
->ecc_step_size
= 0;
1821 pdev
->dev
.platform_data
= pdata
;
1826 static int pxa3xx_nand_probe(struct platform_device
*pdev
)
1828 struct pxa3xx_nand_platform_data
*pdata
;
1829 struct mtd_part_parser_data ppdata
= {};
1830 struct pxa3xx_nand_info
*info
;
1831 int ret
, cs
, probe_success
, dma_available
;
1833 dma_available
= IS_ENABLED(CONFIG_ARM
) &&
1834 (IS_ENABLED(CONFIG_ARCH_PXA
) || IS_ENABLED(CONFIG_ARCH_MMP
));
1835 if (use_dma
&& !dma_available
) {
1837 dev_warn(&pdev
->dev
,
1838 "This platform can't do DMA on this device\n");
1841 ret
= pxa3xx_nand_probe_dt(pdev
);
1845 pdata
= dev_get_platdata(&pdev
->dev
);
1847 dev_err(&pdev
->dev
, "no platform data defined\n");
1851 ret
= alloc_nand_resource(pdev
);
1853 dev_err(&pdev
->dev
, "alloc nand resource failed\n");
1857 info
= platform_get_drvdata(pdev
);
1859 for (cs
= 0; cs
< pdata
->num_cs
; cs
++) {
1860 struct mtd_info
*mtd
= info
->host
[cs
]->mtd
;
1863 * The mtd name matches the one used in 'mtdparts' kernel
1864 * parameter. This name cannot be changed or otherwise
1865 * user's mtd partitions configuration would get broken.
1867 mtd
->name
= "pxa3xx_nand-0";
1869 ret
= pxa3xx_nand_scan(mtd
);
1871 dev_warn(&pdev
->dev
, "failed to scan nand at cs %d\n",
1876 ppdata
.of_node
= pdev
->dev
.of_node
;
1877 ret
= mtd_device_parse_register(mtd
, NULL
,
1878 &ppdata
, pdata
->parts
[cs
],
1879 pdata
->nr_parts
[cs
]);
1884 if (!probe_success
) {
1885 pxa3xx_nand_remove(pdev
);
1893 static int pxa3xx_nand_suspend(struct platform_device
*pdev
, pm_message_t state
)
1895 struct pxa3xx_nand_info
*info
= platform_get_drvdata(pdev
);
1896 struct pxa3xx_nand_platform_data
*pdata
;
1897 struct mtd_info
*mtd
;
1900 pdata
= dev_get_platdata(&pdev
->dev
);
1902 dev_err(&pdev
->dev
, "driver busy, state = %d\n", info
->state
);
1906 for (cs
= 0; cs
< pdata
->num_cs
; cs
++) {
1907 mtd
= info
->host
[cs
]->mtd
;
1914 static int pxa3xx_nand_resume(struct platform_device
*pdev
)
1916 struct pxa3xx_nand_info
*info
= platform_get_drvdata(pdev
);
1917 struct pxa3xx_nand_platform_data
*pdata
;
1918 struct mtd_info
*mtd
;
1921 pdata
= dev_get_platdata(&pdev
->dev
);
1922 /* We don't want to handle interrupt without calling mtd routine */
1923 disable_int(info
, NDCR_INT_MASK
);
1926 * Directly set the chip select to a invalid value,
1927 * then the driver would reset the timing according
1928 * to current chip select at the beginning of cmdfunc
1933 * As the spec says, the NDSR would be updated to 0x1800 when
1934 * doing the nand_clk disable/enable.
1935 * To prevent it damaging state machine of the driver, clear
1936 * all status before resume
1938 nand_writel(info
, NDSR
, NDSR_MASK
);
1939 for (cs
= 0; cs
< pdata
->num_cs
; cs
++) {
1940 mtd
= info
->host
[cs
]->mtd
;
1947 #define pxa3xx_nand_suspend NULL
1948 #define pxa3xx_nand_resume NULL
1951 static struct platform_driver pxa3xx_nand_driver
= {
1953 .name
= "pxa3xx-nand",
1954 .of_match_table
= pxa3xx_nand_dt_ids
,
1956 .probe
= pxa3xx_nand_probe
,
1957 .remove
= pxa3xx_nand_remove
,
1958 .suspend
= pxa3xx_nand_suspend
,
1959 .resume
= pxa3xx_nand_resume
,
1962 module_platform_driver(pxa3xx_nand_driver
);
1964 MODULE_LICENSE("GPL");
1965 MODULE_DESCRIPTION("PXA3xx NAND controller driver");