/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
 */
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/dmaengine.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/dma/pxa-dma.h>
21 #include <linux/delay.h>
22 #include <linux/clk.h>
23 #include <linux/mtd/mtd.h>
24 #include <linux/mtd/nand.h>
25 #include <linux/mtd/partitions.h>
27 #include <linux/iopoll.h>
28 #include <linux/irq.h>
29 #include <linux/slab.h>
31 #include <linux/of_device.h>
32 #include <linux/of_mtd.h>
34 #if defined(CONFIG_ARM) && (defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP))
38 #include <linux/platform_data/mtd-nand-pxa3xx.h>
40 #define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
41 #define NAND_STOP_DELAY msecs_to_jiffies(40)
42 #define PAGE_CHUNK_SIZE (2048)
45 * Define a buffer size for the initial command that detects the flash device:
46 * STATUS, READID and PARAM.
47 * ONFI param page is 256 bytes, and there are three redundant copies
48 * to be read. JEDEC param page is 512 bytes, and there are also three
49 * redundant copies to be read.
50 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
52 #define INIT_BUFFER_SIZE 2048
54 /* registers and bit definitions */
55 #define NDCR (0x00) /* Control register */
56 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
57 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
58 #define NDSR (0x14) /* Status Register */
59 #define NDPCR (0x18) /* Page Count Register */
60 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
61 #define NDBDR1 (0x20) /* Bad Block Register 1 */
62 #define NDECCCTRL (0x28) /* ECC control */
63 #define NDDB (0x40) /* Data Buffer */
64 #define NDCB0 (0x48) /* Command Buffer0 */
65 #define NDCB1 (0x4C) /* Command Buffer1 */
66 #define NDCB2 (0x50) /* Command Buffer2 */
/* NDCR: NAND controller control register bit fields */
#define NDCR_SPARE_EN		(0x1U << 31)	/* unsigned: shifting signed 1 into bit 31 is UB */
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NFCV1_NDCR_ARB_CNTL	(0x1 << 19)
#define NFCV2_NDCR_STOP_ON_UNCOR	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
#define NDCR_INT_MASK		(0xFFF)
89 #define NDSR_MASK (0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
/* Parenthesize the argument so expressions like (a | b) expand correctly. */
#define NDSR_ERR_CNT(sr)	(((sr) >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
93 #define NDSR_RDY (0x1 << 12)
94 #define NDSR_FLASH_RDY (0x1 << 11)
95 #define NDSR_CS0_PAGED (0x1 << 10)
96 #define NDSR_CS1_PAGED (0x1 << 9)
97 #define NDSR_CS0_CMDD (0x1 << 8)
98 #define NDSR_CS1_CMDD (0x1 << 7)
99 #define NDSR_CS0_BBD (0x1 << 6)
100 #define NDSR_CS1_BBD (0x1 << 5)
101 #define NDSR_UNCORERR (0x1 << 4)
102 #define NDSR_CORERR (0x1 << 3)
103 #define NDSR_WRDREQ (0x1 << 2)
104 #define NDSR_RDDREQ (0x1 << 1)
105 #define NDSR_WRCMDREQ (0x1)
/* NDCB0: command buffer 0 bit fields */
#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
/* Bits 31:29 — 0x7 << 29 overflows a signed int (UB), so keep it unsigned. */
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7U << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((uint32_t)(x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)
123 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
124 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
125 #define EXT_CMD_TYPE_READ 4 /* Read */
126 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
127 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
128 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
129 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
132 * This should be large enough to read 'ONFI' and 'JEDEC'.
133 * Let's use 7 bytes, which is the maximum ID count supported
134 * by the controller (see NDCR_RD_ID_CNT_MASK).
136 #define READ_ID_BYTES 7
138 /* macros for registers read/write */
139 #define nand_writel(info, off, val) \
140 writel_relaxed((val), (info)->mmio_base + (off))
142 #define nand_readl(info, off) \
143 readl_relaxed((info)->mmio_base + (off))
145 /* error code and state */
168 enum pxa3xx_nand_variant
{
169 PXA3XX_NAND_VARIANT_PXA
,
170 PXA3XX_NAND_VARIANT_ARMADA370
,
173 struct pxa3xx_nand_host
{
174 struct nand_chip chip
;
175 struct mtd_info
*mtd
;
178 /* page size of attached chip */
182 /* calculated from pxa3xx_nand_flash data */
183 unsigned int col_addr_cycles
;
184 unsigned int row_addr_cycles
;
187 struct pxa3xx_nand_info
{
188 struct nand_hw_control controller
;
189 struct platform_device
*pdev
;
192 void __iomem
*mmio_base
;
193 unsigned long mmio_phys
;
194 struct completion cmd_complete
, dev_ready
;
196 unsigned int buf_start
;
197 unsigned int buf_count
;
198 unsigned int buf_size
;
199 unsigned int data_buff_pos
;
200 unsigned int oob_buff_pos
;
202 /* DMA information */
203 struct scatterlist sg
;
204 enum dma_data_direction dma_dir
;
205 struct dma_chan
*dma_chan
;
206 dma_cookie_t dma_cookie
;
210 unsigned char *data_buff
;
211 unsigned char *oob_buff
;
212 dma_addr_t data_buff_phys
;
215 struct pxa3xx_nand_host
*host
[NUM_CHIP_SELECT
];
219 * This driver supports NFCv1 (as found in PXA SoC)
220 * and NFCv2 (as found in Armada 370/XP SoC).
222 enum pxa3xx_nand_variant variant
;
225 int use_ecc
; /* use HW ECC ? */
226 int ecc_bch
; /* using BCH ECC? */
227 int use_dma
; /* use DMA ? */
228 int use_spare
; /* use spare ? */
231 unsigned int data_size
; /* data to be read from FIFO */
232 unsigned int chunk_size
; /* split commands chunk size */
233 unsigned int oob_size
;
234 unsigned int spare_size
;
235 unsigned int ecc_size
;
236 unsigned int ecc_err_cnt
;
237 unsigned int max_bitflips
;
240 /* cached register value */
245 /* generated NDCBx register values */
252 static bool use_dma
= 1;
253 module_param(use_dma
, bool, 0444);
254 MODULE_PARM_DESC(use_dma
, "enable DMA for data transferring to/from NAND HW");
256 struct pxa3xx_nand_timing
{
257 unsigned int tCH
; /* Enable signal hold time */
258 unsigned int tCS
; /* Enable signal setup time */
259 unsigned int tWH
; /* ND_nWE high duration */
260 unsigned int tWP
; /* ND_nWE pulse time */
261 unsigned int tRH
; /* ND_nRE high duration */
262 unsigned int tRP
; /* ND_nRE pulse width */
263 unsigned int tR
; /* ND_nWE high to ND_nRE low for read */
264 unsigned int tWHR
; /* ND_nWE high to ND_nRE low for status read */
265 unsigned int tAR
; /* ND_ALE low to ND_nRE low delay */
268 struct pxa3xx_nand_flash
{
271 unsigned int page_per_block
; /* Pages per block (PG_PER_BLK) */
272 unsigned int page_size
; /* Page size in bytes (PAGE_SZ) */
273 unsigned int flash_width
; /* Width of Flash memory (DWIDTH_M) */
274 unsigned int dfc_width
; /* Width of flash controller(DWIDTH_C) */
275 unsigned int num_blocks
; /* Number of physical blocks in Flash */
277 struct pxa3xx_nand_timing
*timing
; /* NAND Flash timing */
280 static struct pxa3xx_nand_timing timing
[] = {
281 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
282 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
283 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
284 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
287 static struct pxa3xx_nand_flash builtin_flash_types
[] = {
288 { "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing
[0] },
289 { "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing
[1] },
290 { "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing
[1] },
291 { "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing
[1] },
292 { "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing
[2] },
293 { "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing
[2] },
294 { "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing
[2] },
295 { "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing
[2] },
296 { "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing
[3] },
299 static u8 bbt_pattern
[] = {'M', 'V', 'B', 'b', 't', '0' };
300 static u8 bbt_mirror_pattern
[] = {'1', 't', 'b', 'B', 'V', 'M' };
302 static struct nand_bbt_descr bbt_main_descr
= {
303 .options
= NAND_BBT_LASTBLOCK
| NAND_BBT_CREATE
| NAND_BBT_WRITE
304 | NAND_BBT_2BIT
| NAND_BBT_VERSION
,
308 .maxblocks
= 8, /* Last 8 blocks in each chip */
309 .pattern
= bbt_pattern
312 static struct nand_bbt_descr bbt_mirror_descr
= {
313 .options
= NAND_BBT_LASTBLOCK
| NAND_BBT_CREATE
| NAND_BBT_WRITE
314 | NAND_BBT_2BIT
| NAND_BBT_VERSION
,
318 .maxblocks
= 8, /* Last 8 blocks in each chip */
319 .pattern
= bbt_mirror_pattern
322 static struct nand_ecclayout ecc_layout_2KB_bch4bit
= {
325 32, 33, 34, 35, 36, 37, 38, 39,
326 40, 41, 42, 43, 44, 45, 46, 47,
327 48, 49, 50, 51, 52, 53, 54, 55,
328 56, 57, 58, 59, 60, 61, 62, 63},
329 .oobfree
= { {2, 30} }
332 static struct nand_ecclayout ecc_layout_4KB_bch4bit
= {
335 32, 33, 34, 35, 36, 37, 38, 39,
336 40, 41, 42, 43, 44, 45, 46, 47,
337 48, 49, 50, 51, 52, 53, 54, 55,
338 56, 57, 58, 59, 60, 61, 62, 63,
339 96, 97, 98, 99, 100, 101, 102, 103,
340 104, 105, 106, 107, 108, 109, 110, 111,
341 112, 113, 114, 115, 116, 117, 118, 119,
342 120, 121, 122, 123, 124, 125, 126, 127},
343 /* Bootrom looks in bytes 0 & 5 for bad blocks */
344 .oobfree
= { {6, 26}, { 64, 32} }
347 static struct nand_ecclayout ecc_layout_4KB_bch8bit
= {
350 32, 33, 34, 35, 36, 37, 38, 39,
351 40, 41, 42, 43, 44, 45, 46, 47,
352 48, 49, 50, 51, 52, 53, 54, 55,
353 56, 57, 58, 59, 60, 61, 62, 63},
357 /* Define a default flash type setting serve as flash detecting only */
358 #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
360 #define NDTR0_tCH(c) (min((c), 7) << 19)
361 #define NDTR0_tCS(c) (min((c), 7) << 16)
362 #define NDTR0_tWH(c) (min((c), 7) << 11)
363 #define NDTR0_tWP(c) (min((c), 7) << 8)
364 #define NDTR0_tRH(c) (min((c), 7) << 3)
365 #define NDTR0_tRP(c) (min((c), 7) << 0)
367 #define NDTR1_tR(c) (min((c), 65535) << 16)
368 #define NDTR1_tWHR(c) (min((c), 15) << 4)
369 #define NDTR1_tAR(c) (min((c), 15) << 0)
/*
 * Convert nanoseconds to NAND flash controller clock cycles.
 * Both arguments are fully parenthesized so callers may pass expressions.
 */
#define ns2cycle(ns, clk)	((int)((ns) * ((clk) / 1000000) / 1000))
374 static const struct of_device_id pxa3xx_nand_dt_ids
[] = {
376 .compatible
= "marvell,pxa3xx-nand",
377 .data
= (void *)PXA3XX_NAND_VARIANT_PXA
,
380 .compatible
= "marvell,armada370-nand",
381 .data
= (void *)PXA3XX_NAND_VARIANT_ARMADA370
,
385 MODULE_DEVICE_TABLE(of
, pxa3xx_nand_dt_ids
);
387 static enum pxa3xx_nand_variant
388 pxa3xx_nand_get_variant(struct platform_device
*pdev
)
390 const struct of_device_id
*of_id
=
391 of_match_device(pxa3xx_nand_dt_ids
, &pdev
->dev
);
393 return PXA3XX_NAND_VARIANT_PXA
;
394 return (enum pxa3xx_nand_variant
)of_id
->data
;
397 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host
*host
,
398 const struct pxa3xx_nand_timing
*t
)
400 struct pxa3xx_nand_info
*info
= host
->info_data
;
401 unsigned long nand_clk
= clk_get_rate(info
->clk
);
402 uint32_t ndtr0
, ndtr1
;
404 ndtr0
= NDTR0_tCH(ns2cycle(t
->tCH
, nand_clk
)) |
405 NDTR0_tCS(ns2cycle(t
->tCS
, nand_clk
)) |
406 NDTR0_tWH(ns2cycle(t
->tWH
, nand_clk
)) |
407 NDTR0_tWP(ns2cycle(t
->tWP
, nand_clk
)) |
408 NDTR0_tRH(ns2cycle(t
->tRH
, nand_clk
)) |
409 NDTR0_tRP(ns2cycle(t
->tRP
, nand_clk
));
411 ndtr1
= NDTR1_tR(ns2cycle(t
->tR
, nand_clk
)) |
412 NDTR1_tWHR(ns2cycle(t
->tWHR
, nand_clk
)) |
413 NDTR1_tAR(ns2cycle(t
->tAR
, nand_clk
));
415 info
->ndtr0cs0
= ndtr0
;
416 info
->ndtr1cs0
= ndtr1
;
417 nand_writel(info
, NDTR0CS0
, ndtr0
);
418 nand_writel(info
, NDTR1CS0
, ndtr1
);
421 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host
*host
,
422 const struct nand_sdr_timings
*t
)
424 struct pxa3xx_nand_info
*info
= host
->info_data
;
425 struct nand_chip
*chip
= &host
->chip
;
426 unsigned long nand_clk
= clk_get_rate(info
->clk
);
427 uint32_t ndtr0
, ndtr1
;
429 u32 tCH_min
= DIV_ROUND_UP(t
->tCH_min
, 1000);
430 u32 tCS_min
= DIV_ROUND_UP(t
->tCS_min
, 1000);
431 u32 tWH_min
= DIV_ROUND_UP(t
->tWH_min
, 1000);
432 u32 tWP_min
= DIV_ROUND_UP(t
->tWC_min
- t
->tWH_min
, 1000);
433 u32 tREH_min
= DIV_ROUND_UP(t
->tREH_min
, 1000);
434 u32 tRP_min
= DIV_ROUND_UP(t
->tRC_min
- t
->tREH_min
, 1000);
435 u32 tR
= chip
->chip_delay
* 1000;
436 u32 tWHR_min
= DIV_ROUND_UP(t
->tWHR_min
, 1000);
437 u32 tAR_min
= DIV_ROUND_UP(t
->tAR_min
, 1000);
439 /* fallback to a default value if tR = 0 */
443 ndtr0
= NDTR0_tCH(ns2cycle(tCH_min
, nand_clk
)) |
444 NDTR0_tCS(ns2cycle(tCS_min
, nand_clk
)) |
445 NDTR0_tWH(ns2cycle(tWH_min
, nand_clk
)) |
446 NDTR0_tWP(ns2cycle(tWP_min
, nand_clk
)) |
447 NDTR0_tRH(ns2cycle(tREH_min
, nand_clk
)) |
448 NDTR0_tRP(ns2cycle(tRP_min
, nand_clk
));
450 ndtr1
= NDTR1_tR(ns2cycle(tR
, nand_clk
)) |
451 NDTR1_tWHR(ns2cycle(tWHR_min
, nand_clk
)) |
452 NDTR1_tAR(ns2cycle(tAR_min
, nand_clk
));
454 info
->ndtr0cs0
= ndtr0
;
455 info
->ndtr1cs0
= ndtr1
;
456 nand_writel(info
, NDTR0CS0
, ndtr0
);
457 nand_writel(info
, NDTR1CS0
, ndtr1
);
460 static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host
*host
,
461 unsigned int *flash_width
,
462 unsigned int *dfc_width
)
464 struct nand_chip
*chip
= &host
->chip
;
465 struct pxa3xx_nand_info
*info
= host
->info_data
;
466 const struct pxa3xx_nand_flash
*f
= NULL
;
469 ntypes
= ARRAY_SIZE(builtin_flash_types
);
471 chip
->cmdfunc(host
->mtd
, NAND_CMD_READID
, 0x00, -1);
473 id
= chip
->read_byte(host
->mtd
);
474 id
|= chip
->read_byte(host
->mtd
) << 0x8;
476 for (i
= 0; i
< ntypes
; i
++) {
477 f
= &builtin_flash_types
[i
];
479 if (f
->chip_id
== id
)
484 dev_err(&info
->pdev
->dev
, "Error: timings not found\n");
488 pxa3xx_nand_set_timing(host
, f
->timing
);
490 *flash_width
= f
->flash_width
;
491 *dfc_width
= f
->dfc_width
;
496 static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host
*host
,
499 const struct nand_sdr_timings
*timings
;
501 mode
= fls(mode
) - 1;
505 timings
= onfi_async_timing_mode_to_sdr_timings(mode
);
507 return PTR_ERR(timings
);
509 pxa3xx_nand_set_sdr_timing(host
, timings
);
514 static int pxa3xx_nand_init(struct pxa3xx_nand_host
*host
)
516 struct nand_chip
*chip
= &host
->chip
;
517 struct pxa3xx_nand_info
*info
= host
->info_data
;
518 unsigned int flash_width
= 0, dfc_width
= 0;
521 mode
= onfi_get_async_timing_mode(chip
);
522 if (mode
== ONFI_TIMING_MODE_UNKNOWN
) {
523 err
= pxa3xx_nand_init_timings_compat(host
, &flash_width
,
528 if (flash_width
== 16) {
529 info
->reg_ndcr
|= NDCR_DWIDTH_M
;
530 chip
->options
|= NAND_BUSWIDTH_16
;
533 info
->reg_ndcr
|= (dfc_width
== 16) ? NDCR_DWIDTH_C
: 0;
535 err
= pxa3xx_nand_init_timings_onfi(host
, mode
);
544 * Set the data and OOB size, depending on the selected
545 * spare and ECC configuration.
546 * Only applicable to READ0, READOOB and PAGEPROG commands.
548 static void pxa3xx_set_datasize(struct pxa3xx_nand_info
*info
,
549 struct mtd_info
*mtd
)
551 int oob_enable
= info
->reg_ndcr
& NDCR_SPARE_EN
;
553 info
->data_size
= mtd
->writesize
;
557 info
->oob_size
= info
->spare_size
;
559 info
->oob_size
+= info
->ecc_size
;
563 * NOTE: it is a must to set ND_RUN firstly, then write
564 * command buffer, otherwise, it does not work.
565 * We enable all the interrupt at the same time, and
566 * let pxa3xx_nand_irq to handle all logic.
568 static void pxa3xx_nand_start(struct pxa3xx_nand_info
*info
)
572 ndcr
= info
->reg_ndcr
;
577 nand_writel(info
, NDECCCTRL
, 0x1);
579 ndcr
&= ~NDCR_ECC_EN
;
581 nand_writel(info
, NDECCCTRL
, 0x0);
587 ndcr
&= ~NDCR_DMA_EN
;
590 ndcr
|= NDCR_SPARE_EN
;
592 ndcr
&= ~NDCR_SPARE_EN
;
596 /* clear status bits and run */
597 nand_writel(info
, NDSR
, NDSR_MASK
);
598 nand_writel(info
, NDCR
, 0);
599 nand_writel(info
, NDCR
, ndcr
);
602 static void pxa3xx_nand_stop(struct pxa3xx_nand_info
*info
)
605 int timeout
= NAND_STOP_DELAY
;
607 /* wait RUN bit in NDCR become 0 */
608 ndcr
= nand_readl(info
, NDCR
);
609 while ((ndcr
& NDCR_ND_RUN
) && (timeout
-- > 0)) {
610 ndcr
= nand_readl(info
, NDCR
);
615 ndcr
&= ~NDCR_ND_RUN
;
616 nand_writel(info
, NDCR
, ndcr
);
619 dmaengine_terminate_all(info
->dma_chan
);
621 /* clear status bits */
622 nand_writel(info
, NDSR
, NDSR_MASK
);
625 static void __maybe_unused
626 enable_int(struct pxa3xx_nand_info
*info
, uint32_t int_mask
)
630 ndcr
= nand_readl(info
, NDCR
);
631 nand_writel(info
, NDCR
, ndcr
& ~int_mask
);
634 static void disable_int(struct pxa3xx_nand_info
*info
, uint32_t int_mask
)
638 ndcr
= nand_readl(info
, NDCR
);
639 nand_writel(info
, NDCR
, ndcr
| int_mask
);
642 static void drain_fifo(struct pxa3xx_nand_info
*info
, void *data
, int len
)
649 * According to the datasheet, when reading from NDDB
650 * with BCH enabled, after each 32 bytes reads, we
651 * have to make sure that the NDSR.RDDREQ bit is set.
653 * Drain the FIFO 8 32 bits reads at a time, and skip
654 * the polling on the last read.
657 ioread32_rep(info
->mmio_base
+ NDDB
, data
, 8);
659 ret
= readl_relaxed_poll_timeout(info
->mmio_base
+ NDSR
, val
,
660 val
& NDSR_RDDREQ
, 1000, 5000);
662 dev_err(&info
->pdev
->dev
,
663 "Timeout on RDDREQ while draining the FIFO\n");
672 ioread32_rep(info
->mmio_base
+ NDDB
, data
, len
);
675 static void handle_data_pio(struct pxa3xx_nand_info
*info
)
677 unsigned int do_bytes
= min(info
->data_size
, info
->chunk_size
);
679 switch (info
->state
) {
680 case STATE_PIO_WRITING
:
681 writesl(info
->mmio_base
+ NDDB
,
682 info
->data_buff
+ info
->data_buff_pos
,
683 DIV_ROUND_UP(do_bytes
, 4));
685 if (info
->oob_size
> 0)
686 writesl(info
->mmio_base
+ NDDB
,
687 info
->oob_buff
+ info
->oob_buff_pos
,
688 DIV_ROUND_UP(info
->oob_size
, 4));
690 case STATE_PIO_READING
:
692 info
->data_buff
+ info
->data_buff_pos
,
693 DIV_ROUND_UP(do_bytes
, 4));
695 if (info
->oob_size
> 0)
697 info
->oob_buff
+ info
->oob_buff_pos
,
698 DIV_ROUND_UP(info
->oob_size
, 4));
701 dev_err(&info
->pdev
->dev
, "%s: invalid state %d\n", __func__
,
706 /* Update buffer pointers for multi-page read/write */
707 info
->data_buff_pos
+= do_bytes
;
708 info
->oob_buff_pos
+= info
->oob_size
;
709 info
->data_size
-= do_bytes
;
712 static void pxa3xx_nand_data_dma_irq(void *data
)
714 struct pxa3xx_nand_info
*info
= data
;
715 struct dma_tx_state state
;
716 enum dma_status status
;
718 status
= dmaengine_tx_status(info
->dma_chan
, info
->dma_cookie
, &state
);
719 if (likely(status
== DMA_COMPLETE
)) {
720 info
->state
= STATE_DMA_DONE
;
722 dev_err(&info
->pdev
->dev
, "DMA error on data channel\n");
723 info
->retcode
= ERR_DMABUSERR
;
725 dma_unmap_sg(info
->dma_chan
->device
->dev
, &info
->sg
, 1, info
->dma_dir
);
727 nand_writel(info
, NDSR
, NDSR_WRDREQ
| NDSR_RDDREQ
);
728 enable_int(info
, NDCR_INT_MASK
);
731 static void start_data_dma(struct pxa3xx_nand_info
*info
)
733 enum dma_transfer_direction direction
;
734 struct dma_async_tx_descriptor
*tx
;
736 switch (info
->state
) {
737 case STATE_DMA_WRITING
:
738 info
->dma_dir
= DMA_TO_DEVICE
;
739 direction
= DMA_MEM_TO_DEV
;
741 case STATE_DMA_READING
:
742 info
->dma_dir
= DMA_FROM_DEVICE
;
743 direction
= DMA_DEV_TO_MEM
;
746 dev_err(&info
->pdev
->dev
, "%s: invalid state %d\n", __func__
,
750 info
->sg
.length
= info
->data_size
+
751 (info
->oob_size
? info
->spare_size
+ info
->ecc_size
: 0);
752 dma_map_sg(info
->dma_chan
->device
->dev
, &info
->sg
, 1, info
->dma_dir
);
754 tx
= dmaengine_prep_slave_sg(info
->dma_chan
, &info
->sg
, 1, direction
,
757 dev_err(&info
->pdev
->dev
, "prep_slave_sg() failed\n");
760 tx
->callback
= pxa3xx_nand_data_dma_irq
;
761 tx
->callback_param
= info
;
762 info
->dma_cookie
= dmaengine_submit(tx
);
763 dma_async_issue_pending(info
->dma_chan
);
764 dev_dbg(&info
->pdev
->dev
, "%s(dir=%d cookie=%x size=%u)\n",
765 __func__
, direction
, info
->dma_cookie
, info
->sg
.length
);
768 static irqreturn_t
pxa3xx_nand_irq_thread(int irq
, void *data
)
770 struct pxa3xx_nand_info
*info
= data
;
772 handle_data_pio(info
);
774 info
->state
= STATE_CMD_DONE
;
775 nand_writel(info
, NDSR
, NDSR_WRDREQ
| NDSR_RDDREQ
);
780 static irqreturn_t
pxa3xx_nand_irq(int irq
, void *devid
)
782 struct pxa3xx_nand_info
*info
= devid
;
783 unsigned int status
, is_completed
= 0, is_ready
= 0;
784 unsigned int ready
, cmd_done
;
785 irqreturn_t ret
= IRQ_HANDLED
;
788 ready
= NDSR_FLASH_RDY
;
789 cmd_done
= NDSR_CS0_CMDD
;
792 cmd_done
= NDSR_CS1_CMDD
;
795 status
= nand_readl(info
, NDSR
);
797 if (status
& NDSR_UNCORERR
)
798 info
->retcode
= ERR_UNCORERR
;
799 if (status
& NDSR_CORERR
) {
800 info
->retcode
= ERR_CORERR
;
801 if (info
->variant
== PXA3XX_NAND_VARIANT_ARMADA370
&&
803 info
->ecc_err_cnt
= NDSR_ERR_CNT(status
);
805 info
->ecc_err_cnt
= 1;
808 * Each chunk composing a page is corrected independently,
809 * and we need to store maximum number of corrected bitflips
810 * to return it to the MTD layer in ecc.read_page().
812 info
->max_bitflips
= max_t(unsigned int,
816 if (status
& (NDSR_RDDREQ
| NDSR_WRDREQ
)) {
817 /* whether use dma to transfer data */
819 disable_int(info
, NDCR_INT_MASK
);
820 info
->state
= (status
& NDSR_RDDREQ
) ?
821 STATE_DMA_READING
: STATE_DMA_WRITING
;
822 start_data_dma(info
);
823 goto NORMAL_IRQ_EXIT
;
825 info
->state
= (status
& NDSR_RDDREQ
) ?
826 STATE_PIO_READING
: STATE_PIO_WRITING
;
827 ret
= IRQ_WAKE_THREAD
;
828 goto NORMAL_IRQ_EXIT
;
831 if (status
& cmd_done
) {
832 info
->state
= STATE_CMD_DONE
;
835 if (status
& ready
) {
836 info
->state
= STATE_READY
;
841 * Clear all status bit before issuing the next command, which
842 * can and will alter the status bits and will deserve a new
843 * interrupt on its own. This lets the controller exit the IRQ
845 nand_writel(info
, NDSR
, status
);
847 if (status
& NDSR_WRCMDREQ
) {
848 status
&= ~NDSR_WRCMDREQ
;
849 info
->state
= STATE_CMD_HANDLE
;
852 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
853 * must be loaded by writing directly either 12 or 16
854 * bytes directly to NDCB0, four bytes at a time.
856 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
857 * but each NDCBx register can be read.
859 nand_writel(info
, NDCB0
, info
->ndcb0
);
860 nand_writel(info
, NDCB0
, info
->ndcb1
);
861 nand_writel(info
, NDCB0
, info
->ndcb2
);
863 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
864 if (info
->variant
== PXA3XX_NAND_VARIANT_ARMADA370
)
865 nand_writel(info
, NDCB0
, info
->ndcb3
);
869 complete(&info
->cmd_complete
);
871 complete(&info
->dev_ready
);
/*
 * Return 1 if the first @len bytes of @buf are all 0xFF (erased NAND
 * content), 0 otherwise. Restores the loop body truncated in the
 * garbled source.
 */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}
884 static void set_command_address(struct pxa3xx_nand_info
*info
,
885 unsigned int page_size
, uint16_t column
, int page_addr
)
887 /* small page addr setting */
888 if (page_size
< PAGE_CHUNK_SIZE
) {
889 info
->ndcb1
= ((page_addr
& 0xFFFFFF) << 8)
894 info
->ndcb1
= ((page_addr
& 0xFFFF) << 16)
897 if (page_addr
& 0xFF0000)
898 info
->ndcb2
= (page_addr
& 0xFF0000) >> 16;
904 static void prepare_start_command(struct pxa3xx_nand_info
*info
, int command
)
906 struct pxa3xx_nand_host
*host
= info
->host
[info
->cs
];
907 struct mtd_info
*mtd
= host
->mtd
;
909 /* reset data and oob column point to handle data */
913 info
->data_buff_pos
= 0;
914 info
->oob_buff_pos
= 0;
917 info
->retcode
= ERR_NONE
;
918 info
->ecc_err_cnt
= 0;
924 case NAND_CMD_PAGEPROG
:
926 case NAND_CMD_READOOB
:
927 pxa3xx_set_datasize(info
, mtd
);
939 * If we are about to issue a read command, or about to set
940 * the write address, then clean the data buffer.
942 if (command
== NAND_CMD_READ0
||
943 command
== NAND_CMD_READOOB
||
944 command
== NAND_CMD_SEQIN
) {
946 info
->buf_count
= mtd
->writesize
+ mtd
->oobsize
;
947 memset(info
->data_buff
, 0xFF, info
->buf_count
);
952 static int prepare_set_command(struct pxa3xx_nand_info
*info
, int command
,
953 int ext_cmd_type
, uint16_t column
, int page_addr
)
955 int addr_cycle
, exec_cmd
;
956 struct pxa3xx_nand_host
*host
;
957 struct mtd_info
*mtd
;
959 host
= info
->host
[info
->cs
];
965 info
->ndcb0
= NDCB0_CSEL
;
969 if (command
== NAND_CMD_SEQIN
)
972 addr_cycle
= NDCB0_ADDR_CYC(host
->row_addr_cycles
973 + host
->col_addr_cycles
);
976 case NAND_CMD_READOOB
:
978 info
->buf_start
= column
;
979 info
->ndcb0
|= NDCB0_CMD_TYPE(0)
983 if (command
== NAND_CMD_READOOB
)
984 info
->buf_start
+= mtd
->writesize
;
987 * Multiple page read needs an 'extended command type' field,
988 * which is either naked-read or last-read according to the
991 if (mtd
->writesize
== PAGE_CHUNK_SIZE
) {
992 info
->ndcb0
|= NDCB0_DBC
| (NAND_CMD_READSTART
<< 8);
993 } else if (mtd
->writesize
> PAGE_CHUNK_SIZE
) {
994 info
->ndcb0
|= NDCB0_DBC
| (NAND_CMD_READSTART
<< 8)
996 | NDCB0_EXT_CMD_TYPE(ext_cmd_type
);
997 info
->ndcb3
= info
->chunk_size
+
1001 set_command_address(info
, mtd
->writesize
, column
, page_addr
);
1004 case NAND_CMD_SEQIN
:
1006 info
->buf_start
= column
;
1007 set_command_address(info
, mtd
->writesize
, 0, page_addr
);
1010 * Multiple page programming needs to execute the initial
1011 * SEQIN command that sets the page address.
1013 if (mtd
->writesize
> PAGE_CHUNK_SIZE
) {
1014 info
->ndcb0
|= NDCB0_CMD_TYPE(0x1)
1015 | NDCB0_EXT_CMD_TYPE(ext_cmd_type
)
1018 /* No data transfer in this case */
1019 info
->data_size
= 0;
1024 case NAND_CMD_PAGEPROG
:
1025 if (is_buf_blank(info
->data_buff
,
1026 (mtd
->writesize
+ mtd
->oobsize
))) {
1031 /* Second command setting for large pages */
1032 if (mtd
->writesize
> PAGE_CHUNK_SIZE
) {
1034 * Multiple page write uses the 'extended command'
1035 * field. This can be used to issue a command dispatch
1036 * or a naked-write depending on the current stage.
1038 info
->ndcb0
|= NDCB0_CMD_TYPE(0x1)
1040 | NDCB0_EXT_CMD_TYPE(ext_cmd_type
);
1041 info
->ndcb3
= info
->chunk_size
+
1045 * This is the command dispatch that completes a chunked
1046 * page program operation.
1048 if (info
->data_size
== 0) {
1049 info
->ndcb0
= NDCB0_CMD_TYPE(0x1)
1050 | NDCB0_EXT_CMD_TYPE(ext_cmd_type
)
1057 info
->ndcb0
|= NDCB0_CMD_TYPE(0x1)
1061 | (NAND_CMD_PAGEPROG
<< 8)
1067 case NAND_CMD_PARAM
:
1068 info
->buf_count
= INIT_BUFFER_SIZE
;
1069 info
->ndcb0
|= NDCB0_CMD_TYPE(0)
1073 info
->ndcb1
= (column
& 0xFF);
1074 info
->ndcb3
= INIT_BUFFER_SIZE
;
1075 info
->data_size
= INIT_BUFFER_SIZE
;
1078 case NAND_CMD_READID
:
1079 info
->buf_count
= READ_ID_BYTES
;
1080 info
->ndcb0
|= NDCB0_CMD_TYPE(3)
1083 info
->ndcb1
= (column
& 0xFF);
1085 info
->data_size
= 8;
1087 case NAND_CMD_STATUS
:
1088 info
->buf_count
= 1;
1089 info
->ndcb0
|= NDCB0_CMD_TYPE(4)
1093 info
->data_size
= 8;
1096 case NAND_CMD_ERASE1
:
1097 info
->ndcb0
|= NDCB0_CMD_TYPE(2)
1101 | (NAND_CMD_ERASE2
<< 8)
1103 info
->ndcb1
= page_addr
;
1107 case NAND_CMD_RESET
:
1108 info
->ndcb0
|= NDCB0_CMD_TYPE(5)
1113 case NAND_CMD_ERASE2
:
1119 dev_err(&info
->pdev
->dev
, "non-supported command %x\n",
1127 static void nand_cmdfunc(struct mtd_info
*mtd
, unsigned command
,
1128 int column
, int page_addr
)
1130 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1131 struct pxa3xx_nand_info
*info
= host
->info_data
;
1135 * if this is a x16 device ,then convert the input
1136 * "byte" address into a "word" address appropriate
1137 * for indexing a word-oriented device
1139 if (info
->reg_ndcr
& NDCR_DWIDTH_M
)
1143 * There may be different NAND chip hooked to
1144 * different chip select, so check whether
1145 * chip select has been changed, if yes, reset the timing
1147 if (info
->cs
!= host
->cs
) {
1148 info
->cs
= host
->cs
;
1149 nand_writel(info
, NDTR0CS0
, info
->ndtr0cs0
);
1150 nand_writel(info
, NDTR1CS0
, info
->ndtr1cs0
);
1153 prepare_start_command(info
, command
);
1155 info
->state
= STATE_PREPARED
;
1156 exec_cmd
= prepare_set_command(info
, command
, 0, column
, page_addr
);
1159 init_completion(&info
->cmd_complete
);
1160 init_completion(&info
->dev_ready
);
1161 info
->need_wait
= 1;
1162 pxa3xx_nand_start(info
);
1164 if (!wait_for_completion_timeout(&info
->cmd_complete
,
1165 CHIP_DELAY_TIMEOUT
)) {
1166 dev_err(&info
->pdev
->dev
, "Wait time out!!!\n");
1167 /* Stop State Machine for next command cycle */
1168 pxa3xx_nand_stop(info
);
1171 info
->state
= STATE_IDLE
;
1174 static void nand_cmdfunc_extended(struct mtd_info
*mtd
,
1175 const unsigned command
,
1176 int column
, int page_addr
)
1178 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1179 struct pxa3xx_nand_info
*info
= host
->info_data
;
1180 int exec_cmd
, ext_cmd_type
;
1183 * if this is a x16 device then convert the input
1184 * "byte" address into a "word" address appropriate
1185 * for indexing a word-oriented device
1187 if (info
->reg_ndcr
& NDCR_DWIDTH_M
)
1191 * There may be different NAND chip hooked to
1192 * different chip select, so check whether
1193 * chip select has been changed, if yes, reset the timing
1195 if (info
->cs
!= host
->cs
) {
1196 info
->cs
= host
->cs
;
1197 nand_writel(info
, NDTR0CS0
, info
->ndtr0cs0
);
1198 nand_writel(info
, NDTR1CS0
, info
->ndtr1cs0
);
1201 /* Select the extended command for the first command */
1203 case NAND_CMD_READ0
:
1204 case NAND_CMD_READOOB
:
1205 ext_cmd_type
= EXT_CMD_TYPE_MONO
;
1207 case NAND_CMD_SEQIN
:
1208 ext_cmd_type
= EXT_CMD_TYPE_DISPATCH
;
1210 case NAND_CMD_PAGEPROG
:
1211 ext_cmd_type
= EXT_CMD_TYPE_NAKED_RW
;
1218 prepare_start_command(info
, command
);
1221 * Prepare the "is ready" completion before starting a command
1222 * transaction sequence. If the command is not executed the
1223 * completion will be completed, see below.
1225 * We can do that inside the loop because the command variable
1226 * is invariant and thus so is the exec_cmd.
1228 info
->need_wait
= 1;
1229 init_completion(&info
->dev_ready
);
1231 info
->state
= STATE_PREPARED
;
1232 exec_cmd
= prepare_set_command(info
, command
, ext_cmd_type
,
1235 info
->need_wait
= 0;
1236 complete(&info
->dev_ready
);
1240 init_completion(&info
->cmd_complete
);
1241 pxa3xx_nand_start(info
);
1243 if (!wait_for_completion_timeout(&info
->cmd_complete
,
1244 CHIP_DELAY_TIMEOUT
)) {
1245 dev_err(&info
->pdev
->dev
, "Wait time out!!!\n");
1246 /* Stop State Machine for next command cycle */
1247 pxa3xx_nand_stop(info
);
1251 /* Check if the sequence is complete */
1252 if (info
->data_size
== 0 && command
!= NAND_CMD_PAGEPROG
)
1256 * After a splitted program command sequence has issued
1257 * the command dispatch, the command sequence is complete.
1259 if (info
->data_size
== 0 &&
1260 command
== NAND_CMD_PAGEPROG
&&
1261 ext_cmd_type
== EXT_CMD_TYPE_DISPATCH
)
1264 if (command
== NAND_CMD_READ0
|| command
== NAND_CMD_READOOB
) {
1265 /* Last read: issue a 'last naked read' */
1266 if (info
->data_size
== info
->chunk_size
)
1267 ext_cmd_type
= EXT_CMD_TYPE_LAST_RW
;
1269 ext_cmd_type
= EXT_CMD_TYPE_NAKED_RW
;
1272 * If a splitted program command has no more data to transfer,
1273 * the command dispatch must be issued to complete.
1275 } else if (command
== NAND_CMD_PAGEPROG
&&
1276 info
->data_size
== 0) {
1277 ext_cmd_type
= EXT_CMD_TYPE_DISPATCH
;
1281 info
->state
= STATE_IDLE
;
/*
 * nand_ecc_ctrl ->write_page hook (hardware ECC).
 *
 * Push the page data followed by the OOB bytes into the driver's
 * staging buffer via the chip's write_buf hooks; the controller
 * generates the ECC itself, so no software ECC step is needed here.
 * The real program status is collected later through ->waitfunc.
 */
static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required,
		int page)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}
/*
 * nand_ecc_ctrl ->read_page hook (hardware ECC).
 *
 * Fetch page data and OOB out of the driver's buffer, then fold the
 * controller's per-command result (info->retcode) into the mtd ECC
 * statistics:
 *  - ERR_CORERR with ECC enabled: bitflips were fixed in hardware,
 *    account them as corrected;
 *  - ERR_UNCORERR: uncorrectable, except for an erased (all-0xff)
 *    page, which is deliberately not counted (see comment below).
 * Returns the maximum number of bitflips seen (info->max_bitflips).
 */
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR) {
		/*
		 * for blank page (all 0xff), HW will calculate its ECC as
		 * 0, which is different from the ECC information within
		 * OOB, ignore such uncorrectable errors
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}
/*
 * nand_chip ->read_byte hook: return the next byte from the buffer
 * filled by the previous command, or 0xFF once the valid window
 * (buf_start..buf_count) is exhausted.
 */
static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	char retval = 0xFF;

	if (info->buf_start < info->buf_count)
		/* Has just send a new command? */
		retval = info->data_buff[info->buf_start++];

	return retval;
}
/*
 * nand_chip ->read_word hook: return the next 16-bit value from the
 * command buffer.  Only even offsets inside the valid window are
 * served (the controller fills the buffer in 16-bit units); any other
 * request yields 0xFFFF.
 */
static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	u16 retval = 0xFFFF;

	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff+info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}
1348 static void pxa3xx_nand_read_buf(struct mtd_info
*mtd
, uint8_t *buf
, int len
)
1350 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1351 struct pxa3xx_nand_info
*info
= host
->info_data
;
1352 int real_len
= min_t(size_t, len
, info
->buf_count
- info
->buf_start
);
1354 memcpy(buf
, info
->data_buff
+ info
->buf_start
, real_len
);
1355 info
->buf_start
+= real_len
;
1358 static void pxa3xx_nand_write_buf(struct mtd_info
*mtd
,
1359 const uint8_t *buf
, int len
)
1361 struct pxa3xx_nand_host
*host
= mtd
->priv
;
1362 struct pxa3xx_nand_info
*info
= host
->info_data
;
1363 int real_len
= min_t(size_t, len
, info
->buf_count
- info
->buf_start
);
1365 memcpy(info
->data_buff
+ info
->buf_start
, buf
, real_len
);
1366 info
->buf_start
+= real_len
;
/*
 * nand_chip ->select_chip hook.  Intentionally a no-op: chip selection
 * is driven per command through info->cs rather than through this
 * callback.
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}
/*
 * nand_chip ->waitfunc hook: wait for the device to become ready.
 *
 * If a command is still in flight (info->need_wait was set by the
 * command dispatch path), block on the dev_ready completion that the
 * interrupt handler posts, bounded by CHIP_DELAY_TIMEOUT.  For
 * program/erase operations the cached per-command result decides
 * between success and NAND_STATUS_FAIL; the command transfer itself
 * has already been waited on by the dispatch code.
 */
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		info->need_wait = 0;
		if (!wait_for_completion_timeout(&info->dev_ready,
				CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Ready time out!!!\n");
			return NAND_STATUS_FAIL;
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}
/*
 * Build the default NDCR (control register) image for the chip on the
 * current chip select and cache it in info->reg_ndcr.  The image mixes
 * platform data (bus arbitration) with the geometry detected for this
 * chip (column address cycles, pages per block, page size); it is
 * written to the hardware later, not here.
 */
static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = host->mtd;
	struct nand_chip *chip = mtd->priv;

	/* configure default flash values */
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
	info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
	info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
	info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;

	return 0;
}
/*
 * "Keep config" mode: instead of programming our own configuration,
 * read back what the bootloader left in the controller — the page
 * size (via NDCR_PAGE_SZ), the NDCR image with interrupt/arbitration
 * bits masked off, and both CS0 timing registers.
 */
static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr = nand_readl(info, NDCR);

	/* Set an initial chunk size */
	info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
	info->reg_ndcr = ndcr &
		~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);

	return 0;
}
/*
 * Allocate the data bounce buffer and, when DMA is requested, set up
 * the dmaengine channel used for data transfers:
 *  - 32-bit coherent DMA mask on the platform device;
 *  - a single-entry scatterlist over the bounce buffer;
 *  - a slave channel (pxad_filter_fn fallback for non-DT platforms)
 *    configured for 4-byte accesses against the NDDB data FIFO in
 *    both directions, with a burst size of 32.
 * Only after everything succeeded is info->use_dma turned on, so a
 * partial failure leaves the driver in PIO mode.
 * Returns 0 on success or a negative errno.
 */
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	struct dma_slave_config	config;
	dma_cap_mask_t mask;
	struct pxad_param param;
	int ret;

	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL)
		return -ENOMEM;
	if (use_dma == 0)
		return 0;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	sg_init_one(&info->sg, info->data_buff, info->buf_size);
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	param.prio = PXAD_PRIO_LOWEST;
	param.drcmr = info->drcmr_dat;
	info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
							  &param, &pdev->dev,
							  "data");
	if (!info->dma_chan) {
		dev_err(&pdev->dev, "unable to request data dma channel\n");
		return -ENODEV;
	}

	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.src_addr = info->mmio_phys + NDDB;
	config.dst_addr = info->mmio_phys + NDDB;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;
	ret = dmaengine_slave_config(info->dma_chan, &config);
	if (ret < 0) {
		dev_err(&info->pdev->dev,
			"dma channel configuration failed: %d\n",
			ret);
		return ret;
	}

	/*
	 * Now that DMA buffers are allocated we turn on
	 * DMA proper for I/O operations.
	 */
	info->use_dma = 1;
	return 0;
}
/*
 * Undo pxa3xx_nand_init_buff(): stop and release the DMA channel if
 * DMA was enabled, then free the data bounce buffer.
 */
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
	if (info->use_dma) {
		dmaengine_terminate_all(info->dma_chan);
		dma_release_channel(info->dma_chan);
	}
	kfree(info->data_buff);
}
/*
 * Probe for the presence of a chip on the current chip select: apply
 * the slowest common ONFI timing mode (mode 0), issue a RESET and see
 * whether the chip reports ready.  Returns 0 when a chip answered,
 * -ENODEV when the reset failed, or the error from the timing lookup.
 */
static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	const struct nand_sdr_timings *timings;
	int ret;

	mtd = info->host[info->cs]->mtd;
	chip = mtd->priv;

	/* use the common timing to make a try */
	timings = onfi_async_timing_mode_to_sdr_timings(0);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
	ret = chip->waitfunc(mtd, chip);
	if (ret & NAND_STATUS_FAIL)
		return -ENODEV;

	return 0;
}
/*
 * Map a required ECC (strength per step size, plus the page size) onto
 * one of the controller's supported ECC geometries, filling in both
 * the driver's chunk/spare/ecc sizes and the generic nand_ecc_ctrl.
 * The controller can exceed the requested strength (e.g. 16-bit BCH is
 * selected to satisfy 4- or 8-bit requirements); the geometry table
 * below is fixed by the hardware.  Returns -ENODEV for unsupported
 * combinations.
 */
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		/* 1-bit Hamming, 2KB page */
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		/* 1-bit Hamming, 512B page */
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;
	} else {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
		 ecc->strength, ecc->size);
	return 0;
}
/*
 * Full chip bring-up for one mtd device, wrapping the generic
 * nand_scan_ident()/nand_scan_tail() pair with controller-specific
 * steps: optional bootloader-config reuse, a reset-based presence
 * probe, ECC geometry selection, the switch to extended (splitted)
 * command handling for pages larger than the FIFO, and the final
 * reallocation of the data buffer to the real page + OOB size.
 * Returns 0 on success or a negative errno.
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct nand_chip *chip = mtd->priv;
	int ret;
	uint16_t ecc_strength, ecc_step;

	/* Reuse the bootloader's controller setup when asked to */
	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
		goto KEEP_CONFIG;

	/* Set a default chunk size */
	info->chunk_size = 512;

	ret = pxa3xx_nand_config_flash(info);
	if (ret)
		return ret;

	ret = pxa3xx_nand_sensing(host);
	if (ret) {
		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
			 info->cs);
		return ret;
	}

KEEP_CONFIG:
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		chip->options |= NAND_BUSWIDTH_16;

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, NULL))
		return -ENODEV;

	if (!pdata->keep_config) {
		ret = pxa3xx_nand_init(host);
		if (ret) {
			dev_err(&info->pdev->dev, "Failed to init nand: %d\n",
				ret);
			return ret;
		}
	}

	if (pdata->flash_bbt) {
		/*
		 * We'll use a bad block table stored in-flash and don't
		 * allow writing the bad block marker to the flash.
		 */
		chip->bbt_options |= NAND_BBT_USE_FLASH |
				     NAND_BBT_NO_OOB_BBM;
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
	}

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka splitted) command handling,
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	/* Platform-provided ECC requirements win over ONFI-detected ones */
	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	/* >64K pages need a third row address cycle */
	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	return nand_scan_tail(mtd);
}
/*
 * Allocate and wire up all per-controller resources.
 *
 * A single devm allocation holds the pxa3xx_nand_info followed by one
 * (mtd_info + nand_chip/pxa3xx_nand_host) pair per chip select; the
 * host structure deliberately begins with the nand_chip so the two
 * pointers alias.  After populating the nand_chip callbacks for every
 * CS, the function acquires the clock, (optionally) the two DMA
 * request lines, the IRQ and the MMIO window, allocates the initial
 * detection buffer, and installs the threaded IRQ handler.
 * Error handling uses the classic goto-cleanup ladder: everything
 * acquired after clk_prepare_enable() is released in reverse order.
 * Returns 0 on success or a negative errno.
 */
static int alloc_nand_resource(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_info *info;
	struct pxa3xx_nand_host *host;
	struct nand_chip *chip = NULL;
	struct mtd_info *mtd;
	struct resource *r;
	int ret, irq, cs;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata->num_cs <= 0)
		return -ENODEV;
	info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
			    sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->pdev = pdev;
	info->variant = pxa3xx_nand_get_variant(pdev);
	for (cs = 0; cs < pdata->num_cs; cs++) {
		/* mtd / chip / host live back-to-back after info */
		mtd = (void *)&info[1] + (sizeof(*mtd) + sizeof(*host)) * cs;
		chip = (struct nand_chip *)(&mtd[1]);
		host = (struct pxa3xx_nand_host *)chip;
		info->host[cs] = host;
		host->mtd = mtd;
		host->cs = cs;
		host->info_data = info;
		mtd->priv = host;
		mtd->dev.parent = &pdev->dev;

		chip->ecc.read_page	= pxa3xx_nand_read_page_hwecc;
		chip->ecc.write_page	= pxa3xx_nand_write_page_hwecc;
		chip->controller        = &info->controller;
		chip->waitfunc		= pxa3xx_nand_waitfunc;
		chip->select_chip	= pxa3xx_nand_select_chip;
		chip->read_word		= pxa3xx_nand_read_word;
		chip->read_byte		= pxa3xx_nand_read_byte;
		chip->read_buf		= pxa3xx_nand_read_buf;
		chip->write_buf		= pxa3xx_nand_write_buf;
		chip->options		|= NAND_NO_SUBPAGE_WRITE;
		chip->cmdfunc		= nand_cmdfunc;
	}

	spin_lock_init(&chip->controller->lock);
	init_waitqueue_head(&chip->controller->wq);
	info->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(info->clk)) {
		dev_err(&pdev->dev, "failed to get nand clock\n");
		return PTR_ERR(info->clk);
	}
	ret = clk_prepare_enable(info->clk);
	if (ret < 0)
		return ret;

	if (use_dma) {
		r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
		if (r == NULL) {
			dev_err(&pdev->dev,
				"no resource defined for data DMA\n");
			ret = -ENXIO;
			goto fail_disable_clk;
		}
		info->drcmr_dat = r->start;

		r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
		if (r == NULL) {
			dev_err(&pdev->dev,
				"no resource defined for cmd DMA\n");
			ret = -ENXIO;
			goto fail_disable_clk;
		}
		info->drcmr_cmd = r->start;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no IRQ resource defined\n");
		ret = -ENXIO;
		goto fail_disable_clk;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(info->mmio_base)) {
		ret = PTR_ERR(info->mmio_base);
		goto fail_disable_clk;
	}
	info->mmio_phys = r->start;

	/* Allocate a buffer to allow flash detection */
	info->buf_size = INIT_BUFFER_SIZE;
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL) {
		ret = -ENOMEM;
		goto fail_disable_clk;
	}

	/* initialize all interrupts to be disabled */
	disable_int(info, NDSR_MASK);

	ret = request_threaded_irq(irq, pxa3xx_nand_irq,
				   pxa3xx_nand_irq_thread, IRQF_ONESHOT,
				   pdev->name, info);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto fail_free_buf;
	}

	platform_set_drvdata(pdev, info);

	return 0;

fail_free_buf:
	free_irq(irq, info);
	kfree(info->data_buff);
fail_disable_clk:
	clk_disable_unprepare(info->clk);
	return ret;
}
/*
 * Driver removal: release the IRQ and buffers/DMA, hand the DFI bus
 * back to the SMC (see comment below), stop the clock, and unregister
 * every mtd device that was created per chip select.
 */
static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	int irq, cs;

	if (!info)
		return 0;

	pdata = dev_get_platdata(&pdev->dev);

	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, info);
	pxa3xx_nand_free_buff(info);

	/*
	 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
	 * In order to prevent a lockup of the system bus, the DFI bus
	 * arbitration is granted to SMC upon driver removal. This is done by
	 * setting the x_ARB_CNTL bit, which also prevents the NAND to have
	 * access to the bus anymore.
	 */
	nand_writel(info, NDCR,
		    (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
		    NFCV1_NDCR_ARB_CNTL);
	clk_disable_unprepare(info->clk);

	for (cs = 0; cs < pdata->num_cs; cs++)
		nand_release(info->host[cs]->mtd);

	return 0;
}
/*
 * Devicetree probing: when the device matched via the OF id table,
 * synthesize a platform_data structure from the DT properties
 * (arbiter enable, keep-config, num-cs, on-flash BBT, ECC strength
 * and step size) and attach it to the device.  Non-DT probes (no OF
 * match) return 0 without touching platform_data.
 */
static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);

	if (!of_id)
		return 0;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
		pdata->enable_arbiter = 1;
	if (of_get_property(np, "marvell,nand-keep-config", NULL))
		pdata->keep_config = 1;
	of_property_read_u32(np, "num-cs", &pdata->num_cs);
	pdata->flash_bbt = of_get_nand_on_flash_bbt(np);

	/* Negative return means "not specified" — normalize to 0 */
	pdata->ecc_strength = of_get_nand_ecc_strength(np);
	if (pdata->ecc_strength < 0)
		pdata->ecc_strength = 0;

	pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
	if (pdata->ecc_step_size < 0)
		pdata->ecc_step_size = 0;

	pdev->dev.platform_data = pdata;

	return 0;
}
/*
 * Platform driver probe entry point.
 *
 * Disables the module-level DMA request on platforms that cannot do
 * DMA for this device, resolves platform data (from DT if needed),
 * allocates all controller resources, then scans and registers an mtd
 * device for every configured chip select.  A scan failure on one CS
 * is only a warning; the probe as a whole fails (and tears everything
 * down via pxa3xx_nand_remove) only if no CS registered successfully.
 */
static int pxa3xx_nand_probe(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct mtd_part_parser_data ppdata = {};
	struct pxa3xx_nand_info *info;
	int ret, cs, probe_success, dma_available;

	dma_available = IS_ENABLED(CONFIG_ARM) &&
		(IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
	if (use_dma && !dma_available) {
		use_dma = 0;
		dev_warn(&pdev->dev,
			 "This platform can't do DMA on this device\n");
	}

	ret = pxa3xx_nand_probe_dt(pdev);
	if (ret)
		return ret;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -ENODEV;
	}

	ret = alloc_nand_resource(pdev);
	if (ret) {
		dev_err(&pdev->dev, "alloc nand resource failed\n");
		return ret;
	}

	info = platform_get_drvdata(pdev);
	probe_success = 0;
	for (cs = 0; cs < pdata->num_cs; cs++) {
		struct mtd_info *mtd = info->host[cs]->mtd;

		/*
		 * The mtd name matches the one used in 'mtdparts' kernel
		 * parameter. This name cannot be changed or otherwise
		 * user's mtd partitions configuration would get broken.
		 */
		mtd->name = "pxa3xx_nand-0";
		info->cs = cs;
		ret = pxa3xx_nand_scan(mtd);
		if (ret) {
			dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
				 cs);
			continue;
		}

		ppdata.of_node = pdev->dev.of_node;
		ret = mtd_device_parse_register(mtd, NULL,
						&ppdata, pdata->parts[cs],
						pdata->nr_parts[cs]);
		if (!ret)
			probe_success = 1;
	}

	if (!probe_success) {
		pxa3xx_nand_remove(pdev);
		return -ENODEV;
	}

	return 0;
}
/*
 * PM suspend callback: refuse to suspend while a command sequence is
 * in progress (any non-idle driver state), returning -EAGAIN so the
 * suspend can be retried.
 */
static int pxa3xx_nand_suspend(struct device *dev)
{
	struct pxa3xx_nand_info *info = dev_get_drvdata(dev);

	if (info->state) {
		dev_err(dev, "driver busy, state = %d\n", info->state);
		return -EAGAIN;
	}

	return 0;
}
/*
 * PM resume callback: mask interrupts, force a timing reconfiguration
 * on the next command by invalidating the cached chip select, and
 * clear any stale controller status left over from the clock cycle.
 */
static int pxa3xx_nand_resume(struct device *dev)
{
	struct pxa3xx_nand_info *info = dev_get_drvdata(dev);

	/* We don't want to handle interrupt without calling mtd routine */
	disable_int(info, NDCR_INT_MASK);

	/*
	 * Directly set the chip select to a invalid value,
	 * then the driver would reset the timing according
	 * to current chip select at the beginning of cmdfunc
	 */
	info->cs = 0xff;

	/*
	 * As the spec says, the NDSR would be updated to 0x1800 when
	 * doing the nand_clk disable/enable.
	 * To prevent it damaging state machine of the driver, clear
	 * all status before resume
	 */
	nand_writel(info, NDSR, NDSR_MASK);

	return 0;
}
1999 #define pxa3xx_nand_suspend NULL
2000 #define pxa3xx_nand_resume NULL
/*
 * Power-management operations; the suspend/resume symbols are #defined
 * to NULL when PM support is compiled out (see the defines above).
 */
static const struct dev_pm_ops pxa3xx_nand_pm_ops = {
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};
/*
 * Platform driver glue: binds to the "pxa3xx-nand" platform device
 * name and to the devicetree ids in pxa3xx_nand_dt_ids.
 */
static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
		.of_match_table = pxa3xx_nand_dt_ids,
		.pm	= &pxa3xx_nand_pm_ops,
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
};
/* Register with the platform bus; generates module init/exit. */
module_platform_driver(pxa3xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");