2 * drivers/mtd/nand/pxa3xx_nand.c
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/dmaengine.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/dma/pxa-dma.h>
21 #include <linux/delay.h>
22 #include <linux/clk.h>
23 #include <linux/mtd/mtd.h>
24 #include <linux/mtd/nand.h>
25 #include <linux/mtd/partitions.h>
27 #include <linux/iopoll.h>
28 #include <linux/irq.h>
29 #include <linux/slab.h>
31 #include <linux/of_device.h>
32 #include <linux/of_mtd.h>
33 #include <linux/platform_data/mtd-nand-pxa3xx.h>
/* Upper bound on a single controller command; stop-polling budget. */
#define	CHIP_DELAY_TIMEOUT	msecs_to_jiffies(200)
#define NAND_STOP_DELAY		msecs_to_jiffies(40)
/* Largest data chunk the controller moves per transfer step. */
#define PAGE_CHUNK_SIZE		(2048)

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM.
 * ONFI param page is 256 bytes, and there are three redundant copies
 * to be read. JEDEC param page is 512 bytes, and there are also three
 * redundant copies to be read.
 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
 */
#define INIT_BUFFER_SIZE	2048
/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDECCCTRL	(0x28) /* ECC control */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

/* NDCR (control register) bit fields */
#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NFCV1_NDCR_ARB_CNTL	(0x1 << 19)
#define NFCV2_NDCR_STOP_ON_UNCOR	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
#define NDCR_INT_MASK		(0xFFF)

/* NDSR (status register) bit fields */
#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

/* NDCB0 (command buffer 0) bit fields */
#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

/* Extended command types for chunked (multi-step) operations */
#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */

/*
 * This should be large enough to read 'ONFI' and 'JEDEC'.
 * Let's use 7 bytes, which is the maximum ID count supported
 * by the controller (see NDCR_RD_ID_CNT_MASK).
 */
#define READ_ID_BYTES		7
/* macros for registers read/write */

/* Write a controller register, tracing the access at vdbg level. */
#define nand_writel(info, off, val)					\
	do {								\
		dev_vdbg(&info->pdev->dev,				\
			 "%s():%d nand_writel(0x%x, 0x%04x)\n",		\
			 __func__, __LINE__, (val), (off));		\
		writel_relaxed((val), (info)->mmio_base + (off));	\
	} while (0)

/* Read a controller register, tracing the access at vdbg level. */
#define nand_readl(info, off)						\
	({								\
		unsigned int _v;					\
		_v = readl_relaxed((info)->mmio_base + (off));		\
		dev_vdbg(&info->pdev->dev,				\
			 "%s():%d nand_readl(0x%04x) = 0x%x\n",		\
			 __func__, __LINE__, (off), _v);		\
		_v;							\
	})
/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,
	ERR_SENDCMD	= -2,
	ERR_UNCORERR	= -3,
	ERR_BBERR	= -4,
	ERR_CORERR	= -5,
};

/* Driver state machine, tracked in pxa3xx_nand_info->state. */
enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

/* Controller generations: NFCv1 (PXA SoCs) and NFCv2 (Armada 370/XP). */
enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};
180 struct pxa3xx_nand_host
{
181 struct nand_chip chip
;
184 /* page size of attached chip */
188 /* calculated from pxa3xx_nand_flash data */
189 unsigned int col_addr_cycles
;
190 unsigned int row_addr_cycles
;
193 struct pxa3xx_nand_info
{
194 struct nand_hw_control controller
;
195 struct platform_device
*pdev
;
198 void __iomem
*mmio_base
;
199 unsigned long mmio_phys
;
200 struct completion cmd_complete
, dev_ready
;
202 unsigned int buf_start
;
203 unsigned int buf_count
;
204 unsigned int buf_size
;
205 unsigned int data_buff_pos
;
206 unsigned int oob_buff_pos
;
208 /* DMA information */
209 struct scatterlist sg
;
210 enum dma_data_direction dma_dir
;
211 struct dma_chan
*dma_chan
;
212 dma_cookie_t dma_cookie
;
216 unsigned char *data_buff
;
217 unsigned char *oob_buff
;
218 dma_addr_t data_buff_phys
;
221 struct pxa3xx_nand_host
*host
[NUM_CHIP_SELECT
];
225 * This driver supports NFCv1 (as found in PXA SoC)
226 * and NFCv2 (as found in Armada 370/XP SoC).
228 enum pxa3xx_nand_variant variant
;
231 int use_ecc
; /* use HW ECC ? */
232 int ecc_bch
; /* using BCH ECC? */
233 int use_dma
; /* use DMA ? */
234 int use_spare
; /* use spare ? */
237 /* Amount of real data per full chunk */
238 unsigned int chunk_size
;
240 /* Amount of spare data per full chunk */
241 unsigned int spare_size
;
243 /* Number of full chunks (i.e chunk_size + spare_size) */
244 unsigned int nfullchunks
;
247 * Total number of chunks. If equal to nfullchunks, then there
248 * are only full chunks. Otherwise, there is one last chunk of
249 * size (last_chunk_size + last_spare_size)
251 unsigned int ntotalchunks
;
253 /* Amount of real data in the last chunk */
254 unsigned int last_chunk_size
;
256 /* Amount of spare data in the last chunk */
257 unsigned int last_spare_size
;
259 unsigned int ecc_size
;
260 unsigned int ecc_err_cnt
;
261 unsigned int max_bitflips
;
265 * Variables only valid during command
266 * execution. step_chunk_size and step_spare_size is the
267 * amount of real data and spare data in the current
268 * chunk. cur_chunk is the current chunk being
271 unsigned int step_chunk_size
;
272 unsigned int step_spare_size
;
273 unsigned int cur_chunk
;
275 /* cached register value */
280 /* generated NDCBx register values */
287 static bool use_dma
= 1;
288 module_param(use_dma
, bool, 0444);
289 MODULE_PARM_DESC(use_dma
, "enable DMA for data transferring to/from NAND HW");
/* NAND interface timings, all values in nanoseconds. */
struct pxa3xx_nand_timing {
	unsigned int	tCH;  /* Enable signal hold time */
	unsigned int	tCS;  /* Enable signal setup time */
	unsigned int	tWH;  /* ND_nWE high duration */
	unsigned int	tWP;  /* ND_nWE pulse time */
	unsigned int	tRH;  /* ND_nRE high duration */
	unsigned int	tRP;  /* ND_nRE pulse width */
	unsigned int	tR;   /* ND_nWE high to ND_nRE low for read */
	unsigned int	tWHR; /* ND_nWE high to ND_nRE low for status read */
	unsigned int	tAR;  /* ND_ALE low to ND_nRE low delay */
};
303 struct pxa3xx_nand_flash
{
305 unsigned int flash_width
; /* Width of Flash memory (DWIDTH_M) */
306 unsigned int dfc_width
; /* Width of flash controller(DWIDTH_C) */
307 struct pxa3xx_nand_timing
*timing
; /* NAND Flash timing */
310 static struct pxa3xx_nand_timing timing
[] = {
311 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
312 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
313 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
314 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
317 static struct pxa3xx_nand_flash builtin_flash_types
[] = {
318 { 0x46ec, 16, 16, &timing
[1] },
319 { 0xdaec, 8, 8, &timing
[1] },
320 { 0xd7ec, 8, 8, &timing
[1] },
321 { 0xa12c, 8, 8, &timing
[2] },
322 { 0xb12c, 16, 16, &timing
[2] },
323 { 0xdc2c, 8, 8, &timing
[2] },
324 { 0xcc2c, 16, 16, &timing
[2] },
325 { 0xba20, 16, 16, &timing
[3] },
328 static u8 bbt_pattern
[] = {'M', 'V', 'B', 'b', 't', '0' };
329 static u8 bbt_mirror_pattern
[] = {'1', 't', 'b', 'B', 'V', 'M' };
331 static struct nand_bbt_descr bbt_main_descr
= {
332 .options
= NAND_BBT_LASTBLOCK
| NAND_BBT_CREATE
| NAND_BBT_WRITE
333 | NAND_BBT_2BIT
| NAND_BBT_VERSION
,
337 .maxblocks
= 8, /* Last 8 blocks in each chip */
338 .pattern
= bbt_pattern
341 static struct nand_bbt_descr bbt_mirror_descr
= {
342 .options
= NAND_BBT_LASTBLOCK
| NAND_BBT_CREATE
| NAND_BBT_WRITE
343 | NAND_BBT_2BIT
| NAND_BBT_VERSION
,
347 .maxblocks
= 8, /* Last 8 blocks in each chip */
348 .pattern
= bbt_mirror_pattern
351 static struct nand_ecclayout ecc_layout_2KB_bch4bit
= {
354 32, 33, 34, 35, 36, 37, 38, 39,
355 40, 41, 42, 43, 44, 45, 46, 47,
356 48, 49, 50, 51, 52, 53, 54, 55,
357 56, 57, 58, 59, 60, 61, 62, 63},
358 .oobfree
= { {2, 30} }
361 static struct nand_ecclayout ecc_layout_4KB_bch4bit
= {
364 32, 33, 34, 35, 36, 37, 38, 39,
365 40, 41, 42, 43, 44, 45, 46, 47,
366 48, 49, 50, 51, 52, 53, 54, 55,
367 56, 57, 58, 59, 60, 61, 62, 63,
368 96, 97, 98, 99, 100, 101, 102, 103,
369 104, 105, 106, 107, 108, 109, 110, 111,
370 112, 113, 114, 115, 116, 117, 118, 119,
371 120, 121, 122, 123, 124, 125, 126, 127},
372 /* Bootrom looks in bytes 0 & 5 for bad blocks */
373 .oobfree
= { {6, 26}, { 64, 32} }
376 static struct nand_ecclayout ecc_layout_4KB_bch8bit
= {
379 32, 33, 34, 35, 36, 37, 38, 39,
380 40, 41, 42, 43, 44, 45, 46, 47,
381 48, 49, 50, 51, 52, 53, 54, 55,
382 56, 57, 58, 59, 60, 61, 62, 63},
/* NDTR0 field encoders: clamp each cycle count to its register field width. */
#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

/* NDTR1 field encoders; tR gets a full 16-bit field. */
#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
400 static const struct of_device_id pxa3xx_nand_dt_ids
[] = {
402 .compatible
= "marvell,pxa3xx-nand",
403 .data
= (void *)PXA3XX_NAND_VARIANT_PXA
,
406 .compatible
= "marvell,armada370-nand",
407 .data
= (void *)PXA3XX_NAND_VARIANT_ARMADA370
,
411 MODULE_DEVICE_TABLE(of
, pxa3xx_nand_dt_ids
);
413 static enum pxa3xx_nand_variant
414 pxa3xx_nand_get_variant(struct platform_device
*pdev
)
416 const struct of_device_id
*of_id
=
417 of_match_device(pxa3xx_nand_dt_ids
, &pdev
->dev
);
419 return PXA3XX_NAND_VARIANT_PXA
;
420 return (enum pxa3xx_nand_variant
)of_id
->data
;
423 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host
*host
,
424 const struct pxa3xx_nand_timing
*t
)
426 struct pxa3xx_nand_info
*info
= host
->info_data
;
427 unsigned long nand_clk
= clk_get_rate(info
->clk
);
428 uint32_t ndtr0
, ndtr1
;
430 ndtr0
= NDTR0_tCH(ns2cycle(t
->tCH
, nand_clk
)) |
431 NDTR0_tCS(ns2cycle(t
->tCS
, nand_clk
)) |
432 NDTR0_tWH(ns2cycle(t
->tWH
, nand_clk
)) |
433 NDTR0_tWP(ns2cycle(t
->tWP
, nand_clk
)) |
434 NDTR0_tRH(ns2cycle(t
->tRH
, nand_clk
)) |
435 NDTR0_tRP(ns2cycle(t
->tRP
, nand_clk
));
437 ndtr1
= NDTR1_tR(ns2cycle(t
->tR
, nand_clk
)) |
438 NDTR1_tWHR(ns2cycle(t
->tWHR
, nand_clk
)) |
439 NDTR1_tAR(ns2cycle(t
->tAR
, nand_clk
));
441 info
->ndtr0cs0
= ndtr0
;
442 info
->ndtr1cs0
= ndtr1
;
443 nand_writel(info
, NDTR0CS0
, ndtr0
);
444 nand_writel(info
, NDTR1CS0
, ndtr1
);
447 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host
*host
,
448 const struct nand_sdr_timings
*t
)
450 struct pxa3xx_nand_info
*info
= host
->info_data
;
451 struct nand_chip
*chip
= &host
->chip
;
452 unsigned long nand_clk
= clk_get_rate(info
->clk
);
453 uint32_t ndtr0
, ndtr1
;
455 u32 tCH_min
= DIV_ROUND_UP(t
->tCH_min
, 1000);
456 u32 tCS_min
= DIV_ROUND_UP(t
->tCS_min
, 1000);
457 u32 tWH_min
= DIV_ROUND_UP(t
->tWH_min
, 1000);
458 u32 tWP_min
= DIV_ROUND_UP(t
->tWC_min
- t
->tWH_min
, 1000);
459 u32 tREH_min
= DIV_ROUND_UP(t
->tREH_min
, 1000);
460 u32 tRP_min
= DIV_ROUND_UP(t
->tRC_min
- t
->tREH_min
, 1000);
461 u32 tR
= chip
->chip_delay
* 1000;
462 u32 tWHR_min
= DIV_ROUND_UP(t
->tWHR_min
, 1000);
463 u32 tAR_min
= DIV_ROUND_UP(t
->tAR_min
, 1000);
465 /* fallback to a default value if tR = 0 */
469 ndtr0
= NDTR0_tCH(ns2cycle(tCH_min
, nand_clk
)) |
470 NDTR0_tCS(ns2cycle(tCS_min
, nand_clk
)) |
471 NDTR0_tWH(ns2cycle(tWH_min
, nand_clk
)) |
472 NDTR0_tWP(ns2cycle(tWP_min
, nand_clk
)) |
473 NDTR0_tRH(ns2cycle(tREH_min
, nand_clk
)) |
474 NDTR0_tRP(ns2cycle(tRP_min
, nand_clk
));
476 ndtr1
= NDTR1_tR(ns2cycle(tR
, nand_clk
)) |
477 NDTR1_tWHR(ns2cycle(tWHR_min
, nand_clk
)) |
478 NDTR1_tAR(ns2cycle(tAR_min
, nand_clk
));
480 info
->ndtr0cs0
= ndtr0
;
481 info
->ndtr1cs0
= ndtr1
;
482 nand_writel(info
, NDTR0CS0
, ndtr0
);
483 nand_writel(info
, NDTR1CS0
, ndtr1
);
486 static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host
*host
,
487 unsigned int *flash_width
,
488 unsigned int *dfc_width
)
490 struct nand_chip
*chip
= &host
->chip
;
491 struct pxa3xx_nand_info
*info
= host
->info_data
;
492 const struct pxa3xx_nand_flash
*f
= NULL
;
493 struct mtd_info
*mtd
= nand_to_mtd(&host
->chip
);
496 ntypes
= ARRAY_SIZE(builtin_flash_types
);
498 chip
->cmdfunc(mtd
, NAND_CMD_READID
, 0x00, -1);
500 id
= chip
->read_byte(mtd
);
501 id
|= chip
->read_byte(mtd
) << 0x8;
503 for (i
= 0; i
< ntypes
; i
++) {
504 f
= &builtin_flash_types
[i
];
506 if (f
->chip_id
== id
)
511 dev_err(&info
->pdev
->dev
, "Error: timings not found\n");
515 pxa3xx_nand_set_timing(host
, f
->timing
);
517 *flash_width
= f
->flash_width
;
518 *dfc_width
= f
->dfc_width
;
/*
 * ONFI timing setup: pick the fastest async timing mode advertised in
 * the mode bitmask and program the corresponding SDR timings.
 */
static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host *host,
					 unsigned int mode)
{
	const struct nand_sdr_timings *timings;

	/* Highest set bit is the fastest supported mode. */
	mode = fls(mode) - 1;
	if (mode < 0)
		mode = 0;

	timings = onfi_async_timing_mode_to_sdr_timings(mode);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	return 0;
}
541 static int pxa3xx_nand_init(struct pxa3xx_nand_host
*host
)
543 struct nand_chip
*chip
= &host
->chip
;
544 struct pxa3xx_nand_info
*info
= host
->info_data
;
545 unsigned int flash_width
= 0, dfc_width
= 0;
548 mode
= onfi_get_async_timing_mode(chip
);
549 if (mode
== ONFI_TIMING_MODE_UNKNOWN
) {
550 err
= pxa3xx_nand_init_timings_compat(host
, &flash_width
,
555 if (flash_width
== 16) {
556 info
->reg_ndcr
|= NDCR_DWIDTH_M
;
557 chip
->options
|= NAND_BUSWIDTH_16
;
560 info
->reg_ndcr
|= (dfc_width
== 16) ? NDCR_DWIDTH_C
: 0;
562 err
= pxa3xx_nand_init_timings_onfi(host
, mode
);
571 * NOTE: it is a must to set ND_RUN firstly, then write
572 * command buffer, otherwise, it does not work.
573 * We enable all the interrupt at the same time, and
574 * let pxa3xx_nand_irq to handle all logic.
576 static void pxa3xx_nand_start(struct pxa3xx_nand_info
*info
)
580 ndcr
= info
->reg_ndcr
;
585 nand_writel(info
, NDECCCTRL
, 0x1);
587 ndcr
&= ~NDCR_ECC_EN
;
589 nand_writel(info
, NDECCCTRL
, 0x0);
595 ndcr
&= ~NDCR_DMA_EN
;
598 ndcr
|= NDCR_SPARE_EN
;
600 ndcr
&= ~NDCR_SPARE_EN
;
604 /* clear status bits and run */
605 nand_writel(info
, NDSR
, NDSR_MASK
);
606 nand_writel(info
, NDCR
, 0);
607 nand_writel(info
, NDCR
, ndcr
);
610 static void pxa3xx_nand_stop(struct pxa3xx_nand_info
*info
)
613 int timeout
= NAND_STOP_DELAY
;
615 /* wait RUN bit in NDCR become 0 */
616 ndcr
= nand_readl(info
, NDCR
);
617 while ((ndcr
& NDCR_ND_RUN
) && (timeout
-- > 0)) {
618 ndcr
= nand_readl(info
, NDCR
);
623 ndcr
&= ~NDCR_ND_RUN
;
624 nand_writel(info
, NDCR
, ndcr
);
627 dmaengine_terminate_all(info
->dma_chan
);
629 /* clear status bits */
630 nand_writel(info
, NDSR
, NDSR_MASK
);
633 static void __maybe_unused
634 enable_int(struct pxa3xx_nand_info
*info
, uint32_t int_mask
)
638 ndcr
= nand_readl(info
, NDCR
);
639 nand_writel(info
, NDCR
, ndcr
& ~int_mask
);
642 static void disable_int(struct pxa3xx_nand_info
*info
, uint32_t int_mask
)
646 ndcr
= nand_readl(info
, NDCR
);
647 nand_writel(info
, NDCR
, ndcr
| int_mask
);
650 static void drain_fifo(struct pxa3xx_nand_info
*info
, void *data
, int len
)
657 * According to the datasheet, when reading from NDDB
658 * with BCH enabled, after each 32 bytes reads, we
659 * have to make sure that the NDSR.RDDREQ bit is set.
661 * Drain the FIFO 8 32 bits reads at a time, and skip
662 * the polling on the last read.
665 ioread32_rep(info
->mmio_base
+ NDDB
, data
, 8);
667 ret
= readl_relaxed_poll_timeout(info
->mmio_base
+ NDSR
, val
,
668 val
& NDSR_RDDREQ
, 1000, 5000);
670 dev_err(&info
->pdev
->dev
,
671 "Timeout on RDDREQ while draining the FIFO\n");
680 ioread32_rep(info
->mmio_base
+ NDDB
, data
, len
);
683 static void handle_data_pio(struct pxa3xx_nand_info
*info
)
685 switch (info
->state
) {
686 case STATE_PIO_WRITING
:
687 if (info
->step_chunk_size
)
688 writesl(info
->mmio_base
+ NDDB
,
689 info
->data_buff
+ info
->data_buff_pos
,
690 DIV_ROUND_UP(info
->step_chunk_size
, 4));
692 if (info
->step_spare_size
)
693 writesl(info
->mmio_base
+ NDDB
,
694 info
->oob_buff
+ info
->oob_buff_pos
,
695 DIV_ROUND_UP(info
->step_spare_size
, 4));
697 case STATE_PIO_READING
:
698 if (info
->step_chunk_size
)
700 info
->data_buff
+ info
->data_buff_pos
,
701 DIV_ROUND_UP(info
->step_chunk_size
, 4));
703 if (info
->step_spare_size
)
705 info
->oob_buff
+ info
->oob_buff_pos
,
706 DIV_ROUND_UP(info
->step_spare_size
, 4));
709 dev_err(&info
->pdev
->dev
, "%s: invalid state %d\n", __func__
,
714 /* Update buffer pointers for multi-page read/write */
715 info
->data_buff_pos
+= info
->step_chunk_size
;
716 info
->oob_buff_pos
+= info
->step_spare_size
;
719 static void pxa3xx_nand_data_dma_irq(void *data
)
721 struct pxa3xx_nand_info
*info
= data
;
722 struct dma_tx_state state
;
723 enum dma_status status
;
725 status
= dmaengine_tx_status(info
->dma_chan
, info
->dma_cookie
, &state
);
726 if (likely(status
== DMA_COMPLETE
)) {
727 info
->state
= STATE_DMA_DONE
;
729 dev_err(&info
->pdev
->dev
, "DMA error on data channel\n");
730 info
->retcode
= ERR_DMABUSERR
;
732 dma_unmap_sg(info
->dma_chan
->device
->dev
, &info
->sg
, 1, info
->dma_dir
);
734 nand_writel(info
, NDSR
, NDSR_WRDREQ
| NDSR_RDDREQ
);
735 enable_int(info
, NDCR_INT_MASK
);
738 static void start_data_dma(struct pxa3xx_nand_info
*info
)
740 enum dma_transfer_direction direction
;
741 struct dma_async_tx_descriptor
*tx
;
743 switch (info
->state
) {
744 case STATE_DMA_WRITING
:
745 info
->dma_dir
= DMA_TO_DEVICE
;
746 direction
= DMA_MEM_TO_DEV
;
748 case STATE_DMA_READING
:
749 info
->dma_dir
= DMA_FROM_DEVICE
;
750 direction
= DMA_DEV_TO_MEM
;
753 dev_err(&info
->pdev
->dev
, "%s: invalid state %d\n", __func__
,
757 info
->sg
.length
= info
->chunk_size
;
759 info
->sg
.length
+= info
->spare_size
+ info
->ecc_size
;
760 dma_map_sg(info
->dma_chan
->device
->dev
, &info
->sg
, 1, info
->dma_dir
);
762 tx
= dmaengine_prep_slave_sg(info
->dma_chan
, &info
->sg
, 1, direction
,
765 dev_err(&info
->pdev
->dev
, "prep_slave_sg() failed\n");
768 tx
->callback
= pxa3xx_nand_data_dma_irq
;
769 tx
->callback_param
= info
;
770 info
->dma_cookie
= dmaengine_submit(tx
);
771 dma_async_issue_pending(info
->dma_chan
);
772 dev_dbg(&info
->pdev
->dev
, "%s(dir=%d cookie=%x size=%u)\n",
773 __func__
, direction
, info
->dma_cookie
, info
->sg
.length
);
776 static irqreturn_t
pxa3xx_nand_irq_thread(int irq
, void *data
)
778 struct pxa3xx_nand_info
*info
= data
;
780 handle_data_pio(info
);
782 info
->state
= STATE_CMD_DONE
;
783 nand_writel(info
, NDSR
, NDSR_WRDREQ
| NDSR_RDDREQ
);
788 static irqreturn_t
pxa3xx_nand_irq(int irq
, void *devid
)
790 struct pxa3xx_nand_info
*info
= devid
;
791 unsigned int status
, is_completed
= 0, is_ready
= 0;
792 unsigned int ready
, cmd_done
;
793 irqreturn_t ret
= IRQ_HANDLED
;
796 ready
= NDSR_FLASH_RDY
;
797 cmd_done
= NDSR_CS0_CMDD
;
800 cmd_done
= NDSR_CS1_CMDD
;
803 status
= nand_readl(info
, NDSR
);
805 if (status
& NDSR_UNCORERR
)
806 info
->retcode
= ERR_UNCORERR
;
807 if (status
& NDSR_CORERR
) {
808 info
->retcode
= ERR_CORERR
;
809 if (info
->variant
== PXA3XX_NAND_VARIANT_ARMADA370
&&
811 info
->ecc_err_cnt
= NDSR_ERR_CNT(status
);
813 info
->ecc_err_cnt
= 1;
816 * Each chunk composing a page is corrected independently,
817 * and we need to store maximum number of corrected bitflips
818 * to return it to the MTD layer in ecc.read_page().
820 info
->max_bitflips
= max_t(unsigned int,
824 if (status
& (NDSR_RDDREQ
| NDSR_WRDREQ
)) {
825 /* whether use dma to transfer data */
827 disable_int(info
, NDCR_INT_MASK
);
828 info
->state
= (status
& NDSR_RDDREQ
) ?
829 STATE_DMA_READING
: STATE_DMA_WRITING
;
830 start_data_dma(info
);
831 goto NORMAL_IRQ_EXIT
;
833 info
->state
= (status
& NDSR_RDDREQ
) ?
834 STATE_PIO_READING
: STATE_PIO_WRITING
;
835 ret
= IRQ_WAKE_THREAD
;
836 goto NORMAL_IRQ_EXIT
;
839 if (status
& cmd_done
) {
840 info
->state
= STATE_CMD_DONE
;
843 if (status
& ready
) {
844 info
->state
= STATE_READY
;
849 * Clear all status bit before issuing the next command, which
850 * can and will alter the status bits and will deserve a new
851 * interrupt on its own. This lets the controller exit the IRQ
853 nand_writel(info
, NDSR
, status
);
855 if (status
& NDSR_WRCMDREQ
) {
856 status
&= ~NDSR_WRCMDREQ
;
857 info
->state
= STATE_CMD_HANDLE
;
860 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
861 * must be loaded by writing directly either 12 or 16
862 * bytes directly to NDCB0, four bytes at a time.
864 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
865 * but each NDCBx register can be read.
867 nand_writel(info
, NDCB0
, info
->ndcb0
);
868 nand_writel(info
, NDCB0
, info
->ndcb1
);
869 nand_writel(info
, NDCB0
, info
->ndcb2
);
871 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
872 if (info
->variant
== PXA3XX_NAND_VARIANT_ARMADA370
)
873 nand_writel(info
, NDCB0
, info
->ndcb3
);
877 complete(&info
->cmd_complete
);
879 complete(&info
->dev_ready
);
/* Return 1 iff every byte in buf[0..len) is 0xff (erased flash pattern). */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}
892 static void set_command_address(struct pxa3xx_nand_info
*info
,
893 unsigned int page_size
, uint16_t column
, int page_addr
)
895 /* small page addr setting */
896 if (page_size
< PAGE_CHUNK_SIZE
) {
897 info
->ndcb1
= ((page_addr
& 0xFFFFFF) << 8)
902 info
->ndcb1
= ((page_addr
& 0xFFFF) << 16)
905 if (page_addr
& 0xFF0000)
906 info
->ndcb2
= (page_addr
& 0xFF0000) >> 16;
912 static void prepare_start_command(struct pxa3xx_nand_info
*info
, int command
)
914 struct pxa3xx_nand_host
*host
= info
->host
[info
->cs
];
915 struct mtd_info
*mtd
= nand_to_mtd(&host
->chip
);
917 /* reset data and oob column point to handle data */
920 info
->data_buff_pos
= 0;
921 info
->oob_buff_pos
= 0;
922 info
->step_chunk_size
= 0;
923 info
->step_spare_size
= 0;
927 info
->retcode
= ERR_NONE
;
928 info
->ecc_err_cnt
= 0;
934 case NAND_CMD_PAGEPROG
:
947 * If we are about to issue a read command, or about to set
948 * the write address, then clean the data buffer.
950 if (command
== NAND_CMD_READ0
||
951 command
== NAND_CMD_READOOB
||
952 command
== NAND_CMD_SEQIN
) {
954 info
->buf_count
= mtd
->writesize
+ mtd
->oobsize
;
955 memset(info
->data_buff
, 0xFF, info
->buf_count
);
960 static int prepare_set_command(struct pxa3xx_nand_info
*info
, int command
,
961 int ext_cmd_type
, uint16_t column
, int page_addr
)
963 int addr_cycle
, exec_cmd
;
964 struct pxa3xx_nand_host
*host
;
965 struct mtd_info
*mtd
;
967 host
= info
->host
[info
->cs
];
968 mtd
= nand_to_mtd(&host
->chip
);
973 info
->ndcb0
= NDCB0_CSEL
;
977 if (command
== NAND_CMD_SEQIN
)
980 addr_cycle
= NDCB0_ADDR_CYC(host
->row_addr_cycles
981 + host
->col_addr_cycles
);
984 case NAND_CMD_READOOB
:
986 info
->buf_start
= column
;
987 info
->ndcb0
|= NDCB0_CMD_TYPE(0)
991 if (command
== NAND_CMD_READOOB
)
992 info
->buf_start
+= mtd
->writesize
;
994 if (info
->cur_chunk
< info
->nfullchunks
) {
995 info
->step_chunk_size
= info
->chunk_size
;
996 info
->step_spare_size
= info
->spare_size
;
998 info
->step_chunk_size
= info
->last_chunk_size
;
999 info
->step_spare_size
= info
->last_spare_size
;
1003 * Multiple page read needs an 'extended command type' field,
1004 * which is either naked-read or last-read according to the
1007 if (mtd
->writesize
== PAGE_CHUNK_SIZE
) {
1008 info
->ndcb0
|= NDCB0_DBC
| (NAND_CMD_READSTART
<< 8);
1009 } else if (mtd
->writesize
> PAGE_CHUNK_SIZE
) {
1010 info
->ndcb0
|= NDCB0_DBC
| (NAND_CMD_READSTART
<< 8)
1012 | NDCB0_EXT_CMD_TYPE(ext_cmd_type
);
1013 info
->ndcb3
= info
->step_chunk_size
+
1014 info
->step_spare_size
;
1017 set_command_address(info
, mtd
->writesize
, column
, page_addr
);
1020 case NAND_CMD_SEQIN
:
1022 info
->buf_start
= column
;
1023 set_command_address(info
, mtd
->writesize
, 0, page_addr
);
1026 * Multiple page programming needs to execute the initial
1027 * SEQIN command that sets the page address.
1029 if (mtd
->writesize
> PAGE_CHUNK_SIZE
) {
1030 info
->ndcb0
|= NDCB0_CMD_TYPE(0x1)
1031 | NDCB0_EXT_CMD_TYPE(ext_cmd_type
)
1038 case NAND_CMD_PAGEPROG
:
1039 if (is_buf_blank(info
->data_buff
,
1040 (mtd
->writesize
+ mtd
->oobsize
))) {
1045 if (info
->cur_chunk
< info
->nfullchunks
) {
1046 info
->step_chunk_size
= info
->chunk_size
;
1047 info
->step_spare_size
= info
->spare_size
;
1049 info
->step_chunk_size
= info
->last_chunk_size
;
1050 info
->step_spare_size
= info
->last_spare_size
;
1053 /* Second command setting for large pages */
1054 if (mtd
->writesize
> PAGE_CHUNK_SIZE
) {
1056 * Multiple page write uses the 'extended command'
1057 * field. This can be used to issue a command dispatch
1058 * or a naked-write depending on the current stage.
1060 info
->ndcb0
|= NDCB0_CMD_TYPE(0x1)
1062 | NDCB0_EXT_CMD_TYPE(ext_cmd_type
);
1063 info
->ndcb3
= info
->step_chunk_size
+
1064 info
->step_spare_size
;
1067 * This is the command dispatch that completes a chunked
1068 * page program operation.
1070 if (info
->cur_chunk
== info
->ntotalchunks
) {
1071 info
->ndcb0
= NDCB0_CMD_TYPE(0x1)
1072 | NDCB0_EXT_CMD_TYPE(ext_cmd_type
)
1079 info
->ndcb0
|= NDCB0_CMD_TYPE(0x1)
1083 | (NAND_CMD_PAGEPROG
<< 8)
1089 case NAND_CMD_PARAM
:
1090 info
->buf_count
= INIT_BUFFER_SIZE
;
1091 info
->ndcb0
|= NDCB0_CMD_TYPE(0)
1095 info
->ndcb1
= (column
& 0xFF);
1096 info
->ndcb3
= INIT_BUFFER_SIZE
;
1097 info
->step_chunk_size
= INIT_BUFFER_SIZE
;
1100 case NAND_CMD_READID
:
1101 info
->buf_count
= READ_ID_BYTES
;
1102 info
->ndcb0
|= NDCB0_CMD_TYPE(3)
1105 info
->ndcb1
= (column
& 0xFF);
1107 info
->step_chunk_size
= 8;
1109 case NAND_CMD_STATUS
:
1110 info
->buf_count
= 1;
1111 info
->ndcb0
|= NDCB0_CMD_TYPE(4)
1115 info
->step_chunk_size
= 8;
1118 case NAND_CMD_ERASE1
:
1119 info
->ndcb0
|= NDCB0_CMD_TYPE(2)
1123 | (NAND_CMD_ERASE2
<< 8)
1125 info
->ndcb1
= page_addr
;
1129 case NAND_CMD_RESET
:
1130 info
->ndcb0
|= NDCB0_CMD_TYPE(5)
1135 case NAND_CMD_ERASE2
:
1141 dev_err(&info
->pdev
->dev
, "non-supported command %x\n",
1149 static void nand_cmdfunc(struct mtd_info
*mtd
, unsigned command
,
1150 int column
, int page_addr
)
1152 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1153 struct pxa3xx_nand_host
*host
= nand_get_controller_data(chip
);
1154 struct pxa3xx_nand_info
*info
= host
->info_data
;
1158 * if this is a x16 device ,then convert the input
1159 * "byte" address into a "word" address appropriate
1160 * for indexing a word-oriented device
1162 if (info
->reg_ndcr
& NDCR_DWIDTH_M
)
1166 * There may be different NAND chip hooked to
1167 * different chip select, so check whether
1168 * chip select has been changed, if yes, reset the timing
1170 if (info
->cs
!= host
->cs
) {
1171 info
->cs
= host
->cs
;
1172 nand_writel(info
, NDTR0CS0
, info
->ndtr0cs0
);
1173 nand_writel(info
, NDTR1CS0
, info
->ndtr1cs0
);
1176 prepare_start_command(info
, command
);
1178 info
->state
= STATE_PREPARED
;
1179 exec_cmd
= prepare_set_command(info
, command
, 0, column
, page_addr
);
1182 init_completion(&info
->cmd_complete
);
1183 init_completion(&info
->dev_ready
);
1184 info
->need_wait
= 1;
1185 pxa3xx_nand_start(info
);
1187 if (!wait_for_completion_timeout(&info
->cmd_complete
,
1188 CHIP_DELAY_TIMEOUT
)) {
1189 dev_err(&info
->pdev
->dev
, "Wait time out!!!\n");
1190 /* Stop State Machine for next command cycle */
1191 pxa3xx_nand_stop(info
);
1194 info
->state
= STATE_IDLE
;
1197 static void nand_cmdfunc_extended(struct mtd_info
*mtd
,
1198 const unsigned command
,
1199 int column
, int page_addr
)
1201 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1202 struct pxa3xx_nand_host
*host
= nand_get_controller_data(chip
);
1203 struct pxa3xx_nand_info
*info
= host
->info_data
;
1204 int exec_cmd
, ext_cmd_type
;
1207 * if this is a x16 device then convert the input
1208 * "byte" address into a "word" address appropriate
1209 * for indexing a word-oriented device
1211 if (info
->reg_ndcr
& NDCR_DWIDTH_M
)
1215 * There may be different NAND chip hooked to
1216 * different chip select, so check whether
1217 * chip select has been changed, if yes, reset the timing
1219 if (info
->cs
!= host
->cs
) {
1220 info
->cs
= host
->cs
;
1221 nand_writel(info
, NDTR0CS0
, info
->ndtr0cs0
);
1222 nand_writel(info
, NDTR1CS0
, info
->ndtr1cs0
);
1225 /* Select the extended command for the first command */
1227 case NAND_CMD_READ0
:
1228 case NAND_CMD_READOOB
:
1229 ext_cmd_type
= EXT_CMD_TYPE_MONO
;
1231 case NAND_CMD_SEQIN
:
1232 ext_cmd_type
= EXT_CMD_TYPE_DISPATCH
;
1234 case NAND_CMD_PAGEPROG
:
1235 ext_cmd_type
= EXT_CMD_TYPE_NAKED_RW
;
1242 prepare_start_command(info
, command
);
1245 * Prepare the "is ready" completion before starting a command
1246 * transaction sequence. If the command is not executed the
1247 * completion will be completed, see below.
1249 * We can do that inside the loop because the command variable
1250 * is invariant and thus so is the exec_cmd.
1252 info
->need_wait
= 1;
1253 init_completion(&info
->dev_ready
);
1255 info
->state
= STATE_PREPARED
;
1257 exec_cmd
= prepare_set_command(info
, command
, ext_cmd_type
,
1260 info
->need_wait
= 0;
1261 complete(&info
->dev_ready
);
1265 init_completion(&info
->cmd_complete
);
1266 pxa3xx_nand_start(info
);
1268 if (!wait_for_completion_timeout(&info
->cmd_complete
,
1269 CHIP_DELAY_TIMEOUT
)) {
1270 dev_err(&info
->pdev
->dev
, "Wait time out!!!\n");
1271 /* Stop State Machine for next command cycle */
1272 pxa3xx_nand_stop(info
);
1276 /* Only a few commands need several steps */
1277 if (command
!= NAND_CMD_PAGEPROG
&&
1278 command
!= NAND_CMD_READ0
&&
1279 command
!= NAND_CMD_READOOB
)
1284 /* Check if the sequence is complete */
1285 if (info
->cur_chunk
== info
->ntotalchunks
&& command
!= NAND_CMD_PAGEPROG
)
1289 * After a splitted program command sequence has issued
1290 * the command dispatch, the command sequence is complete.
1292 if (info
->cur_chunk
== (info
->ntotalchunks
+ 1) &&
1293 command
== NAND_CMD_PAGEPROG
&&
1294 ext_cmd_type
== EXT_CMD_TYPE_DISPATCH
)
1297 if (command
== NAND_CMD_READ0
|| command
== NAND_CMD_READOOB
) {
1298 /* Last read: issue a 'last naked read' */
1299 if (info
->cur_chunk
== info
->ntotalchunks
- 1)
1300 ext_cmd_type
= EXT_CMD_TYPE_LAST_RW
;
1302 ext_cmd_type
= EXT_CMD_TYPE_NAKED_RW
;
1305 * If a splitted program command has no more data to transfer,
1306 * the command dispatch must be issued to complete.
1308 } else if (command
== NAND_CMD_PAGEPROG
&&
1309 info
->cur_chunk
== info
->ntotalchunks
) {
1310 ext_cmd_type
= EXT_CMD_TYPE_DISPATCH
;
1314 info
->state
= STATE_IDLE
;
1317 static int pxa3xx_nand_write_page_hwecc(struct mtd_info
*mtd
,
1318 struct nand_chip
*chip
, const uint8_t *buf
, int oob_required
,
1321 chip
->write_buf(mtd
, buf
, mtd
->writesize
);
1322 chip
->write_buf(mtd
, chip
->oob_poi
, mtd
->oobsize
);
1327 static int pxa3xx_nand_read_page_hwecc(struct mtd_info
*mtd
,
1328 struct nand_chip
*chip
, uint8_t *buf
, int oob_required
,
1331 struct pxa3xx_nand_host
*host
= nand_get_controller_data(chip
);
1332 struct pxa3xx_nand_info
*info
= host
->info_data
;
1334 chip
->read_buf(mtd
, buf
, mtd
->writesize
);
1335 chip
->read_buf(mtd
, chip
->oob_poi
, mtd
->oobsize
);
1337 if (info
->retcode
== ERR_CORERR
&& info
->use_ecc
) {
1338 mtd
->ecc_stats
.corrected
+= info
->ecc_err_cnt
;
1340 } else if (info
->retcode
== ERR_UNCORERR
) {
1342 * for blank page (all 0xff), HW will calculate its ECC as
1343 * 0, which is different from the ECC information within
1344 * OOB, ignore such uncorrectable errors
1346 if (is_buf_blank(buf
, mtd
->writesize
))
1347 info
->retcode
= ERR_NONE
;
1349 mtd
->ecc_stats
.failed
++;
1352 return info
->max_bitflips
;
1355 static uint8_t pxa3xx_nand_read_byte(struct mtd_info
*mtd
)
1357 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1358 struct pxa3xx_nand_host
*host
= nand_get_controller_data(chip
);
1359 struct pxa3xx_nand_info
*info
= host
->info_data
;
1362 if (info
->buf_start
< info
->buf_count
)
1363 /* Has just send a new command? */
1364 retval
= info
->data_buff
[info
->buf_start
++];
1369 static u16
pxa3xx_nand_read_word(struct mtd_info
*mtd
)
1371 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1372 struct pxa3xx_nand_host
*host
= nand_get_controller_data(chip
);
1373 struct pxa3xx_nand_info
*info
= host
->info_data
;
1374 u16 retval
= 0xFFFF;
1376 if (!(info
->buf_start
& 0x01) && info
->buf_start
< info
->buf_count
) {
1377 retval
= *((u16
*)(info
->data_buff
+info
->buf_start
));
1378 info
->buf_start
+= 2;
1383 static void pxa3xx_nand_read_buf(struct mtd_info
*mtd
, uint8_t *buf
, int len
)
1385 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1386 struct pxa3xx_nand_host
*host
= nand_get_controller_data(chip
);
1387 struct pxa3xx_nand_info
*info
= host
->info_data
;
1388 int real_len
= min_t(size_t, len
, info
->buf_count
- info
->buf_start
);
1390 memcpy(buf
, info
->data_buff
+ info
->buf_start
, real_len
);
1391 info
->buf_start
+= real_len
;
1394 static void pxa3xx_nand_write_buf(struct mtd_info
*mtd
,
1395 const uint8_t *buf
, int len
)
1397 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1398 struct pxa3xx_nand_host
*host
= nand_get_controller_data(chip
);
1399 struct pxa3xx_nand_info
*info
= host
->info_data
;
1400 int real_len
= min_t(size_t, len
, info
->buf_count
- info
->buf_start
);
1402 memcpy(info
->data_buff
+ info
->buf_start
, buf
, real_len
);
1403 info
->buf_start
+= real_len
;
/*
 * Intentionally empty: chip selection is handled per-command via
 * info->cs in the cmdfunc path, not through this NAND core hook.
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}
1411 static int pxa3xx_nand_waitfunc(struct mtd_info
*mtd
, struct nand_chip
*this)
1413 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1414 struct pxa3xx_nand_host
*host
= nand_get_controller_data(chip
);
1415 struct pxa3xx_nand_info
*info
= host
->info_data
;
1417 if (info
->need_wait
) {
1418 info
->need_wait
= 0;
1419 if (!wait_for_completion_timeout(&info
->dev_ready
,
1420 CHIP_DELAY_TIMEOUT
)) {
1421 dev_err(&info
->pdev
->dev
, "Ready time out!!!\n");
1422 return NAND_STATUS_FAIL
;
1426 /* pxa3xx_nand_send_command has waited for command complete */
1427 if (this->state
== FL_WRITING
|| this->state
== FL_ERASING
) {
1428 if (info
->retcode
== ERR_NONE
)
1431 return NAND_STATUS_FAIL
;
1434 return NAND_STATUS_READY
;
1437 static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info
*info
)
1439 struct pxa3xx_nand_host
*host
= info
->host
[info
->cs
];
1440 struct platform_device
*pdev
= info
->pdev
;
1441 struct pxa3xx_nand_platform_data
*pdata
= dev_get_platdata(&pdev
->dev
);
1442 const struct nand_sdr_timings
*timings
;
1444 /* Configure default flash values */
1445 info
->chunk_size
= PAGE_CHUNK_SIZE
;
1446 info
->reg_ndcr
= 0x0; /* enable all interrupts */
1447 info
->reg_ndcr
|= (pdata
->enable_arbiter
) ? NDCR_ND_ARB_EN
: 0;
1448 info
->reg_ndcr
|= NDCR_RD_ID_CNT(READ_ID_BYTES
);
1449 info
->reg_ndcr
|= NDCR_SPARE_EN
;
1451 /* use the common timing to make a try */
1452 timings
= onfi_async_timing_mode_to_sdr_timings(0);
1453 if (IS_ERR(timings
))
1454 return PTR_ERR(timings
);
1456 pxa3xx_nand_set_sdr_timing(host
, timings
);
1460 static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info
*info
)
1462 struct pxa3xx_nand_host
*host
= info
->host
[info
->cs
];
1463 struct nand_chip
*chip
= &host
->chip
;
1464 struct mtd_info
*mtd
= nand_to_mtd(chip
);
1466 info
->reg_ndcr
|= (host
->col_addr_cycles
== 2) ? NDCR_RA_START
: 0;
1467 info
->reg_ndcr
|= (chip
->page_shift
== 6) ? NDCR_PG_PER_BLK
: 0;
1468 info
->reg_ndcr
|= (mtd
->writesize
== 2048) ? NDCR_PAGE_SZ
: 0;
1471 static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info
*info
)
1473 struct platform_device
*pdev
= info
->pdev
;
1474 struct pxa3xx_nand_platform_data
*pdata
= dev_get_platdata(&pdev
->dev
);
1475 uint32_t ndcr
= nand_readl(info
, NDCR
);
1477 /* Set an initial chunk size */
1478 info
->chunk_size
= ndcr
& NDCR_PAGE_SZ
? 2048 : 512;
1479 info
->reg_ndcr
= ndcr
&
1480 ~(NDCR_INT_MASK
| NDCR_ND_ARB_EN
| NFCV1_NDCR_ARB_CNTL
);
1481 info
->reg_ndcr
|= (pdata
->enable_arbiter
) ? NDCR_ND_ARB_EN
: 0;
1482 info
->ndtr0cs0
= nand_readl(info
, NDTR0CS0
);
1483 info
->ndtr1cs0
= nand_readl(info
, NDTR1CS0
);
1486 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info
*info
)
1488 struct platform_device
*pdev
= info
->pdev
;
1489 struct dma_slave_config config
;
1490 dma_cap_mask_t mask
;
1491 struct pxad_param param
;
1494 info
->data_buff
= kmalloc(info
->buf_size
, GFP_KERNEL
);
1495 if (info
->data_buff
== NULL
)
1500 ret
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(32));
1504 sg_init_one(&info
->sg
, info
->data_buff
, info
->buf_size
);
1506 dma_cap_set(DMA_SLAVE
, mask
);
1507 param
.prio
= PXAD_PRIO_LOWEST
;
1508 param
.drcmr
= info
->drcmr_dat
;
1509 info
->dma_chan
= dma_request_slave_channel_compat(mask
, pxad_filter_fn
,
1512 if (!info
->dma_chan
) {
1513 dev_err(&pdev
->dev
, "unable to request data dma channel\n");
1517 memset(&config
, 0, sizeof(config
));
1518 config
.src_addr_width
= DMA_SLAVE_BUSWIDTH_4_BYTES
;
1519 config
.dst_addr_width
= DMA_SLAVE_BUSWIDTH_4_BYTES
;
1520 config
.src_addr
= info
->mmio_phys
+ NDDB
;
1521 config
.dst_addr
= info
->mmio_phys
+ NDDB
;
1522 config
.src_maxburst
= 32;
1523 config
.dst_maxburst
= 32;
1524 ret
= dmaengine_slave_config(info
->dma_chan
, &config
);
1526 dev_err(&info
->pdev
->dev
,
1527 "dma channel configuration failed: %d\n",
1533 * Now that DMA buffers are allocated we turn on
1534 * DMA proper for I/O operations.
1540 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info
*info
)
1542 if (info
->use_dma
) {
1543 dmaengine_terminate_all(info
->dma_chan
);
1544 dma_release_channel(info
->dma_chan
);
1546 kfree(info
->data_buff
);
1549 static int pxa_ecc_init(struct pxa3xx_nand_info
*info
,
1550 struct nand_ecc_ctrl
*ecc
,
1551 int strength
, int ecc_stepsize
, int page_size
)
1553 if (strength
== 1 && ecc_stepsize
== 512 && page_size
== 2048) {
1554 info
->nfullchunks
= 1;
1555 info
->ntotalchunks
= 1;
1556 info
->chunk_size
= 2048;
1557 info
->spare_size
= 40;
1558 info
->ecc_size
= 24;
1559 ecc
->mode
= NAND_ECC_HW
;
1563 } else if (strength
== 1 && ecc_stepsize
== 512 && page_size
== 512) {
1564 info
->nfullchunks
= 1;
1565 info
->ntotalchunks
= 1;
1566 info
->chunk_size
= 512;
1567 info
->spare_size
= 8;
1569 ecc
->mode
= NAND_ECC_HW
;
1574 * Required ECC: 4-bit correction per 512 bytes
1575 * Select: 16-bit correction per 2048 bytes
1577 } else if (strength
== 4 && ecc_stepsize
== 512 && page_size
== 2048) {
1579 info
->nfullchunks
= 1;
1580 info
->ntotalchunks
= 1;
1581 info
->chunk_size
= 2048;
1582 info
->spare_size
= 32;
1583 info
->ecc_size
= 32;
1584 ecc
->mode
= NAND_ECC_HW
;
1585 ecc
->size
= info
->chunk_size
;
1586 ecc
->layout
= &ecc_layout_2KB_bch4bit
;
1589 } else if (strength
== 4 && ecc_stepsize
== 512 && page_size
== 4096) {
1591 info
->nfullchunks
= 2;
1592 info
->ntotalchunks
= 2;
1593 info
->chunk_size
= 2048;
1594 info
->spare_size
= 32;
1595 info
->ecc_size
= 32;
1596 ecc
->mode
= NAND_ECC_HW
;
1597 ecc
->size
= info
->chunk_size
;
1598 ecc
->layout
= &ecc_layout_4KB_bch4bit
;
1602 * Required ECC: 8-bit correction per 512 bytes
1603 * Select: 16-bit correction per 1024 bytes
1605 } else if (strength
== 8 && ecc_stepsize
== 512 && page_size
== 4096) {
1607 info
->nfullchunks
= 4;
1608 info
->ntotalchunks
= 5;
1609 info
->chunk_size
= 1024;
1610 info
->spare_size
= 0;
1611 info
->last_chunk_size
= 0;
1612 info
->last_spare_size
= 64;
1613 info
->ecc_size
= 32;
1614 ecc
->mode
= NAND_ECC_HW
;
1615 ecc
->size
= info
->chunk_size
;
1616 ecc
->layout
= &ecc_layout_4KB_bch8bit
;
1619 dev_err(&info
->pdev
->dev
,
1620 "ECC strength %d at page size %d is not supported\n",
1621 strength
, page_size
);
1625 dev_info(&info
->pdev
->dev
, "ECC strength %d, ECC step size %d\n",
1626 ecc
->strength
, ecc
->size
);
1630 static int pxa3xx_nand_scan(struct mtd_info
*mtd
)
1632 struct nand_chip
*chip
= mtd_to_nand(mtd
);
1633 struct pxa3xx_nand_host
*host
= nand_get_controller_data(chip
);
1634 struct pxa3xx_nand_info
*info
= host
->info_data
;
1635 struct platform_device
*pdev
= info
->pdev
;
1636 struct pxa3xx_nand_platform_data
*pdata
= dev_get_platdata(&pdev
->dev
);
1638 uint16_t ecc_strength
, ecc_step
;
1640 if (pdata
->keep_config
) {
1641 pxa3xx_nand_detect_config(info
);
1643 ret
= pxa3xx_nand_config_ident(info
);
1648 if (info
->reg_ndcr
& NDCR_DWIDTH_M
)
1649 chip
->options
|= NAND_BUSWIDTH_16
;
1651 /* Device detection must be done with ECC disabled */
1652 if (info
->variant
== PXA3XX_NAND_VARIANT_ARMADA370
)
1653 nand_writel(info
, NDECCCTRL
, 0x0);
1655 if (nand_scan_ident(mtd
, 1, NULL
))
1658 if (!pdata
->keep_config
) {
1659 ret
= pxa3xx_nand_init(host
);
1661 dev_err(&info
->pdev
->dev
, "Failed to init nand: %d\n",
1667 if (pdata
->flash_bbt
) {
1669 * We'll use a bad block table stored in-flash and don't
1670 * allow writing the bad block marker to the flash.
1672 chip
->bbt_options
|= NAND_BBT_USE_FLASH
|
1673 NAND_BBT_NO_OOB_BBM
;
1674 chip
->bbt_td
= &bbt_main_descr
;
1675 chip
->bbt_md
= &bbt_mirror_descr
;
1679 * If the page size is bigger than the FIFO size, let's check
1680 * we are given the right variant and then switch to the extended
1681 * (aka splitted) command handling,
1683 if (mtd
->writesize
> PAGE_CHUNK_SIZE
) {
1684 if (info
->variant
== PXA3XX_NAND_VARIANT_ARMADA370
) {
1685 chip
->cmdfunc
= nand_cmdfunc_extended
;
1687 dev_err(&info
->pdev
->dev
,
1688 "unsupported page size on this variant\n");
1693 if (pdata
->ecc_strength
&& pdata
->ecc_step_size
) {
1694 ecc_strength
= pdata
->ecc_strength
;
1695 ecc_step
= pdata
->ecc_step_size
;
1697 ecc_strength
= chip
->ecc_strength_ds
;
1698 ecc_step
= chip
->ecc_step_ds
;
1701 /* Set default ECC strength requirements on non-ONFI devices */
1702 if (ecc_strength
< 1 && ecc_step
< 1) {
1707 ret
= pxa_ecc_init(info
, &chip
->ecc
, ecc_strength
,
1708 ecc_step
, mtd
->writesize
);
1712 /* calculate addressing information */
1713 if (mtd
->writesize
>= 2048)
1714 host
->col_addr_cycles
= 2;
1716 host
->col_addr_cycles
= 1;
1718 /* release the initial buffer */
1719 kfree(info
->data_buff
);
1721 /* allocate the real data + oob buffer */
1722 info
->buf_size
= mtd
->writesize
+ mtd
->oobsize
;
1723 ret
= pxa3xx_nand_init_buff(info
);
1726 info
->oob_buff
= info
->data_buff
+ mtd
->writesize
;
1728 if ((mtd
->size
>> chip
->page_shift
) > 65536)
1729 host
->row_addr_cycles
= 3;
1731 host
->row_addr_cycles
= 2;
1733 if (!pdata
->keep_config
)
1734 pxa3xx_nand_config_tail(info
);
1736 return nand_scan_tail(mtd
);
1739 static int alloc_nand_resource(struct platform_device
*pdev
)
1741 struct device_node
*np
= pdev
->dev
.of_node
;
1742 struct pxa3xx_nand_platform_data
*pdata
;
1743 struct pxa3xx_nand_info
*info
;
1744 struct pxa3xx_nand_host
*host
;
1745 struct nand_chip
*chip
= NULL
;
1746 struct mtd_info
*mtd
;
1750 pdata
= dev_get_platdata(&pdev
->dev
);
1751 if (pdata
->num_cs
<= 0)
1753 info
= devm_kzalloc(&pdev
->dev
,
1754 sizeof(*info
) + sizeof(*host
) * pdata
->num_cs
,
1760 info
->variant
= pxa3xx_nand_get_variant(pdev
);
1761 for (cs
= 0; cs
< pdata
->num_cs
; cs
++) {
1762 host
= (void *)&info
[1] + sizeof(*host
) * cs
;
1764 nand_set_controller_data(chip
, host
);
1765 mtd
= nand_to_mtd(chip
);
1766 info
->host
[cs
] = host
;
1768 host
->info_data
= info
;
1769 mtd
->dev
.parent
= &pdev
->dev
;
1770 /* FIXME: all chips use the same device tree partitions */
1771 nand_set_flash_node(chip
, np
);
1773 nand_set_controller_data(chip
, host
);
1774 chip
->ecc
.read_page
= pxa3xx_nand_read_page_hwecc
;
1775 chip
->ecc
.write_page
= pxa3xx_nand_write_page_hwecc
;
1776 chip
->controller
= &info
->controller
;
1777 chip
->waitfunc
= pxa3xx_nand_waitfunc
;
1778 chip
->select_chip
= pxa3xx_nand_select_chip
;
1779 chip
->read_word
= pxa3xx_nand_read_word
;
1780 chip
->read_byte
= pxa3xx_nand_read_byte
;
1781 chip
->read_buf
= pxa3xx_nand_read_buf
;
1782 chip
->write_buf
= pxa3xx_nand_write_buf
;
1783 chip
->options
|= NAND_NO_SUBPAGE_WRITE
;
1784 chip
->cmdfunc
= nand_cmdfunc
;
1787 spin_lock_init(&chip
->controller
->lock
);
1788 init_waitqueue_head(&chip
->controller
->wq
);
1789 info
->clk
= devm_clk_get(&pdev
->dev
, NULL
);
1790 if (IS_ERR(info
->clk
)) {
1791 dev_err(&pdev
->dev
, "failed to get nand clock\n");
1792 return PTR_ERR(info
->clk
);
1794 ret
= clk_prepare_enable(info
->clk
);
1799 r
= platform_get_resource(pdev
, IORESOURCE_DMA
, 0);
1802 "no resource defined for data DMA\n");
1804 goto fail_disable_clk
;
1806 info
->drcmr_dat
= r
->start
;
1808 r
= platform_get_resource(pdev
, IORESOURCE_DMA
, 1);
1811 "no resource defined for cmd DMA\n");
1813 goto fail_disable_clk
;
1815 info
->drcmr_cmd
= r
->start
;
1818 irq
= platform_get_irq(pdev
, 0);
1820 dev_err(&pdev
->dev
, "no IRQ resource defined\n");
1822 goto fail_disable_clk
;
1825 r
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1826 info
->mmio_base
= devm_ioremap_resource(&pdev
->dev
, r
);
1827 if (IS_ERR(info
->mmio_base
)) {
1828 ret
= PTR_ERR(info
->mmio_base
);
1829 goto fail_disable_clk
;
1831 info
->mmio_phys
= r
->start
;
1833 /* Allocate a buffer to allow flash detection */
1834 info
->buf_size
= INIT_BUFFER_SIZE
;
1835 info
->data_buff
= kmalloc(info
->buf_size
, GFP_KERNEL
);
1836 if (info
->data_buff
== NULL
) {
1838 goto fail_disable_clk
;
1841 /* initialize all interrupts to be disabled */
1842 disable_int(info
, NDSR_MASK
);
1844 ret
= request_threaded_irq(irq
, pxa3xx_nand_irq
,
1845 pxa3xx_nand_irq_thread
, IRQF_ONESHOT
,
1848 dev_err(&pdev
->dev
, "failed to request IRQ\n");
1852 platform_set_drvdata(pdev
, info
);
1857 free_irq(irq
, info
);
1858 kfree(info
->data_buff
);
1860 clk_disable_unprepare(info
->clk
);
1864 static int pxa3xx_nand_remove(struct platform_device
*pdev
)
1866 struct pxa3xx_nand_info
*info
= platform_get_drvdata(pdev
);
1867 struct pxa3xx_nand_platform_data
*pdata
;
1873 pdata
= dev_get_platdata(&pdev
->dev
);
1875 irq
= platform_get_irq(pdev
, 0);
1877 free_irq(irq
, info
);
1878 pxa3xx_nand_free_buff(info
);
1881 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
1882 * In order to prevent a lockup of the system bus, the DFI bus
1883 * arbitration is granted to SMC upon driver removal. This is done by
1884 * setting the x_ARB_CNTL bit, which also prevents the NAND to have
1885 * access to the bus anymore.
1887 nand_writel(info
, NDCR
,
1888 (nand_readl(info
, NDCR
) & ~NDCR_ND_ARB_EN
) |
1889 NFCV1_NDCR_ARB_CNTL
);
1890 clk_disable_unprepare(info
->clk
);
1892 for (cs
= 0; cs
< pdata
->num_cs
; cs
++)
1893 nand_release(nand_to_mtd(&info
->host
[cs
]->chip
));
1897 static int pxa3xx_nand_probe_dt(struct platform_device
*pdev
)
1899 struct pxa3xx_nand_platform_data
*pdata
;
1900 struct device_node
*np
= pdev
->dev
.of_node
;
1901 const struct of_device_id
*of_id
=
1902 of_match_device(pxa3xx_nand_dt_ids
, &pdev
->dev
);
1907 pdata
= devm_kzalloc(&pdev
->dev
, sizeof(*pdata
), GFP_KERNEL
);
1911 if (of_get_property(np
, "marvell,nand-enable-arbiter", NULL
))
1912 pdata
->enable_arbiter
= 1;
1913 if (of_get_property(np
, "marvell,nand-keep-config", NULL
))
1914 pdata
->keep_config
= 1;
1915 of_property_read_u32(np
, "num-cs", &pdata
->num_cs
);
1916 pdata
->flash_bbt
= of_get_nand_on_flash_bbt(np
);
1918 pdata
->ecc_strength
= of_get_nand_ecc_strength(np
);
1919 if (pdata
->ecc_strength
< 0)
1920 pdata
->ecc_strength
= 0;
1922 pdata
->ecc_step_size
= of_get_nand_ecc_step_size(np
);
1923 if (pdata
->ecc_step_size
< 0)
1924 pdata
->ecc_step_size
= 0;
1926 pdev
->dev
.platform_data
= pdata
;
1931 static int pxa3xx_nand_probe(struct platform_device
*pdev
)
1933 struct pxa3xx_nand_platform_data
*pdata
;
1934 struct pxa3xx_nand_info
*info
;
1935 int ret
, cs
, probe_success
, dma_available
;
1937 dma_available
= IS_ENABLED(CONFIG_ARM
) &&
1938 (IS_ENABLED(CONFIG_ARCH_PXA
) || IS_ENABLED(CONFIG_ARCH_MMP
));
1939 if (use_dma
&& !dma_available
) {
1941 dev_warn(&pdev
->dev
,
1942 "This platform can't do DMA on this device\n");
1945 ret
= pxa3xx_nand_probe_dt(pdev
);
1949 pdata
= dev_get_platdata(&pdev
->dev
);
1951 dev_err(&pdev
->dev
, "no platform data defined\n");
1955 ret
= alloc_nand_resource(pdev
);
1957 dev_err(&pdev
->dev
, "alloc nand resource failed\n");
1961 info
= platform_get_drvdata(pdev
);
1963 for (cs
= 0; cs
< pdata
->num_cs
; cs
++) {
1964 struct mtd_info
*mtd
= nand_to_mtd(&info
->host
[cs
]->chip
);
1967 * The mtd name matches the one used in 'mtdparts' kernel
1968 * parameter. This name cannot be changed or otherwise
1969 * user's mtd partitions configuration would get broken.
1971 mtd
->name
= "pxa3xx_nand-0";
1973 ret
= pxa3xx_nand_scan(mtd
);
1975 dev_warn(&pdev
->dev
, "failed to scan nand at cs %d\n",
1980 ret
= mtd_device_register(mtd
, pdata
->parts
[cs
],
1981 pdata
->nr_parts
[cs
]);
1986 if (!probe_success
) {
1987 pxa3xx_nand_remove(pdev
);
1995 static int pxa3xx_nand_suspend(struct device
*dev
)
1997 struct pxa3xx_nand_info
*info
= dev_get_drvdata(dev
);
2000 dev_err(dev
, "driver busy, state = %d\n", info
->state
);
2004 clk_disable(info
->clk
);
2008 static int pxa3xx_nand_resume(struct device
*dev
)
2010 struct pxa3xx_nand_info
*info
= dev_get_drvdata(dev
);
2013 ret
= clk_enable(info
->clk
);
2017 /* We don't want to handle interrupt without calling mtd routine */
2018 disable_int(info
, NDCR_INT_MASK
);
2021 * Directly set the chip select to a invalid value,
2022 * then the driver would reset the timing according
2023 * to current chip select at the beginning of cmdfunc
2028 * As the spec says, the NDSR would be updated to 0x1800 when
2029 * doing the nand_clk disable/enable.
2030 * To prevent it damaging state machine of the driver, clear
2031 * all status before resume
2033 nand_writel(info
, NDSR
, NDSR_MASK
);
2038 #define pxa3xx_nand_suspend NULL
2039 #define pxa3xx_nand_resume NULL
2042 static const struct dev_pm_ops pxa3xx_nand_pm_ops
= {
2043 .suspend
= pxa3xx_nand_suspend
,
2044 .resume
= pxa3xx_nand_resume
,
2047 static struct platform_driver pxa3xx_nand_driver
= {
2049 .name
= "pxa3xx-nand",
2050 .of_match_table
= pxa3xx_nand_dt_ids
,
2051 .pm
= &pxa3xx_nand_pm_ops
,
2053 .probe
= pxa3xx_nand_probe
,
2054 .remove
= pxa3xx_nand_remove
,
2057 module_platform_driver(pxa3xx_nand_driver
);
2059 MODULE_LICENSE("GPL");
2060 MODULE_DESCRIPTION("PXA3xx NAND controller driver");