mtd: nand: pxa3xx_nand: show parent device in sysfs
[deliverable/linux.git] / drivers / mtd / nand / pxa3xx_nand.c
CommitLineData
fe69af00 1/*
2 * drivers/mtd/nand/pxa3xx_nand.c
3 *
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
de484a38
EG
10 *
11 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
fe69af00 12 */
13
a88bdbb5 14#include <linux/kernel.h>
fe69af00 15#include <linux/module.h>
16#include <linux/interrupt.h>
17#include <linux/platform_device.h>
8f5ba31a 18#include <linux/dmaengine.h>
fe69af00 19#include <linux/dma-mapping.h>
8f5ba31a 20#include <linux/dma/pxa-dma.h>
fe69af00 21#include <linux/delay.h>
22#include <linux/clk.h>
23#include <linux/mtd/mtd.h>
24#include <linux/mtd/nand.h>
25#include <linux/mtd/partitions.h>
a1c06ee1 26#include <linux/io.h>
afca11ec 27#include <linux/iopoll.h>
a1c06ee1 28#include <linux/irq.h>
5a0e3ad6 29#include <linux/slab.h>
1e7ba630
DM
30#include <linux/of.h>
31#include <linux/of_device.h>
776f265e 32#include <linux/of_mtd.h>
fe69af00 33
ce914e6b 34#if defined(CONFIG_ARM) && (defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP))
f4db2e3a
EG
35#define ARCH_HAS_DMA
36#endif
37
293b2da1 38#include <linux/platform_data/mtd-nand-pxa3xx.h>
fe69af00 39
e5860c18
NMG
40#define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
41#define NAND_STOP_DELAY msecs_to_jiffies(40)
4eb2da89 42#define PAGE_CHUNK_SIZE (2048)
fe69af00 43
62e8b851
EG
44/*
45 * Define a buffer size for the initial command that detects the flash device:
c1634097
EG
46 * STATUS, READID and PARAM.
47 * ONFI param page is 256 bytes, and there are three redundant copies
48 * to be read. JEDEC param page is 512 bytes, and there are also three
49 * redundant copies to be read.
50 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
62e8b851 51 */
c1634097 52#define INIT_BUFFER_SIZE 2048
62e8b851 53
fe69af00 54/* registers and bit definitions */
55#define NDCR (0x00) /* Control register */
56#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
57#define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
58#define NDSR (0x14) /* Status Register */
59#define NDPCR (0x18) /* Page Count Register */
60#define NDBDR0 (0x1C) /* Bad Block Register 0 */
61#define NDBDR1 (0x20) /* Bad Block Register 1 */
43bcfd2b 62#define NDECCCTRL (0x28) /* ECC control */
fe69af00 63#define NDDB (0x40) /* Data Buffer */
64#define NDCB0 (0x48) /* Command Buffer0 */
65#define NDCB1 (0x4C) /* Command Buffer1 */
66#define NDCB2 (0x50) /* Command Buffer2 */
67
68#define NDCR_SPARE_EN (0x1 << 31)
69#define NDCR_ECC_EN (0x1 << 30)
70#define NDCR_DMA_EN (0x1 << 29)
71#define NDCR_ND_RUN (0x1 << 28)
72#define NDCR_DWIDTH_C (0x1 << 27)
73#define NDCR_DWIDTH_M (0x1 << 26)
74#define NDCR_PAGE_SZ (0x1 << 24)
75#define NDCR_NCSX (0x1 << 23)
76#define NDCR_ND_MODE (0x3 << 21)
77#define NDCR_NAND_MODE (0x0)
78#define NDCR_CLR_PG_CNT (0x1 << 20)
e971affa
RJ
79#define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
80#define NFCV2_NDCR_STOP_ON_UNCOR (0x1 << 19)
fe69af00 81#define NDCR_RD_ID_CNT_MASK (0x7 << 16)
82#define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
83
84#define NDCR_RA_START (0x1 << 15)
85#define NDCR_PG_PER_BLK (0x1 << 14)
86#define NDCR_ND_ARB_EN (0x1 << 12)
f8155a40 87#define NDCR_INT_MASK (0xFFF)
fe69af00 88
89#define NDSR_MASK (0xfff)
87f5336e
EG
90#define NDSR_ERR_CNT_OFF (16)
91#define NDSR_ERR_CNT_MASK (0x1f)
92#define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
f8155a40
LW
93#define NDSR_RDY (0x1 << 12)
94#define NDSR_FLASH_RDY (0x1 << 11)
fe69af00 95#define NDSR_CS0_PAGED (0x1 << 10)
96#define NDSR_CS1_PAGED (0x1 << 9)
97#define NDSR_CS0_CMDD (0x1 << 8)
98#define NDSR_CS1_CMDD (0x1 << 7)
99#define NDSR_CS0_BBD (0x1 << 6)
100#define NDSR_CS1_BBD (0x1 << 5)
87f5336e
EG
101#define NDSR_UNCORERR (0x1 << 4)
102#define NDSR_CORERR (0x1 << 3)
fe69af00 103#define NDSR_WRDREQ (0x1 << 2)
104#define NDSR_RDDREQ (0x1 << 1)
105#define NDSR_WRCMDREQ (0x1)
106
41a63430 107#define NDCB0_LEN_OVRD (0x1 << 28)
4eb2da89 108#define NDCB0_ST_ROW_EN (0x1 << 26)
fe69af00 109#define NDCB0_AUTO_RS (0x1 << 25)
110#define NDCB0_CSEL (0x1 << 24)
70ed8523
EG
111#define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
112#define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
fe69af00 113#define NDCB0_CMD_TYPE_MASK (0x7 << 21)
114#define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
115#define NDCB0_NC (0x1 << 20)
116#define NDCB0_DBC (0x1 << 19)
117#define NDCB0_ADDR_CYC_MASK (0x7 << 16)
118#define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
119#define NDCB0_CMD2_MASK (0xff << 8)
120#define NDCB0_CMD1_MASK (0xff)
121#define NDCB0_ADDR_CYC_SHIFT (16)
122
70ed8523
EG
123#define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
124#define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
125#define EXT_CMD_TYPE_READ 4 /* Read */
126#define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
127#define EXT_CMD_TYPE_FINAL 3 /* Final command */
128#define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
129#define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
130
b226eca2
EG
131/*
132 * This should be large enough to read 'ONFI' and 'JEDEC'.
133 * Let's use 7 bytes, which is the maximum ID count supported
134 * by the controller (see NDCR_RD_ID_CNT_MASK).
135 */
136#define READ_ID_BYTES 7
137
fe69af00 138/* macros for registers read/write */
139#define nand_writel(info, off, val) \
b7e46062 140 writel_relaxed((val), (info)->mmio_base + (off))
fe69af00 141
142#define nand_readl(info, off) \
b7e46062 143 readl_relaxed((info)->mmio_base + (off))
fe69af00 144
145/* error code and state */
146enum {
147 ERR_NONE = 0,
148 ERR_DMABUSERR = -1,
149 ERR_SENDCMD = -2,
87f5336e 150 ERR_UNCORERR = -3,
fe69af00 151 ERR_BBERR = -4,
87f5336e 152 ERR_CORERR = -5,
fe69af00 153};
154
155enum {
f8155a40 156 STATE_IDLE = 0,
d456882b 157 STATE_PREPARED,
fe69af00 158 STATE_CMD_HANDLE,
159 STATE_DMA_READING,
160 STATE_DMA_WRITING,
161 STATE_DMA_DONE,
162 STATE_PIO_READING,
163 STATE_PIO_WRITING,
f8155a40
LW
164 STATE_CMD_DONE,
165 STATE_READY,
fe69af00 166};
167
c0f3b864
EG
168enum pxa3xx_nand_variant {
169 PXA3XX_NAND_VARIANT_PXA,
170 PXA3XX_NAND_VARIANT_ARMADA370,
171};
172
d456882b
LW
173struct pxa3xx_nand_host {
174 struct nand_chip chip;
d456882b
LW
175 struct mtd_info *mtd;
176 void *info_data;
177
178 /* page size of attached chip */
d456882b 179 int use_ecc;
f3c8cfc2 180 int cs;
fe69af00 181
d456882b
LW
182 /* calculated from pxa3xx_nand_flash data */
183 unsigned int col_addr_cycles;
184 unsigned int row_addr_cycles;
d456882b
LW
185};
186
187struct pxa3xx_nand_info {
401e67e2 188 struct nand_hw_control controller;
fe69af00 189 struct platform_device *pdev;
fe69af00 190
191 struct clk *clk;
192 void __iomem *mmio_base;
8638fac8 193 unsigned long mmio_phys;
55d9fd6e 194 struct completion cmd_complete, dev_ready;
fe69af00 195
196 unsigned int buf_start;
197 unsigned int buf_count;
62e8b851 198 unsigned int buf_size;
fa543bef
EG
199 unsigned int data_buff_pos;
200 unsigned int oob_buff_pos;
fe69af00 201
202 /* DMA information */
8f5ba31a
RJ
203 struct scatterlist sg;
204 enum dma_data_direction dma_dir;
205 struct dma_chan *dma_chan;
206 dma_cookie_t dma_cookie;
fe69af00 207 int drcmr_dat;
208 int drcmr_cmd;
209
210 unsigned char *data_buff;
18c81b18 211 unsigned char *oob_buff;
fe69af00 212 dma_addr_t data_buff_phys;
fe69af00 213 int data_dma_ch;
fe69af00 214
f3c8cfc2 215 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
fe69af00 216 unsigned int state;
217
c0f3b864
EG
218 /*
219 * This driver supports NFCv1 (as found in PXA SoC)
220 * and NFCv2 (as found in Armada 370/XP SoC).
221 */
222 enum pxa3xx_nand_variant variant;
223
f3c8cfc2 224 int cs;
fe69af00 225 int use_ecc; /* use HW ECC ? */
43bcfd2b 226 int ecc_bch; /* using BCH ECC? */
fe69af00 227 int use_dma; /* use DMA ? */
5bb653e8 228 int use_spare; /* use spare ? */
55d9fd6e 229 int need_wait;
fe69af00 230
2128b08c 231 unsigned int data_size; /* data to be read from FIFO */
70ed8523 232 unsigned int chunk_size; /* split commands chunk size */
d456882b 233 unsigned int oob_size;
43bcfd2b
EG
234 unsigned int spare_size;
235 unsigned int ecc_size;
87f5336e
EG
236 unsigned int ecc_err_cnt;
237 unsigned int max_bitflips;
fe69af00 238 int retcode;
fe69af00 239
48cf7efa
EG
240 /* cached register value */
241 uint32_t reg_ndcr;
242 uint32_t ndtr0cs0;
243 uint32_t ndtr1cs0;
244
fe69af00 245 /* generated NDCBx register values */
246 uint32_t ndcb0;
247 uint32_t ndcb1;
248 uint32_t ndcb2;
3a1a344a 249 uint32_t ndcb3;
fe69af00 250};
251
90ab5ee9 252static bool use_dma = 1;
fe69af00 253module_param(use_dma, bool, 0444);
25985edc 254MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
fe69af00 255
a9cadf72
EG
256struct pxa3xx_nand_timing {
257 unsigned int tCH; /* Enable signal hold time */
258 unsigned int tCS; /* Enable signal setup time */
259 unsigned int tWH; /* ND_nWE high duration */
260 unsigned int tWP; /* ND_nWE pulse time */
261 unsigned int tRH; /* ND_nRE high duration */
262 unsigned int tRP; /* ND_nRE pulse width */
263 unsigned int tR; /* ND_nWE high to ND_nRE low for read */
264 unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */
265 unsigned int tAR; /* ND_ALE low to ND_nRE low delay */
266};
267
268struct pxa3xx_nand_flash {
269 char *name;
270 uint32_t chip_id;
271 unsigned int page_per_block; /* Pages per block (PG_PER_BLK) */
272 unsigned int page_size; /* Page size in bytes (PAGE_SZ) */
273 unsigned int flash_width; /* Width of Flash memory (DWIDTH_M) */
274 unsigned int dfc_width; /* Width of flash controller(DWIDTH_C) */
275 unsigned int num_blocks; /* Number of physical blocks in Flash */
276
277 struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
278};
279
c1f82478 280static struct pxa3xx_nand_timing timing[] = {
227a886c
LW
281 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
282 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
283 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
284 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
d3490dfd
HZ
285};
286
c1f82478 287static struct pxa3xx_nand_flash builtin_flash_types[] = {
4332c116
LW
288{ "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing[0] },
289{ "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing[1] },
290{ "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing[1] },
291{ "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing[1] },
292{ "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing[2] },
293{ "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing[2] },
294{ "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing[2] },
295{ "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing[2] },
296{ "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] },
d3490dfd
HZ
297};
298
776f265e
EG
299static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
300static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
301
302static struct nand_bbt_descr bbt_main_descr = {
303 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
304 | NAND_BBT_2BIT | NAND_BBT_VERSION,
305 .offs = 8,
306 .len = 6,
307 .veroffs = 14,
308 .maxblocks = 8, /* Last 8 blocks in each chip */
309 .pattern = bbt_pattern
310};
311
312static struct nand_bbt_descr bbt_mirror_descr = {
313 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
314 | NAND_BBT_2BIT | NAND_BBT_VERSION,
315 .offs = 8,
316 .len = 6,
317 .veroffs = 14,
318 .maxblocks = 8, /* Last 8 blocks in each chip */
319 .pattern = bbt_mirror_pattern
320};
321
3db227b6
RG
322static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
323 .eccbytes = 32,
324 .eccpos = {
325 32, 33, 34, 35, 36, 37, 38, 39,
326 40, 41, 42, 43, 44, 45, 46, 47,
327 48, 49, 50, 51, 52, 53, 54, 55,
328 56, 57, 58, 59, 60, 61, 62, 63},
329 .oobfree = { {2, 30} }
330};
331
70ed8523
EG
332static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
333 .eccbytes = 64,
334 .eccpos = {
335 32, 33, 34, 35, 36, 37, 38, 39,
336 40, 41, 42, 43, 44, 45, 46, 47,
337 48, 49, 50, 51, 52, 53, 54, 55,
338 56, 57, 58, 59, 60, 61, 62, 63,
339 96, 97, 98, 99, 100, 101, 102, 103,
340 104, 105, 106, 107, 108, 109, 110, 111,
341 112, 113, 114, 115, 116, 117, 118, 119,
342 120, 121, 122, 123, 124, 125, 126, 127},
343 /* Bootrom looks in bytes 0 & 5 for bad blocks */
344 .oobfree = { {6, 26}, { 64, 32} }
345};
346
347static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
348 .eccbytes = 128,
349 .eccpos = {
350 32, 33, 34, 35, 36, 37, 38, 39,
351 40, 41, 42, 43, 44, 45, 46, 47,
352 48, 49, 50, 51, 52, 53, 54, 55,
353 56, 57, 58, 59, 60, 61, 62, 63},
354 .oobfree = { }
355};
356
227a886c
LW
357/* Define a default flash type setting serve as flash detecting only */
358#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
359
fe69af00 360#define NDTR0_tCH(c) (min((c), 7) << 19)
361#define NDTR0_tCS(c) (min((c), 7) << 16)
362#define NDTR0_tWH(c) (min((c), 7) << 11)
363#define NDTR0_tWP(c) (min((c), 7) << 8)
364#define NDTR0_tRH(c) (min((c), 7) << 3)
365#define NDTR0_tRP(c) (min((c), 7) << 0)
366
367#define NDTR1_tR(c) (min((c), 65535) << 16)
368#define NDTR1_tWHR(c) (min((c), 15) << 4)
369#define NDTR1_tAR(c) (min((c), 15) << 0)
370
371/* convert nano-seconds to nand flash controller clock cycles */
93b352fc 372#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
fe69af00 373
17754ad6 374static const struct of_device_id pxa3xx_nand_dt_ids[] = {
c7e9c7e7
EG
375 {
376 .compatible = "marvell,pxa3xx-nand",
377 .data = (void *)PXA3XX_NAND_VARIANT_PXA,
378 },
1963ff97
EG
379 {
380 .compatible = "marvell,armada370-nand",
381 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
382 },
c7e9c7e7
EG
383 {}
384};
385MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
386
/*
 * Resolve the controller variant (NFCv1 on PXA SoCs vs. NFCv2 on
 * Armada 370/XP) from the matched device-tree compatible string.
 * Returns PXA3XX_NAND_VARIANT_PXA when the device was not probed
 * via DT (no OF match), which is the legacy platform-data path.
 */
static enum pxa3xx_nand_variant
pxa3xx_nand_get_variant(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
	if (!of_id)
		return PXA3XX_NAND_VARIANT_PXA;
	/* .data holds the enum value cast to void * in pxa3xx_nand_dt_ids */
	return (enum pxa3xx_nand_variant)of_id->data;
}
396
/*
 * Convert the flash timing parameters (in nanoseconds) to controller
 * clock cycles and program them into NDTR0CS0/NDTR1CS0. The computed
 * values are also cached in info->ndtr0cs0/ndtr1cs0 so they can be
 * re-applied when switching between chip selects (see nand_cmdfunc).
 */
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	/* NDTR0 packs the write/read strobe and chip-enable timings */
	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	/* NDTR1 packs the page-read, status-read and ALE delays */
	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
420
6a3e4865
EG
421/*
422 * Set the data and OOB size, depending on the selected
423 * spare and ECC configuration.
424 * Only applicable to READ0, READOOB and PAGEPROG commands.
425 */
static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
				struct mtd_info *mtd)
{
	/* SPARE_EN in the cached NDCR decides whether OOB is transferred */
	int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;

	info->data_size = mtd->writesize;
	if (!oob_enable)
		return;

	/*
	 * With spare enabled, the OOB transfer covers the spare area;
	 * when HW ECC is off the ECC bytes are transferred as well,
	 * since the controller is not consuming them itself.
	 */
	info->oob_size = info->spare_size;
	if (!info->use_ecc)
		info->oob_size += info->ecc_size;
}
439
f8155a40
LW
/**
 * NOTE: it is a must to set ND_RUN firstly, then write
 * command buffer, otherwise, it does not work.
 * We enable all the interrupt at the same time, and
 * let pxa3xx_nand_irq to handle all logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	/* start from the cached NDCR so static configuration is preserved */
	ndcr = info->reg_ndcr;

	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		/* NDECCCTRL=1 selects BCH ECC instead of Hamming */
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	if (info->use_dma)
		ndcr |= NDCR_DMA_EN;
	else
		ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDSR, NDSR_MASK);
	/* write 0 first so the ND_RUN 0->1 transition is guaranteed */
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDCR, ndcr);
}
479
/*
 * Stop the controller state machine: wait (bounded) for ND_RUN to
 * self-clear, force it clear on timeout, abort any in-flight DMA and
 * clear all status bits. Used after a command timeout so the next
 * command starts from a clean state.
 */
static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	int timeout = NAND_STOP_DELAY;

	/* wait RUN bit in NDCR become 0 */
	ndcr = nand_readl(info, NDCR);
	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
		ndcr = nand_readl(info, NDCR);
		udelay(1);
	}

	if (timeout <= 0) {
		/* controller did not stop by itself; clear ND_RUN manually */
		ndcr &= ~NDCR_ND_RUN;
		nand_writel(info, NDCR, ndcr);
	}
	if (info->dma_chan)
		dmaengine_terminate_all(info->dma_chan);

	/* clear status bits */
	nand_writel(info, NDSR, NDSR_MASK);
}
502
57ff88f0
EG
/*
 * Enable the interrupts in int_mask. Note the apparent inversion:
 * the NDCR interrupt bits are *mask* bits, so clearing a bit in NDCR
 * enables the corresponding interrupt.
 */
static void __maybe_unused
enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~int_mask);
}
511
/*
 * Disable the interrupts in int_mask by setting the corresponding
 * NDCR mask bits (NDCR interrupt bits are active-high masks).
 */
static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}
519
8dad0386
MR
/*
 * Read 'len' 32-bit words from the controller data FIFO (NDDB) into
 * 'data'. Note: len is a count of 32-bit words, not bytes.
 */
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		u32 val;
		int ret;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32 bytes reads, we
		 * have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO 8 32 bits reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			readsl(info->mmio_base + NDDB, data, 8);

			ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
							 val & NDSR_RDDREQ, 1000, 5000);
			if (ret) {
				dev_err(&info->pdev->dev,
					"Timeout on RDDREQ while draining the FIFO\n");
				return;
			}

			/* advanced by 8 words == 32 bytes */
			data += 32;
			len -= 8;
		}
	}

	/* non-BCH path, or the final (<= 8 word) chunk: no polling needed */
	readsl(info->mmio_base + NDDB, data, len);
}
552
/*
 * PIO transfer of one chunk (plus its OOB, if any) between the data
 * buffers and the controller FIFO, in the direction implied by
 * info->state. Advances the data/oob positions so that multi-chunk
 * (multi-page) operations can call this repeatedly.
 */
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	/* a chunk is at most chunk_size; the last one may be shorter */
	unsigned int do_bytes = min(info->data_size, info->chunk_size);

	switch (info->state) {
	case STATE_PIO_WRITING:
		writesl(info->mmio_base + NDDB,
			info->data_buff + info->data_buff_pos,
			DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		drain_fifo(info,
			   info->data_buff + info->data_buff_pos,
			   DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		/* PIO must only run from a PIO state; anything else is a bug */
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += do_bytes;
	info->oob_buff_pos += info->oob_size;
	info->data_size -= do_bytes;
}
589
/*
 * dmaengine completion callback for the data transfer started by
 * start_data_dma(). Records success/failure, unmaps the scatterlist
 * and re-enables the controller interrupts that were masked while
 * DMA was in flight.
 */
static void pxa3xx_nand_data_dma_irq(void *data)
{
	struct pxa3xx_nand_info *info = data;
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
	if (likely(status == DMA_COMPLETE)) {
		info->state = STATE_DMA_DONE;
	} else {
		dev_err(&info->pdev->dev, "DMA error on data channel\n");
		info->retcode = ERR_DMABUSERR;
	}
	dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	/* ack the data-request bits, then unmask all controller interrupts */
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
	enable_int(info, NDCR_INT_MASK);
}
608
/*
 * Kick off a dmaengine transfer between the pre-allocated DMA buffer
 * (described by info->sg) and the controller, in the direction implied
 * by info->state. Completion is reported via pxa3xx_nand_data_dma_irq.
 */
static void start_data_dma(struct pxa3xx_nand_info *info)
{
	enum dma_transfer_direction direction;
	struct dma_async_tx_descriptor *tx;

	switch (info->state) {
	case STATE_DMA_WRITING:
		info->dma_dir = DMA_TO_DEVICE;
		direction = DMA_MEM_TO_DEV;
		break;
	case STATE_DMA_READING:
		info->dma_dir = DMA_FROM_DEVICE;
		direction = DMA_DEV_TO_MEM;
		break;
	default:
		/* DMA must only be started from a DMA state */
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}
	/* data chunk plus, when OOB is transferred, spare + ECC bytes */
	info->sg.length = info->data_size +
		(info->oob_size ? info->spare_size + info->ecc_size : 0);
	dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		/* NOTE(review): on failure the sg stays mapped and no
		 * completion will ever fire; the command then times out */
		dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
		return;
	}
	tx->callback = pxa3xx_nand_data_dma_irq;
	tx->callback_param = info;
	info->dma_cookie = dmaengine_submit(tx);
	dma_async_issue_pending(info->dma_chan);
	dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
		__func__, direction, info->dma_cookie, info->sg.length);
}
645
24542257
RJ
/*
 * Threaded half of the IRQ handler: performs the (potentially slow)
 * PIO data transfer that the hard IRQ handler deferred by returning
 * IRQ_WAKE_THREAD, then acks the data-request status bits.
 */
static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
{
	struct pxa3xx_nand_info *info = data;

	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);

	return IRQ_HANDLED;
}
657
/*
 * Hard IRQ handler: decodes NDSR, records ECC results, dispatches data
 * transfers (DMA directly, PIO via IRQ_WAKE_THREAD), loads the command
 * buffer when the controller asks for it, and completes the waiters in
 * nand_cmdfunc on command-done / device-ready.
 */
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	/* ready/cmd-done status bits differ between CS0 and CS1 */
	if (info->cs == 0) {
		ready = NDSR_FLASH_RDY;
		cmd_done = NDSR_CS0_CMDD;
	} else {
		ready = NDSR_RDY;
		cmd_done = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		/* only NFCv2 with BCH reports an actual error count */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store maximum number of corrected bitflips
		 * to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* whether use dma to transfer data */
		if (info->use_dma) {
			/* mask interrupts until the DMA callback re-enables */
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			goto NORMAL_IRQ_EXIT;
		} else {
			/* PIO is done in the threaded handler */
			info->state = (status & NDSR_RDDREQ) ?
				STATE_PIO_READING : STATE_PIO_WRITING;
			ret = IRQ_WAKE_THREAD;
			goto NORMAL_IRQ_EXIT;
		}
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	/*
	 * Clear all status bit before issuing the next command, which
	 * can and will alter the status bits and will deserve a new
	 * interrupt on its own. This lets the controller exit the IRQ
	 */
	nand_writel(info, NDSR, status);

	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing directly either 12 or 16
		 * bytes directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	if (is_completed)
		complete(&info->cmd_complete);
	if (is_ready)
		complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
	return ret;
}
753
/*
 * Return 1 if the first 'len' bytes of 'buf' are all 0xff (i.e. the
 * buffer looks like erased flash), 0 otherwise. An empty buffer is
 * considered blank.
 */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (buf[i] != 0xff)
			return 0;
	}
	return 1;
}
761
86beebae
EG
/*
 * Encode the column/page address into NDCB1/NDCB2 according to the
 * device's page size: small-page devices use a 1-byte column and a
 * 3-byte row address, large-page devices use a 2-byte column and up
 * to 3 row-address bytes (the third one going into NDCB2).
 */
static void set_command_address(struct pxa3xx_nand_info *info,
		unsigned int page_size, uint16_t column, int page_addr)
{
	/* small page addr setting */
	if (page_size < PAGE_CHUNK_SIZE) {
		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
				| (column & 0xFF);

		info->ndcb2 = 0;
	} else {
		/* large page addr setting */
		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
				| (column & 0xFFFF);

		/* third row-address cycle, only when the page needs it */
		if (page_addr & 0xFF0000)
			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
		else
			info->ndcb2 = 0;
	}
}
781
/*
 * Reset the per-command driver state (buffer positions, ECC usage,
 * error bookkeeping) and apply command-specific defaults before
 * prepare_set_command() builds the NDCBx values.
 */
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = host->mtd;

	/* reset data and oob column point to handle data */
	info->buf_start		= 0;
	info->buf_count		= 0;
	info->oob_size		= 0;
	info->data_buff_pos	= 0;
	info->oob_buff_pos	= 0;
	info->use_ecc		= 0;
	info->use_spare		= 1;
	info->retcode		= ERR_NONE;
	info->ecc_err_cnt	= 0;
	info->ndcb3		= 0;
	info->need_wait		= 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
		/* fall through: READ0/PAGEPROG also need data sizes set */
	case NAND_CMD_READOOB:
		pxa3xx_set_datasize(info, mtd);
		break;
	case NAND_CMD_PARAM:
		/* ONFI param page is read without the spare area */
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {

		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}

}
829
/*
 * Translate an MTD-level NAND command into the controller's NDCB0-3
 * command-buffer values. ext_cmd_type selects the 'extended command'
 * field used by the chunked (multi-command) large-page sequences on
 * NFCv2. Returns 1 if the command must actually be issued to the
 * controller, 0 if it is a no-op at this stage (e.g. SEQIN on small
 * pages, ERASE2, or programming an all-0xff buffer).
 */
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = host->mtd;
	addr_cycle = 0;
	exec_cmd = 1;

	/* CSEL selects chip-select 1 when set */
	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	/* SEQIN only latches the address; the real work happens at PAGEPROG */
	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		/* OOB read is a full-page read with the start offset moved */
		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			/* No data transfer in this case */
			info->data_size = 0;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		/* skip programming a page that is entirely erased */
		if (is_buf_blank(info->data_buff,
				 (mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->data_size == 0) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			/* small page: single SEQIN+PAGEPROG double-byte cmd */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		/* length override: read the full init buffer */
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->data_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = READ_ID_BYTES;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->data_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->data_size = 8;
		break;

	case NAND_CMD_ERASE1:
		/* double-byte command: ERASE1 then ERASE2, auto status read */
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		/* already issued as the second byte of ERASE1 */
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
			command);
		break;
	}

	return exec_cmd;
}
1004
5cbbdc6a
EG
/*
 * nand_cmdfunc - mtd/nand ->cmdfunc hook for controllers whose page fits
 * in a single FIFO chunk (<= 2 KiB pages).
 *
 * Translates a generic NAND command into controller register words via
 * prepare_set_command(), starts the transaction and waits (with timeout)
 * for its completion interrupt.
 */
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects; if the chip select changed, reload the per-chip
	 * timing registers before issuing the command.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	/* ext_cmd_type is 0: no extended (naked) command splitting here */
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		init_completion(&info->cmd_complete);
		init_completion(&info->dev_ready);
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
		}
	}
	info->state = STATE_IDLE;
}
1051
5cbbdc6a
EG
/*
 * nand_cmdfunc_extended - ->cmdfunc hook for large-page devices on the
 * Armada 370/XP variant, where a page exceeds the controller FIFO and
 * must be transferred as a sequence of "naked" chunked commands.
 *
 * Loops issuing one chunk per iteration, adjusting the extended command
 * type (monolithic / naked / last / dispatch) between iterations until
 * the whole page (plus OOB) has been transferred or a timeout occurs.
 */
static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects; if the chip select changed, reload the timing
	 * registers before issuing the command.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	init_completion(&info->dev_ready);
	do {
		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			/* Nothing to run: release any waiter immediately */
			info->need_wait = 0;
			complete(&info->dev_ready);
			break;
		}

		init_completion(&info->cmd_complete);
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
			break;
		}

		/* Check if the sequence is complete */
		if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a splitted program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->data_size == 0 &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->data_size == info->chunk_size)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a splitted program command has no more data to transfer,
		 * the command dispatch must be issued to complete.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->data_size == 0) {
			ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}
1161
/*
 * pxa3xx_nand_write_page_hwecc - ->ecc.write_page hook.
 *
 * Copies the page data and the OOB area into the driver buffer; the
 * actual programming (and hardware ECC generation) happens when the
 * NAND core subsequently issues NAND_CMD_PAGEPROG through ->cmdfunc.
 * Always returns 0.
 */
static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}
1170
/*
 * pxa3xx_nand_read_page_hwecc - ->ecc.read_page hook.
 *
 * Copies the already-transferred page data and OOB out of the driver
 * buffer, then folds the controller's ECC result (stored in
 * info->retcode by the interrupt path) into mtd->ecc_stats.
 * Returns the maximum number of bitflips seen (info->max_bitflips).
 */
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR) {
		/*
		 * for blank page (all 0xff), HW will calculate its ECC as
		 * 0, which is different from the ECC information within
		 * OOB, ignore such uncorrectable errors
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}
1198
/*
 * pxa3xx_nand_read_byte - ->read_byte hook.
 *
 * Returns the next byte from the driver's data buffer, or 0xFF once
 * the buffer has been fully consumed (mimics reading an idle bus).
 */
static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	char retval = 0xFF;

	if (info->buf_start < info->buf_count)
		/* Has just send a new command? */
		retval = info->data_buff[info->buf_start++];

	return retval;
}
1211
/*
 * pxa3xx_nand_read_word - ->read_word hook (x16 devices).
 *
 * Returns the next 16-bit word from the driver buffer, or 0xFFFF when
 * the read pointer is unaligned or the buffer is exhausted.
 */
static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	u16 retval = 0xFFFF;

	/* only serve aligned reads that fit in the remaining buffer */
	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff+info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}
1224
/*
 * pxa3xx_nand_read_buf - ->read_buf hook.
 *
 * Copies up to @len bytes out of the driver buffer, clamped to what is
 * actually available, and advances the read pointer.
 */
static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, info->data_buff + info->buf_start, real_len);
	info->buf_start += real_len;
}
1234
/*
 * pxa3xx_nand_write_buf - ->write_buf hook.
 *
 * Copies up to @len bytes into the driver buffer, clamped to the
 * remaining capacity, and advances the write pointer. The data is
 * pushed to the flash later by the command state machine.
 */
static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(info->data_buff + info->buf_start, buf, real_len);
	info->buf_start += real_len;
}
1245
/*
 * pxa3xx_nand_select_chip - ->select_chip hook; intentionally a no-op.
 * Chip-select handling is done in the cmdfunc path by reloading the
 * timing registers when info->cs changes.
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}
1250
/*
 * pxa3xx_nand_waitfunc - ->waitfunc hook.
 *
 * Waits (with timeout) for the device-ready completion armed by the
 * cmdfunc path, then maps the last command's result to the NAND core
 * status convention: 0 on successful write/erase, NAND_STATUS_FAIL on
 * error or timeout, NAND_STATUS_READY otherwise.
 */
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		info->need_wait = 0;
		if (!wait_for_completion_timeout(&info->dev_ready,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Ready time out!!!\n");
			return NAND_STATUS_FAIL;
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}
1275
/*
 * pxa3xx_nand_config_flash - derive controller configuration from a
 * static flash description.
 *
 * Validates page size (512/2048) and bus width (8/16), computes the
 * column/row address cycle counts, builds the NDCR register image in
 * info->reg_ndcr and programs the timing registers.
 * Returns 0 on success or -EINVAL for unsupported geometry.
 */
static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
				    const struct pxa3xx_nand_flash *f)
{
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct pxa3xx_nand_host *host = info->host[info->cs];
	uint32_t ndcr = 0x0; /* enable all interrupts */

	if (f->page_size != 2048 && f->page_size != 512) {
		dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
		return -EINVAL;
	}

	if (f->flash_width != 16 && f->flash_width != 8) {
		dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
		return -EINVAL;
	}

	/* calculate addressing information */
	host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;

	/* more than 64K pages needs a third row address cycle */
	if (f->num_blocks * f->page_per_block > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
	ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
	ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
	ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;

	ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
	ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	info->reg_ndcr = ndcr;

	pxa3xx_nand_set_timing(host, f->timing);
	return 0;
}
1317
f271049e
MR
/*
 * pxa3xx_nand_detect_config - adopt the configuration left by the
 * bootloader ("keep config" mode).
 *
 * Reads NDCR and the timing registers back from the hardware instead of
 * programming them, masking out the interrupt and arbitration control
 * bits from the cached NDCR image. Always returns 0.
 */
static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr = nand_readl(info, NDCR);

	/* Set an initial chunk size */
	info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
	info->reg_ndcr = ndcr &
		~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
	return 0;
}
1330
/*
 * pxa3xx_nand_init_buff - allocate the data buffer and, when the "use_dma"
 * module parameter is set, request and configure the pxa-dma slave channel.
 *
 * Sets info->use_dma only once the channel is fully configured, so PIO
 * remains the fallback on any earlier return.
 * Returns 0 on success or a negative errno.
 */
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	struct dma_slave_config config;
	dma_cap_mask_t mask;
	struct pxad_param param;
	int ret;

	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL)
		return -ENOMEM;
	if (use_dma == 0)
		return 0;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	sg_init_one(&info->sg, info->data_buff, info->buf_size);
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	param.prio = PXAD_PRIO_LOWEST;
	param.drcmr = info->drcmr_dat;
	info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
							  &param, &pdev->dev,
							  "data");
	if (!info->dma_chan) {
		dev_err(&pdev->dev, "unable to request data dma channel\n");
		return -ENODEV;
	}

	/* both directions target the controller data FIFO register (NDDB) */
	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.src_addr = info->mmio_phys + NDDB;
	config.dst_addr = info->mmio_phys + NDDB;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;
	ret = dmaengine_slave_config(info->dma_chan, &config);
	if (ret < 0) {
		dev_err(&info->pdev->dev,
			"dma channel configuration failed: %d\n",
			ret);
		return ret;
	}

	/*
	 * Now that DMA buffers are allocated we turn on
	 * DMA proper for I/O operations.
	 */
	info->use_dma = 1;
	return 0;
}
1384
498b6145
EG
/*
 * pxa3xx_nand_free_buff - release the data buffer and, if DMA was
 * enabled, terminate outstanding transfers and release the channel.
 * Counterpart of pxa3xx_nand_init_buff().
 */
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
	if (info->use_dma) {
		dmaengine_terminate_all(info->dma_chan);
		dma_release_channel(info->dma_chan);
	}
	kfree(info->data_buff);
}
498b6145 1393
401e67e2
LW
/*
 * pxa3xx_nand_sensing - probe for a chip on the current chip select.
 *
 * Programs conservative default timings (builtin_flash_types[0]), then
 * issues a RESET and checks the wait status. Returns 0 if a chip
 * responded, -ENODEV if the reset failed, or the config error code.
 */
static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
{
	struct mtd_info *mtd;
	struct nand_chip *chip;
	int ret;

	mtd = info->host[info->cs]->mtd;
	chip = mtd->priv;

	/* use the common timing to make a try */
	ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
	if (ret)
		return ret;

	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
	ret = chip->waitfunc(mtd, chip);
	if (ret & NAND_STATUS_FAIL)
		return -ENODEV;

	return 0;
}
fe69af00 1415
43bcfd2b
EG
/*
 * pxa_ecc_init - select the controller ECC configuration.
 *
 * Maps the requested (strength, step size, page size) triple onto one of
 * the fixed hardware ECC schemes: Hamming (1 bit/512B) or BCH
 * (4 or 8 bits/512B, implemented as 16-bit correction over larger
 * chunks). Fills in info->chunk_size/spare_size/ecc_size and the
 * nand_ecc_ctrl fields. Returns 0 on success or -ENODEV when the
 * combination is not supported by the hardware.
 */
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;
	} else {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
		 ecc->strength, ecc->size);
	return 0;
}
1484
/*
 * pxa3xx_nand_scan - detect and configure the flash chip behind mtd.
 *
 * Either adopts the bootloader configuration (keep_config) or probes the
 * device: sense presence with default timings, read the ID, match it
 * against the builtin flash table and program the controller. Then runs
 * the generic nand_scan_ident/nand_scan_tail pair, selects the extended
 * cmdfunc for large pages (Armada 370 only), configures ECC and
 * reallocates the data buffer at its final page+OOB size.
 * Returns 0 on success or a negative errno.
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
	const struct pxa3xx_nand_flash *f = NULL;
	struct nand_chip *chip = mtd->priv;
	uint32_t id = -1;
	uint64_t chipsize;
	int i, ret, num;
	uint16_t ecc_strength, ecc_step;

	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
		goto KEEP_CONFIG;

	/* Set a default chunk size */
	info->chunk_size = 512;

	ret = pxa3xx_nand_sensing(info);
	if (ret) {
		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
			 info->cs);

		return ret;
	}

	chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
	id = *((uint16_t *)(info->data_buff));
	if (id != 0)
		dev_info(&info->pdev->dev, "Detect a flash id %x\n", id);
	else {
		dev_warn(&info->pdev->dev,
			 "Read out ID 0, potential timing set wrong!!\n");

		return -EINVAL;
	}

	/* entry 0 holds the default timings, skip it when matching IDs */
	num = ARRAY_SIZE(builtin_flash_types) - 1;
	for (i = 0; i < num; i++) {
		f = &builtin_flash_types[i + 1];

		/* find the chip in default list */
		if (f->chip_id == id)
			break;
	}

	if (i >= (ARRAY_SIZE(builtin_flash_types) - 1)) {
		dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");

		return -EINVAL;
	}

	ret = pxa3xx_nand_config_flash(info, f);
	if (ret) {
		dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
		return ret;
	}

	/* build a one-entry flash ID table for nand_scan_ident() */
	memset(pxa3xx_flash_ids, 0, sizeof(pxa3xx_flash_ids));

	pxa3xx_flash_ids[0].name = f->name;
	pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
	pxa3xx_flash_ids[0].pagesize = f->page_size;
	chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
	pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
	pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
	if (f->flash_width == 16)
		pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
	pxa3xx_flash_ids[1].name = NULL;
	def = pxa3xx_flash_ids;
KEEP_CONFIG:
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		chip->options |= NAND_BUSWIDTH_16;

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, def))
		return -ENODEV;

	if (pdata->flash_bbt) {
		/*
		 * We'll use a bad block table stored in-flash and don't
		 * allow writing the bad block marker to the flash.
		 */
		chip->bbt_options |= NAND_BBT_USE_FLASH |
				     NAND_BBT_NO_OOB_BBM;
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
	}

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka splitted) command handling,
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;
	return nand_scan_tail(mtd);
}
1636
d456882b 1637static int alloc_nand_resource(struct platform_device *pdev)
fe69af00 1638{
f3c8cfc2 1639 struct pxa3xx_nand_platform_data *pdata;
fe69af00 1640 struct pxa3xx_nand_info *info;
d456882b 1641 struct pxa3xx_nand_host *host;
6e308f87 1642 struct nand_chip *chip = NULL;
fe69af00 1643 struct mtd_info *mtd;
1644 struct resource *r;
f3c8cfc2 1645 int ret, irq, cs;
fe69af00 1646
453810b7 1647 pdata = dev_get_platdata(&pdev->dev);
e423c90a
RJ
1648 if (pdata->num_cs <= 0)
1649 return -ENODEV;
4c073cd2
EG
1650 info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1651 sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1652 if (!info)
d456882b 1653 return -ENOMEM;
fe69af00 1654
fe69af00 1655 info->pdev = pdev;
c7e9c7e7 1656 info->variant = pxa3xx_nand_get_variant(pdev);
f3c8cfc2 1657 for (cs = 0; cs < pdata->num_cs; cs++) {
ce914e6b 1658 mtd = (void *)&info[1] + (sizeof(*mtd) + sizeof(*host)) * cs;
f3c8cfc2
LW
1659 chip = (struct nand_chip *)(&mtd[1]);
1660 host = (struct pxa3xx_nand_host *)chip;
1661 info->host[cs] = host;
1662 host->mtd = mtd;
1663 host->cs = cs;
1664 host->info_data = info;
1665 mtd->priv = host;
550dab5b 1666 mtd->dev.parent = &pdev->dev;
f3c8cfc2
LW
1667
1668 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1669 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1670 chip->controller = &info->controller;
1671 chip->waitfunc = pxa3xx_nand_waitfunc;
1672 chip->select_chip = pxa3xx_nand_select_chip;
f3c8cfc2
LW
1673 chip->read_word = pxa3xx_nand_read_word;
1674 chip->read_byte = pxa3xx_nand_read_byte;
1675 chip->read_buf = pxa3xx_nand_read_buf;
1676 chip->write_buf = pxa3xx_nand_write_buf;
664c7f5e 1677 chip->options |= NAND_NO_SUBPAGE_WRITE;
5cbbdc6a 1678 chip->cmdfunc = nand_cmdfunc;
f3c8cfc2 1679 }
401e67e2
LW
1680
1681 spin_lock_init(&chip->controller->lock);
1682 init_waitqueue_head(&chip->controller->wq);
9ca7944d 1683 info->clk = devm_clk_get(&pdev->dev, NULL);
fe69af00 1684 if (IS_ERR(info->clk)) {
1685 dev_err(&pdev->dev, "failed to get nand clock\n");
4c073cd2 1686 return PTR_ERR(info->clk);
fe69af00 1687 }
1f8eaff2
EG
1688 ret = clk_prepare_enable(info->clk);
1689 if (ret < 0)
1690 return ret;
fe69af00 1691
6b45c1ee 1692 if (use_dma) {
8f5ba31a
RJ
1693 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1694 if (r == NULL) {
1695 dev_err(&pdev->dev,
1696 "no resource defined for data DMA\n");
1697 ret = -ENXIO;
1698 goto fail_disable_clk;
1e7ba630 1699 }
8f5ba31a
RJ
1700 info->drcmr_dat = r->start;
1701
1702 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1703 if (r == NULL) {
1704 dev_err(&pdev->dev,
1705 "no resource defined for cmd DMA\n");
1706 ret = -ENXIO;
1707 goto fail_disable_clk;
1708 }
1709 info->drcmr_cmd = r->start;
fe69af00 1710 }
fe69af00 1711
1712 irq = platform_get_irq(pdev, 0);
1713 if (irq < 0) {
1714 dev_err(&pdev->dev, "no IRQ resource defined\n");
1715 ret = -ENXIO;
9ca7944d 1716 goto fail_disable_clk;
fe69af00 1717 }
1718
1719 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
0ddd846f
EG
1720 info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1721 if (IS_ERR(info->mmio_base)) {
1722 ret = PTR_ERR(info->mmio_base);
9ca7944d 1723 goto fail_disable_clk;
fe69af00 1724 }
8638fac8 1725 info->mmio_phys = r->start;
fe69af00 1726
62e8b851
EG
1727 /* Allocate a buffer to allow flash detection */
1728 info->buf_size = INIT_BUFFER_SIZE;
1729 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1730 if (info->data_buff == NULL) {
1731 ret = -ENOMEM;
9ca7944d 1732 goto fail_disable_clk;
62e8b851 1733 }
fe69af00 1734
346e1259
HZ
1735 /* initialize all interrupts to be disabled */
1736 disable_int(info, NDSR_MASK);
1737
24542257
RJ
1738 ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1739 pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1740 pdev->name, info);
fe69af00 1741 if (ret < 0) {
1742 dev_err(&pdev->dev, "failed to request IRQ\n");
1743 goto fail_free_buf;
1744 }
1745
e353a20a 1746 platform_set_drvdata(pdev, info);
fe69af00 1747
d456882b 1748 return 0;
fe69af00 1749
fe69af00 1750fail_free_buf:
401e67e2 1751 free_irq(irq, info);
62e8b851 1752 kfree(info->data_buff);
9ca7944d 1753fail_disable_clk:
fb32061f 1754 clk_disable_unprepare(info->clk);
d456882b 1755 return ret;
fe69af00 1756}
1757
/*
 * pxa3xx_nand_remove - platform driver ->remove: undo alloc_nand_resource
 * and the per-cs registrations.
 */
static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	int irq, cs;

	if (!info)
		return 0;

	pdata = dev_get_platdata(&pdev->dev);

	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, info);
	pxa3xx_nand_free_buff(info);

	/*
	 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
	 * In order to prevent a lockup of the system bus, the DFI bus
	 * arbitration is granted to SMC upon driver removal. This is done by
	 * setting the x_ARB_CNTL bit, which also prevents the NAND to have
	 * access to the bus anymore.
	 */
	nand_writel(info, NDCR,
		    (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
		    NFCV1_NDCR_ARB_CNTL);
	clk_disable_unprepare(info->clk);

	for (cs = 0; cs < pdata->num_cs; cs++)
		nand_release(info->host[cs]->mtd);
	return 0;
}
1790
1e7ba630
DM
/*
 * pxa3xx_nand_probe_dt - build platform data from the device-tree node.
 *
 * No-op (returns 0) when the device was not matched via DT. Otherwise
 * allocates a pdata structure, fills it from the "marvell,nand-*",
 * "num-cs" and generic NAND ECC/BBT properties, and installs it as
 * pdev->dev.platform_data. Returns 0 or -ENOMEM.
 */
static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
		of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);

	if (!of_id)
		return 0;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
		pdata->enable_arbiter = 1;
	if (of_get_property(np, "marvell,nand-keep-config", NULL))
		pdata->keep_config = 1;
	of_property_read_u32(np, "num-cs", &pdata->num_cs);
	pdata->flash_bbt = of_get_nand_on_flash_bbt(np);

	/* negative return means "property absent": fall back to 0 */
	pdata->ecc_strength = of_get_nand_ecc_strength(np);
	if (pdata->ecc_strength < 0)
		pdata->ecc_strength = 0;

	pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
	if (pdata->ecc_step_size < 0)
		pdata->ecc_step_size = 0;

	pdev->dev.platform_data = pdata;

	return 0;
}
1e7ba630 1824
e353a20a
LW
/*
 * pxa3xx_nand_probe - platform driver ->probe.
 *
 * Disables the use_dma module parameter on platforms that cannot do DMA
 * with this device, resolves platform data (DT or board file), allocates
 * the controller resources and then scans/registers an mtd device per
 * chip select. Succeeds if at least one chip select probed; otherwise
 * tears everything down and returns -ENODEV.
 */
static int pxa3xx_nand_probe(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct mtd_part_parser_data ppdata = {};
	struct pxa3xx_nand_info *info;
	int ret, cs, probe_success, dma_available;

	dma_available = IS_ENABLED(CONFIG_ARM) &&
		(IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
	if (use_dma && !dma_available) {
		use_dma = 0;
		dev_warn(&pdev->dev,
			 "This platform can't do DMA on this device\n");
	}

	ret = pxa3xx_nand_probe_dt(pdev);
	if (ret)
		return ret;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -ENODEV;
	}

	ret = alloc_nand_resource(pdev);
	if (ret) {
		dev_err(&pdev->dev, "alloc nand resource failed\n");
		return ret;
	}

	info = platform_get_drvdata(pdev);
	probe_success = 0;
	for (cs = 0; cs < pdata->num_cs; cs++) {
		struct mtd_info *mtd = info->host[cs]->mtd;

		/*
		 * The mtd name matches the one used in 'mtdparts' kernel
		 * parameter. This name cannot be changed or otherwise
		 * user's mtd partitions configuration would get broken.
		 */
		mtd->name = "pxa3xx_nand-0";
		info->cs = cs;
		ret = pxa3xx_nand_scan(mtd);
		if (ret) {
			dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
				 cs);
			continue;
		}

		ppdata.of_node = pdev->dev.of_node;
		ret = mtd_device_parse_register(mtd, NULL,
						&ppdata, pdata->parts[cs],
						pdata->nr_parts[cs]);
		if (!ret)
			probe_success = 1;
	}

	if (!probe_success) {
		pxa3xx_nand_remove(pdev);
		return -ENODEV;
	}

	return 0;
}
1890
fe69af00 1891#ifdef CONFIG_PM
/*
 * pxa3xx_nand_suspend - legacy platform PM suspend hook.
 *
 * Refuses to suspend (-EAGAIN) while a command is in flight
 * (info->state != STATE_IDLE), then suspends every registered mtd.
 */
static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	struct mtd_info *mtd;
	int cs;

	pdata = dev_get_platdata(&pdev->dev);
	if (info->state) {
		dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
		return -EAGAIN;
	}

	for (cs = 0; cs < pdata->num_cs; cs++) {
		mtd = info->host[cs]->mtd;
		mtd_suspend(mtd);
	}

	return 0;
}
1912
/*
 * pxa3xx_nand_resume - legacy platform PM resume hook.
 *
 * Masks interrupts, invalidates the cached chip select so the next
 * cmdfunc reloads the timing registers, clears stale NDSR status left
 * by the clock gate/ungate, and resumes every registered mtd.
 */
static int pxa3xx_nand_resume(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	struct mtd_info *mtd;
	int cs;

	pdata = dev_get_platdata(&pdev->dev);
	/* We don't want to handle interrupt without calling mtd routine */
	disable_int(info, NDCR_INT_MASK);

	/*
	 * Directly set the chip select to a invalid value,
	 * then the driver would reset the timing according
	 * to current chip select at the beginning of cmdfunc
	 */
	info->cs = 0xff;

	/*
	 * As the spec says, the NDSR would be updated to 0x1800 when
	 * doing the nand_clk disable/enable.
	 * To prevent it damaging state machine of the driver, clear
	 * all status before resume
	 */
	nand_writel(info, NDSR, NDSR_MASK);
	for (cs = 0; cs < pdata->num_cs; cs++) {
		mtd = info->host[cs]->mtd;
		mtd_resume(mtd);
	}

	return 0;
}
1945#else
1946#define pxa3xx_nand_suspend NULL
1947#define pxa3xx_nand_resume NULL
1948#endif
1949
/* Platform driver glue: matched by name or via the DT id table. */
static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
		.of_match_table = pxa3xx_nand_dt_ids,
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
	.suspend	= pxa3xx_nand_suspend,	/* NULL when !CONFIG_PM */
	.resume		= pxa3xx_nand_resume,	/* NULL when !CONFIG_PM */
};

module_platform_driver(pxa3xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");
This page took 0.772786 seconds and 5 git commands to generate.