/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/pxa-dma.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mtd.h>

#if defined(CONFIG_ARM) && (defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP))
#define ARCH_HAS_DMA
#endif

#include <linux/platform_data/mtd-nand-pxa3xx.h>

#define CHIP_DELAY_TIMEOUT	msecs_to_jiffies(200)
#define NAND_STOP_DELAY		msecs_to_jiffies(40)
#define PAGE_CHUNK_SIZE		(2048)

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM.
 * ONFI param page is 256 bytes, and there are three redundant copies
 * to be read. JEDEC param page is 512 bytes, and there are also three
 * redundant copies to be read.
 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
 */
#define INIT_BUFFER_SIZE	2048

/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDECCCTRL	(0x28) /* ECC control */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NFCV1_NDCR_ARB_CNTL	(0x1 << 19)
#define NFCV2_NDCR_STOP_ON_UNCOR	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
#define NDCR_INT_MASK		(0xFFF)

#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */
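/*
 * Note: the EXT_CMD_TYPE_* values above are placed into NDCB0 bits 31:29
 * through NDCB0_EXT_CMD_TYPE() and are only used by the extended (chunked)
 * command path, i.e. on the NFCv2 variant when the page is larger than the
 * controller FIFO (see nand_cmdfunc_extended() below).
 */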

/*
 * This should be large enough to read 'ONFI' and 'JEDEC'.
 * Let's use 7 bytes, which is the maximum ID count supported
 * by the controller (see NDCR_RD_ID_CNT_MASK).
 */
#define READ_ID_BYTES		7

/* macros for registers read/write */
#define nand_writel(info, off, val)	\
	writel_relaxed((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	readl_relaxed((info)->mmio_base + (off))

/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,
	ERR_SENDCMD	= -2,
	ERR_UNCORERR	= -3,
	ERR_BBERR	= -4,
	ERR_CORERR	= -5,
};

enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};

struct pxa3xx_nand_host {
	struct nand_chip chip;
	struct mtd_info *mtd;
	void *info_data;

	/* page size of attached chip */
	int use_ecc;
	int cs;

	/* calculated from pxa3xx_nand_flash data */
	unsigned int col_addr_cycles;
	unsigned int row_addr_cycles;
};

struct pxa3xx_nand_info {
	struct nand_hw_control controller;
	struct platform_device *pdev;

	struct clk *clk;
	void __iomem *mmio_base;
	unsigned long mmio_phys;
	struct completion cmd_complete, dev_ready;

	unsigned int buf_start;
	unsigned int buf_count;
	unsigned int buf_size;
	unsigned int data_buff_pos;
	unsigned int oob_buff_pos;

	/* DMA information */
	struct scatterlist sg;
	enum dma_data_direction dma_dir;
	struct dma_chan *dma_chan;
	dma_cookie_t dma_cookie;
	int drcmr_dat;
	int drcmr_cmd;

	unsigned char *data_buff;
	unsigned char *oob_buff;
	dma_addr_t data_buff_phys;
	int data_dma_ch;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int state;

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int cs;
	int use_ecc;		/* use HW ECC ? */
	int ecc_bch;		/* using BCH ECC? */
	int use_dma;		/* use DMA ? */
	int use_spare;		/* use spare ? */
	int need_wait;

	unsigned int data_size;	/* data to be read from FIFO */
	unsigned int chunk_size; /* split commands chunk size */
	unsigned int oob_size;
	unsigned int spare_size;
	unsigned int ecc_size;
	unsigned int ecc_err_cnt;
	unsigned int max_bitflips;
	int retcode;

	/* cached register value */
	uint32_t reg_ndcr;
	uint32_t ndtr0cs0;
	uint32_t ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t ndcb0;
	uint32_t ndcb1;
	uint32_t ndcb2;
	uint32_t ndcb3;
};

static bool use_dma = 1;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");

struct pxa3xx_nand_timing {
	unsigned int tCH;  /* Enable signal hold time */
	unsigned int tCS;  /* Enable signal setup time */
	unsigned int tWH;  /* ND_nWE high duration */
	unsigned int tWP;  /* ND_nWE pulse time */
	unsigned int tRH;  /* ND_nRE high duration */
	unsigned int tRP;  /* ND_nRE pulse width */
	unsigned int tR;   /* ND_nWE high to ND_nRE low for read */
	unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */
	unsigned int tAR;  /* ND_ALE low to ND_nRE low delay */
};

struct pxa3xx_nand_flash {
	char *name;
	uint32_t chip_id;
	unsigned int page_per_block;	/* Pages per block (PG_PER_BLK) */
	unsigned int page_size;		/* Page size in bytes (PAGE_SZ) */
	unsigned int flash_width;	/* Width of Flash memory (DWIDTH_M) */
	unsigned int dfc_width;		/* Width of flash controller(DWIDTH_C) */
	unsigned int num_blocks;	/* Number of physical blocks in Flash */

	struct pxa3xx_nand_timing *timing;	/* NAND Flash timing */
};

static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};

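/*
 * Built-in flash table; fields map onto struct pxa3xx_nand_flash:
 * { name, chip_id, pages-per-block, page size, flash bus width,
 *   controller bus width, number of blocks, timing set }.
 * The first entry is only used as a safe default while probing
 * (see DEFAULT_FLASH_TYPE and pxa3xx_nand_sensing()).
 */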
static struct pxa3xx_nand_flash builtin_flash_types[] = {
{ "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing[0] },
{ "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing[1] },
{ "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing[1] },
{ "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing[1] },
{ "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing[2] },
{ "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing[2] },
{ "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing[2] },
{ "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing[2] },
{ "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] },
};

static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};

static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};

static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63,
		96, 97, 98, 99, 100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { }
};

/* Define a default flash type setting to serve for flash detection only */
#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])

#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
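/*
 * For instance, with a 156 MHz controller clock a 25 ns pulse converts to
 * 25 * 156 / 1000 = 3 cycles (the division truncates), and the NDTR0/NDTR1
 * helpers above then clamp the result to the width of the register field.
 */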

static const struct of_device_id pxa3xx_nand_dt_ids[] = {
	{
		.compatible = "marvell,pxa3xx-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_PXA,
	},
	{
		.compatible = "marvell,armada370-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
	},
	{}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);

static enum pxa3xx_nand_variant
pxa3xx_nand_get_variant(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
	if (!of_id)
		return PXA3XX_NAND_VARIANT_PXA;
	return (enum pxa3xx_nand_variant)of_id->data;
}

static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

/*
 * Set the data and OOB size, depending on the selected
 * spare and ECC configuration.
 * Only applicable to READ0, READOOB and PAGEPROG commands.
 */
static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
				struct mtd_info *mtd)
{
	int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;

	info->data_size = mtd->writesize;
	if (!oob_enable)
		return;

	info->oob_size = info->spare_size;
	if (!info->use_ecc)
		info->oob_size += info->ecc_size;
}
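/*
 * For example, on a 4 KiB page configured for 4-bit BCH (spare_size = 32,
 * ecc_size = 32 per chunk), a READ0 with ECC enabled yields data_size = 4096
 * and oob_size = 32; with ECC disabled the ECC bytes are exposed as well and
 * oob_size becomes 64.
 */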

/**
 * NOTE: ND_RUN must be set first, and only then may the command buffer be
 * written; otherwise the controller does not start. All interrupts are
 * enabled at the same time and pxa3xx_nand_irq() handles the rest of the
 * logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	ndcr = info->reg_ndcr;

	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	if (info->use_dma)
		ndcr |= NDCR_DMA_EN;
	else
		ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDCR, ndcr);
}

static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	int timeout = NAND_STOP_DELAY;

	/* wait for the RUN bit in NDCR to become 0 */
	ndcr = nand_readl(info, NDCR);
	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
		ndcr = nand_readl(info, NDCR);
		udelay(1);
	}

	if (timeout <= 0) {
		ndcr &= ~NDCR_ND_RUN;
		nand_writel(info, NDCR, ndcr);
	}
	if (info->dma_chan)
		dmaengine_terminate_all(info->dma_chan);

	/* clear status bits */
	nand_writel(info, NDSR, NDSR_MASK);
}

static void __maybe_unused
enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~int_mask);
}

static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}
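/*
 * The NDCR interrupt bits are mask bits: a set bit disables the
 * corresponding interrupt source. Hence enable_int() clears bits while
 * disable_int() sets them, e.g. disable_int(info, NDSR_MASK) masks every
 * interrupt during probe.
 */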

static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		u32 val;
		int ret;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32-byte read we have to
		 * make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO, 8 32-bit reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			readsl(info->mmio_base + NDDB, data, 8);

			ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
							 val & NDSR_RDDREQ, 1000, 5000);
			if (ret) {
				dev_err(&info->pdev->dev,
					"Timeout on RDDREQ while draining the FIFO\n");
				return;
			}

			data += 32;
			len -= 8;
		}
	}

	readsl(info->mmio_base + NDDB, data, len);
}

static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	unsigned int do_bytes = min(info->data_size, info->chunk_size);

	switch (info->state) {
	case STATE_PIO_WRITING:
		writesl(info->mmio_base + NDDB,
			info->data_buff + info->data_buff_pos,
			DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		drain_fifo(info,
			   info->data_buff + info->data_buff_pos,
			   DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += do_bytes;
	info->oob_buff_pos += info->oob_size;
	info->data_size -= do_bytes;
}
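/*
 * Note that do_bytes is capped at info->chunk_size, so pages larger than one
 * chunk are transferred piecewise: each RDDREQ/WRDREQ interrupt moves one
 * chunk and advances data_buff_pos/oob_buff_pos until data_size reaches zero.
 */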

static void pxa3xx_nand_data_dma_irq(void *data)
{
	struct pxa3xx_nand_info *info = data;
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
	if (likely(status == DMA_COMPLETE)) {
		info->state = STATE_DMA_DONE;
	} else {
		dev_err(&info->pdev->dev, "DMA error on data channel\n");
		info->retcode = ERR_DMABUSERR;
	}
	dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
	enable_int(info, NDCR_INT_MASK);
}

static void start_data_dma(struct pxa3xx_nand_info *info)
{
	enum dma_transfer_direction direction;
	struct dma_async_tx_descriptor *tx;

	switch (info->state) {
	case STATE_DMA_WRITING:
		info->dma_dir = DMA_TO_DEVICE;
		direction = DMA_MEM_TO_DEV;
		break;
	case STATE_DMA_READING:
		info->dma_dir = DMA_FROM_DEVICE;
		direction = DMA_DEV_TO_MEM;
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}
	info->sg.length = info->data_size +
		(info->oob_size ? info->spare_size + info->ecc_size : 0);
	dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
		return;
	}
	tx->callback = pxa3xx_nand_data_dma_irq;
	tx->callback_param = info;
	info->dma_cookie = dmaengine_submit(tx);
	dma_async_issue_pending(info->dma_chan);
	dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
		__func__, direction, info->dma_cookie, info->sg.length);
}

static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
{
	struct pxa3xx_nand_info *info = data;

	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);

	return IRQ_HANDLED;
}

static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	if (info->cs == 0) {
		ready = NDSR_FLASH_RDY;
		cmd_done = NDSR_CS0_CMDD;
	} else {
		ready = NDSR_RDY;
		cmd_done = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store the maximum number of corrected
		 * bitflips to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* whether to use DMA for the data transfer */
		if (info->use_dma) {
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			goto NORMAL_IRQ_EXIT;
		} else {
			info->state = (status & NDSR_RDDREQ) ?
				STATE_PIO_READING : STATE_PIO_WRITING;
			ret = IRQ_WAKE_THREAD;
			goto NORMAL_IRQ_EXIT;
		}
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	/*
	 * Clear all status bits before issuing the next command, which
	 * can and will alter the status bits and will deserve a new
	 * interrupt on its own. This lets the controller exit the IRQ.
	 */
	nand_writel(info, NDSR, status);

	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing either 12 or 16 bytes directly
		 * to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	if (is_completed)
		complete(&info->cmd_complete);
	if (is_ready)
		complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
	return ret;
}

static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}

static void set_command_address(struct pxa3xx_nand_info *info,
		unsigned int page_size, uint16_t column, int page_addr)
{
	/* small page addr setting */
	if (page_size < PAGE_CHUNK_SIZE) {
		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
				| (column & 0xFF);

		info->ndcb2 = 0;
	} else {
		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
				| (column & 0xFFFF);

		if (page_addr & 0xFF0000)
			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
		else
			info->ndcb2 = 0;
	}
}
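/*
 * For a large-page device this packs a 5-cycle address: the 2-byte column
 * goes into NDCB1[15:0], the low 16 bits of the page address into
 * NDCB1[31:16], and any remaining page-address bits into NDCB2[7:0].
 */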

static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = host->mtd;

	/* reset data and oob column point to handle data */
	info->buf_start = 0;
	info->buf_count = 0;
	info->oob_size = 0;
	info->data_buff_pos = 0;
	info->oob_buff_pos = 0;
	info->use_ecc = 0;
	info->use_spare = 1;
	info->retcode = ERR_NONE;
	info->ecc_err_cnt = 0;
	info->ndcb3 = 0;
	info->need_wait = 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
	case NAND_CMD_READOOB:
		pxa3xx_set_datasize(info, mtd);
		break;
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {

		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}

}

static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = host->mtd;
	addr_cycle = 0;
	exec_cmd = 1;

	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			/* No data transfer in this case */
			info->data_size = 0;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		if (is_buf_blank(info->data_buff,
				 (mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->data_size == 0) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->data_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = READ_ID_BYTES;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->data_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->data_size = 8;
		break;

	case NAND_CMD_ERASE1:
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
			command);
		break;
	}

	return exec_cmd;
}

static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * If this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device.
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * Different NAND chips may be hooked to different chip selects,
	 * so check whether the chip select has changed and, if so,
	 * reload the timing registers.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		init_completion(&info->cmd_complete);
		init_completion(&info->dev_ready);
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
		}
	}
	info->state = STATE_IDLE;
}

static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * If this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device.
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * Different NAND chips may be hooked to different chip selects,
	 * so check whether the chip select has changed and, if so,
	 * reload the timing registers.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	init_completion(&info->dev_ready);
	do {
		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			info->need_wait = 0;
			complete(&info->dev_ready);
			break;
		}

		init_completion(&info->cmd_complete);
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
			break;
		}

		/* Check if the sequence is complete */
		if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a split program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->data_size == 0 &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->data_size == info->chunk_size)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a split program command has no more data to transfer,
		 * the command dispatch must be issued to complete it.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->data_size == 0) {
			ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}

static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required,
		int page)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}

static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR) {
		/*
		 * For a blank page (all 0xff) the HW will calculate its ECC
		 * as 0, which is different from the ECC information within
		 * the OOB; ignore such uncorrectable errors.
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}

static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	char retval = 0xFF;

	if (info->buf_start < info->buf_count)
		/* Has a new command just been sent? */
		retval = info->data_buff[info->buf_start++];

	return retval;
}

static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	u16 retval = 0xFFFF;

	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff+info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}

static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, info->data_buff + info->buf_start, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(info->data_buff + info->buf_start, buf, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}

static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		info->need_wait = 0;
		if (!wait_for_completion_timeout(&info->dev_ready,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Ready time out!!!\n");
			return NAND_STATUS_FAIL;
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}

static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
				    const struct pxa3xx_nand_flash *f)
{
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct pxa3xx_nand_host *host = info->host[info->cs];
	uint32_t ndcr = 0x0; /* enable all interrupts */

	if (f->page_size != 2048 && f->page_size != 512) {
		dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
		return -EINVAL;
	}

	if (f->flash_width != 16 && f->flash_width != 8) {
		dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
		return -EINVAL;
	}

	/* calculate addressing information */
	host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;

	if (f->num_blocks * f->page_per_block > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
	ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
	ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
	ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;

	ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
	ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	info->reg_ndcr = ndcr;

	pxa3xx_nand_set_timing(host, f->timing);
	return 0;
}

static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr = nand_readl(info, NDCR);

	/* Set an initial chunk size */
	info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
	info->reg_ndcr = ndcr &
		~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
	return 0;
}

static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	struct dma_slave_config config;
	dma_cap_mask_t mask;
	struct pxad_param param;
	int ret;

	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL)
		return -ENOMEM;
	if (use_dma == 0)
		return 0;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	sg_init_one(&info->sg, info->data_buff, info->buf_size);
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	param.prio = PXAD_PRIO_LOWEST;
	param.drcmr = info->drcmr_dat;
	info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
							  &param, &pdev->dev,
							  "data");
	if (!info->dma_chan) {
		dev_err(&pdev->dev, "unable to request data dma channel\n");
		return -ENODEV;
	}

	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.src_addr = info->mmio_phys + NDDB;
	config.dst_addr = info->mmio_phys + NDDB;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;
	ret = dmaengine_slave_config(info->dma_chan, &config);
	if (ret < 0) {
		dev_err(&info->pdev->dev,
			"dma channel configuration failed: %d\n",
			ret);
		return ret;
	}

	/*
	 * Now that DMA buffers are allocated we turn on
	 * DMA proper for I/O operations.
	 */
	info->use_dma = 1;
	return 0;
}
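/*
 * The same NDDB offset is programmed as both src_addr and dst_addr because a
 * single channel is reused for reads and writes; the actual direction is
 * chosen per transfer in start_data_dma() via dmaengine_prep_slave_sg().
 */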

static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
	if (info->use_dma) {
		dmaengine_terminate_all(info->dma_chan);
		dma_release_channel(info->dma_chan);
	}
	kfree(info->data_buff);
}

static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
{
	struct mtd_info *mtd;
	struct nand_chip *chip;
	int ret;

	mtd = info->host[info->cs]->mtd;
	chip = mtd->priv;

	/* try the conservative default timings first */
	ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
	if (ret)
		return ret;

	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
	ret = chip->waitfunc(mtd, chip);
	if (ret & NAND_STATUS_FAIL)
		return -ENODEV;

	return 0;
}

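/*
 * Supported ECC configurations, as selected below from the requested
 * (strength, step size) pair and the page size:
 *
 *   Hamming 1-bit/512B on 2 KiB or 512 B pages (chunk = page)
 *   BCH     4-bit/512B on 2 KiB or 4 KiB pages (2 KiB chunks)
 *   BCH     8-bit/512B on 4 KiB pages          (1 KiB chunks)
 *
 * In the BCH cases ecc->strength is reported as 16 because the controller's
 * BCH engine corrects up to 16 bits per chunk.
 */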
43bcfd2b
EG
1417static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1418 struct nand_ecc_ctrl *ecc,
30b2afc8 1419 int strength, int ecc_stepsize, int page_size)
43bcfd2b 1420{
30b2afc8 1421 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
70ed8523 1422 info->chunk_size = 2048;
43bcfd2b
EG
1423 info->spare_size = 40;
1424 info->ecc_size = 24;
1425 ecc->mode = NAND_ECC_HW;
1426 ecc->size = 512;
1427 ecc->strength = 1;
43bcfd2b 1428
30b2afc8 1429 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
70ed8523 1430 info->chunk_size = 512;
43bcfd2b
EG
1431 info->spare_size = 8;
1432 info->ecc_size = 8;
1433 ecc->mode = NAND_ECC_HW;
1434 ecc->size = 512;
1435 ecc->strength = 1;
43bcfd2b 1436
6033a949
BN
1437 /*
1438 * Required ECC: 4-bit correction per 512 bytes
1439 * Select: 16-bit correction per 2048 bytes
1440 */
3db227b6
RG
1441 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1442 info->ecc_bch = 1;
1443 info->chunk_size = 2048;
1444 info->spare_size = 32;
1445 info->ecc_size = 32;
1446 ecc->mode = NAND_ECC_HW;
1447 ecc->size = info->chunk_size;
1448 ecc->layout = &ecc_layout_2KB_bch4bit;
1449 ecc->strength = 16;
3db227b6 1450
30b2afc8 1451 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
70ed8523
EG
1452 info->ecc_bch = 1;
1453 info->chunk_size = 2048;
1454 info->spare_size = 32;
1455 info->ecc_size = 32;
1456 ecc->mode = NAND_ECC_HW;
1457 ecc->size = info->chunk_size;
1458 ecc->layout = &ecc_layout_4KB_bch4bit;
1459 ecc->strength = 16;
70ed8523 1460
6033a949
BN
1461 /*
1462 * Required ECC: 8-bit correction per 512 bytes
1463 * Select: 16-bit correction per 1024 bytes
1464 */
1465 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
70ed8523
EG
1466 info->ecc_bch = 1;
1467 info->chunk_size = 1024;
1468 info->spare_size = 0;
1469 info->ecc_size = 32;
1470 ecc->mode = NAND_ECC_HW;
1471 ecc->size = info->chunk_size;
1472 ecc->layout = &ecc_layout_4KB_bch8bit;
1473 ecc->strength = 16;
eee0166d
EG
1474 } else {
1475 dev_err(&info->pdev->dev,
1476 "ECC strength %d at page size %d is not supported\n",
1477 strength, page_size);
1478 return -ENODEV;
70ed8523 1479 }
eee0166d
EG
1480
1481 dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
1482 ecc->strength, ecc->size);
43bcfd2b
EG
1483 return 0;
1484}
1485
401e67e2 1486static int pxa3xx_nand_scan(struct mtd_info *mtd)
fe69af00 1487{
d456882b
LW
1488 struct pxa3xx_nand_host *host = mtd->priv;
1489 struct pxa3xx_nand_info *info = host->info_data;
401e67e2 1490 struct platform_device *pdev = info->pdev;
453810b7 1491 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
0fab028b 1492 struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
401e67e2
LW
1493 const struct pxa3xx_nand_flash *f = NULL;
1494 struct nand_chip *chip = mtd->priv;
1495 uint32_t id = -1;
4332c116 1496 uint64_t chipsize;
401e67e2 1497 int i, ret, num;
30b2afc8 1498 uint16_t ecc_strength, ecc_step;
401e67e2
LW
1499
1500 if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
4332c116 1501 goto KEEP_CONFIG;
401e67e2 1502
bc3e00f0
AT
1503 /* Set a default chunk size */
1504 info->chunk_size = 512;
1505
401e67e2 1506 ret = pxa3xx_nand_sensing(info);
d456882b 1507 if (ret) {
f3c8cfc2
LW
1508 dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
1509 info->cs);
401e67e2 1510
d456882b 1511 return ret;
401e67e2
LW
1512 }
1513
1514 chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
1515 id = *((uint16_t *)(info->data_buff));
1516 if (id != 0)
da675b4e 1517 dev_info(&info->pdev->dev, "Detect a flash id %x\n", id);
401e67e2 1518 else {
da675b4e
LW
1519 dev_warn(&info->pdev->dev,
1520 "Read out ID 0, potential timing set wrong!!\n");
401e67e2
LW
1521
1522 return -EINVAL;
1523 }
1524
a9cadf72 1525 num = ARRAY_SIZE(builtin_flash_types) - 1;
401e67e2 1526 for (i = 0; i < num; i++) {
a9cadf72 1527 f = &builtin_flash_types[i + 1];
401e67e2
LW
1528
1529 /* find the chip in default list */
4332c116 1530 if (f->chip_id == id)
401e67e2 1531 break;
401e67e2
LW
1532 }
1533
a9cadf72 1534 if (i >= (ARRAY_SIZE(builtin_flash_types) - 1)) {
da675b4e 1535 dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");
401e67e2
LW
1536
1537 return -EINVAL;
1538 }
1539
d456882b
LW
1540 ret = pxa3xx_nand_config_flash(info, f);
1541 if (ret) {
1542 dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
1543 return ret;
1544 }
1545
7c2f7176
AT
1546 memset(pxa3xx_flash_ids, 0, sizeof(pxa3xx_flash_ids));
1547
4332c116 1548 pxa3xx_flash_ids[0].name = f->name;
68aa352d 1549 pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
4332c116
LW
1550 pxa3xx_flash_ids[0].pagesize = f->page_size;
1551 chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
1552 pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
1553 pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
1554 if (f->flash_width == 16)
1555 pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
0fab028b
LW
1556 pxa3xx_flash_ids[1].name = NULL;
1557 def = pxa3xx_flash_ids;
4332c116 1558KEEP_CONFIG:
e971affa 1559 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
48cf7efa 1560 if (info->reg_ndcr & NDCR_DWIDTH_M)
d456882b
LW
1561 chip->options |= NAND_BUSWIDTH_16;
1562
43bcfd2b
EG
1563 /* Device detection must be done with ECC disabled */
1564 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1565 nand_writel(info, NDECCCTRL, 0x0);
1566
0fab028b 1567 if (nand_scan_ident(mtd, 1, def))
4332c116 1568 return -ENODEV;
776f265e
EG
1569
1570 if (pdata->flash_bbt) {
1571 /*
1572 * We'll use a bad block table stored in-flash and don't
1573 * allow writing the bad block marker to the flash.
1574 */
1575 chip->bbt_options |= NAND_BBT_USE_FLASH |
1576 NAND_BBT_NO_OOB_BBM;
1577 chip->bbt_td = &bbt_main_descr;
1578 chip->bbt_md = &bbt_mirror_descr;
1579 }
1580
5cbbdc6a
EG
1581 /*
1582 * If the page size is bigger than the FIFO size, let's check
1583 * we are given the right variant and then switch to the extended
1584 * (aka splitted) command handling,
1585 */
1586 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1587 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1588 chip->cmdfunc = nand_cmdfunc_extended;
1589 } else {
1590 dev_err(&info->pdev->dev,
1591 "unsupported page size on this variant\n");
1592 return -ENODEV;
1593 }
1594 }
1595
5b3e5078
EG
1596 if (pdata->ecc_strength && pdata->ecc_step_size) {
1597 ecc_strength = pdata->ecc_strength;
1598 ecc_step = pdata->ecc_step_size;
1599 } else {
1600 ecc_strength = chip->ecc_strength_ds;
1601 ecc_step = chip->ecc_step_ds;
1602 }
30b2afc8
EG
1603
1604 /* Set default ECC strength requirements on non-ONFI devices */
1605 if (ecc_strength < 1 && ecc_step < 1) {
1606 ecc_strength = 1;
1607 ecc_step = 512;
1608 }
1609
1610 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1611 ecc_step, mtd->writesize);
eee0166d
EG
1612 if (ret)
1613 return ret;
43bcfd2b 1614
4332c116 1615 /* calculate addressing information */
d456882b
LW
1616 if (mtd->writesize >= 2048)
1617 host->col_addr_cycles = 2;
1618 else
1619 host->col_addr_cycles = 1;
1620
62e8b851
EG
1621 /* release the initial buffer */
1622 kfree(info->data_buff);
1623
1624 /* allocate the real data + oob buffer */
1625 info->buf_size = mtd->writesize + mtd->oobsize;
1626 ret = pxa3xx_nand_init_buff(info);
1627 if (ret)
1628 return ret;
4332c116 1629 info->oob_buff = info->data_buff + mtd->writesize;
62e8b851 1630
4332c116 1631 if ((mtd->size >> chip->page_shift) > 65536)
d456882b 1632 host->row_addr_cycles = 3;
4332c116 1633 else
d456882b 1634 host->row_addr_cycles = 2;
401e67e2 1635 return nand_scan_tail(mtd);
fe69af00 1636}
1637
d456882b 1638static int alloc_nand_resource(struct platform_device *pdev)
fe69af00 1639{
f3c8cfc2 1640 struct pxa3xx_nand_platform_data *pdata;
fe69af00 1641 struct pxa3xx_nand_info *info;
d456882b 1642 struct pxa3xx_nand_host *host;
6e308f87 1643 struct nand_chip *chip = NULL;
fe69af00 1644 struct mtd_info *mtd;
1645 struct resource *r;
f3c8cfc2 1646 int ret, irq, cs;
fe69af00 1647
453810b7 1648 pdata = dev_get_platdata(&pdev->dev);
e423c90a
RJ
1649 if (pdata->num_cs <= 0)
1650 return -ENODEV;
4c073cd2
EG
1651 info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1652 sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1653 if (!info)
d456882b 1654 return -ENOMEM;
fe69af00 1655
fe69af00 1656 info->pdev = pdev;
c7e9c7e7 1657 info->variant = pxa3xx_nand_get_variant(pdev);
f3c8cfc2 1658 for (cs = 0; cs < pdata->num_cs; cs++) {
ce914e6b 1659 mtd = (void *)&info[1] + (sizeof(*mtd) + sizeof(*host)) * cs;
f3c8cfc2
LW
1660 chip = (struct nand_chip *)(&mtd[1]);
1661 host = (struct pxa3xx_nand_host *)chip;
1662 info->host[cs] = host;
1663 host->mtd = mtd;
1664 host->cs = cs;
1665 host->info_data = info;
1666 mtd->priv = host;
550dab5b 1667 mtd->dev.parent = &pdev->dev;
f3c8cfc2
LW
1668
1669 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1670 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1671 chip->controller = &info->controller;
1672 chip->waitfunc = pxa3xx_nand_waitfunc;
1673 chip->select_chip = pxa3xx_nand_select_chip;
f3c8cfc2
LW
1674 chip->read_word = pxa3xx_nand_read_word;
1675 chip->read_byte = pxa3xx_nand_read_byte;
1676 chip->read_buf = pxa3xx_nand_read_buf;
1677 chip->write_buf = pxa3xx_nand_write_buf;
664c7f5e 1678 chip->options |= NAND_NO_SUBPAGE_WRITE;
5cbbdc6a 1679 chip->cmdfunc = nand_cmdfunc;
f3c8cfc2 1680 }
401e67e2
LW
1681
1682 spin_lock_init(&chip->controller->lock);
1683 init_waitqueue_head(&chip->controller->wq);
9ca7944d 1684 info->clk = devm_clk_get(&pdev->dev, NULL);
fe69af00 1685 if (IS_ERR(info->clk)) {
1686 dev_err(&pdev->dev, "failed to get nand clock\n");
4c073cd2 1687 return PTR_ERR(info->clk);
fe69af00 1688 }
1f8eaff2
EG
1689 ret = clk_prepare_enable(info->clk);
1690 if (ret < 0)
1691 return ret;
fe69af00 1692
6b45c1ee 1693 if (use_dma) {
8f5ba31a
RJ
1694 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1695 if (r == NULL) {
1696 dev_err(&pdev->dev,
1697 "no resource defined for data DMA\n");
1698 ret = -ENXIO;
1699 goto fail_disable_clk;
1e7ba630 1700 }
8f5ba31a
RJ
1701 info->drcmr_dat = r->start;
1702
1703 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1704 if (r == NULL) {
1705 dev_err(&pdev->dev,
1706 "no resource defined for cmd DMA\n");
1707 ret = -ENXIO;
1708 goto fail_disable_clk;
1709 }
1710 info->drcmr_cmd = r->start;
fe69af00 1711 }
fe69af00 1712
1713 irq = platform_get_irq(pdev, 0);
1714 if (irq < 0) {
1715 dev_err(&pdev->dev, "no IRQ resource defined\n");
1716 ret = -ENXIO;
9ca7944d 1717 goto fail_disable_clk;
fe69af00 1718 }
1719
1720 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
0ddd846f
EG
1721 info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1722 if (IS_ERR(info->mmio_base)) {
1723 ret = PTR_ERR(info->mmio_base);
9ca7944d 1724 goto fail_disable_clk;
fe69af00 1725 }
8638fac8 1726 info->mmio_phys = r->start;
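	/* The physical base is used later when setting up DMA to the data FIFO (NDDB) */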
fe69af00 1727
62e8b851
EG
1728 /* Allocate a buffer to allow flash detection */
1729 info->buf_size = INIT_BUFFER_SIZE;
1730 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1731 if (info->data_buff == NULL) {
1732 ret = -ENOMEM;
9ca7944d 1733 goto fail_disable_clk;
62e8b851 1734 }
fe69af00 1735
346e1259
HZ
 1736	/* start off with all interrupts disabled */
1737 disable_int(info, NDSR_MASK);
1738
24542257
RJ
1739 ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1740 pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1741 pdev->name, info);
fe69af00 1742 if (ret < 0) {
1743 dev_err(&pdev->dev, "failed to request IRQ\n");
1744 goto fail_free_buf;
1745 }
1746
e353a20a 1747 platform_set_drvdata(pdev, info);
fe69af00 1748
d456882b 1749 return 0;
fe69af00 1750
fe69af00 1751fail_free_buf:
401e67e2 1752 free_irq(irq, info);
62e8b851 1753 kfree(info->data_buff);
9ca7944d 1754fail_disable_clk:
fb32061f 1755 clk_disable_unprepare(info->clk);
d456882b 1756 return ret;
fe69af00 1757}
1758
1759static int pxa3xx_nand_remove(struct platform_device *pdev)
1760{
e353a20a 1761 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
f3c8cfc2 1762 struct pxa3xx_nand_platform_data *pdata;
f3c8cfc2 1763 int irq, cs;
fe69af00 1764
d456882b
LW
1765 if (!info)
1766 return 0;
1767
453810b7 1768 pdata = dev_get_platdata(&pdev->dev);
fe69af00 1769
dbf5986a
HZ
1770 irq = platform_get_irq(pdev, 0);
1771 if (irq >= 0)
1772 free_irq(irq, info);
498b6145 1773 pxa3xx_nand_free_buff(info);
82a72d10 1774
e971affa
RJ
1775 /*
1776 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
1777 * In order to prevent a lockup of the system bus, the DFI bus
1778 * arbitration is granted to SMC upon driver removal. This is done by
 1779	 * setting the x_ARB_CNTL bit, which also prevents the NAND controller
 1780	 * from accessing the bus.
1781 */
1782 nand_writel(info, NDCR,
1783 (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
1784 NFCV1_NDCR_ARB_CNTL);
fb32061f 1785 clk_disable_unprepare(info->clk);
82a72d10 1786
f3c8cfc2
LW
1787 for (cs = 0; cs < pdata->num_cs; cs++)
1788 nand_release(info->host[cs]->mtd);
fe69af00 1789 return 0;
1790}
1791
1e7ba630
DM
1792static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1793{
1794 struct pxa3xx_nand_platform_data *pdata;
1795 struct device_node *np = pdev->dev.of_node;
1796 const struct of_device_id *of_id =
1797 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1798
1799 if (!of_id)
1800 return 0;
1801
1802 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1803 if (!pdata)
1804 return -ENOMEM;
1805
1806 if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1807 pdata->enable_arbiter = 1;
1808 if (of_get_property(np, "marvell,nand-keep-config", NULL))
1809 pdata->keep_config = 1;
1810 of_property_read_u32(np, "num-cs", &pdata->num_cs);
776f265e 1811 pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1e7ba630 1812
5b3e5078
EG
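	/* Optional ECC properties; zero lets the driver pick its defaults */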
1813 pdata->ecc_strength = of_get_nand_ecc_strength(np);
1814 if (pdata->ecc_strength < 0)
1815 pdata->ecc_strength = 0;
1816
1817 pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
1818 if (pdata->ecc_step_size < 0)
1819 pdata->ecc_step_size = 0;
1820
1e7ba630
DM
1821 pdev->dev.platform_data = pdata;
1822
1823 return 0;
1824}
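
/*
 * For reference, an illustrative device tree node this parser would accept.
 * It is a hypothetical example (addresses and interrupt numbers are
 * placeholders, not taken from any board file):
 *
 *	nand0: nand@43100000 {
 *		compatible = "marvell,pxa3xx-nand";
 *		reg = <0x43100000 90>;
 *		interrupts = <45>;
 *		marvell,nand-enable-arbiter;
 *		marvell,nand-keep-config;
 *		num-cs = <1>;
 *		nand-on-flash-bbt;
 *		nand-ecc-strength = <4>;
 *		nand-ecc-step-size = <512>;
 *	};
 */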
1e7ba630 1825
e353a20a
LW
1826static int pxa3xx_nand_probe(struct platform_device *pdev)
1827{
1828 struct pxa3xx_nand_platform_data *pdata;
1e7ba630 1829 struct mtd_part_parser_data ppdata = {};
e353a20a 1830 struct pxa3xx_nand_info *info;
8f5ba31a 1831 int ret, cs, probe_success, dma_available;
e353a20a 1832
8f5ba31a
RJ
1833 dma_available = IS_ENABLED(CONFIG_ARM) &&
1834 (IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
1835 if (use_dma && !dma_available) {
f4db2e3a
EG
1836 use_dma = 0;
1837 dev_warn(&pdev->dev,
1838 "This platform can't do DMA on this device\n");
1839 }
8f5ba31a 1840
1e7ba630
DM
1841 ret = pxa3xx_nand_probe_dt(pdev);
1842 if (ret)
1843 return ret;
1844
453810b7 1845 pdata = dev_get_platdata(&pdev->dev);
e353a20a
LW
1846 if (!pdata) {
1847 dev_err(&pdev->dev, "no platform data defined\n");
1848 return -ENODEV;
1849 }
1850
d456882b
LW
1851 ret = alloc_nand_resource(pdev);
1852 if (ret) {
1853 dev_err(&pdev->dev, "alloc nand resource failed\n");
1854 return ret;
1855 }
e353a20a 1856
d456882b 1857 info = platform_get_drvdata(pdev);
f3c8cfc2
LW
1858 probe_success = 0;
1859 for (cs = 0; cs < pdata->num_cs; cs++) {
b7655bcb 1860 struct mtd_info *mtd = info->host[cs]->mtd;
f455578d 1861
18a84e93
EG
1862 /*
 1863	 * The mtd name matches the one used in the 'mtdparts' kernel
 1864	 * parameter. This name must not be changed, otherwise the
 1865	 * user's mtd partition configuration would break.
1866 */
1867 mtd->name = "pxa3xx_nand-0";
f3c8cfc2 1868 info->cs = cs;
b7655bcb 1869 ret = pxa3xx_nand_scan(mtd);
f3c8cfc2
LW
1870 if (ret) {
1871 dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1872 cs);
1873 continue;
1874 }
1875
1e7ba630 1876 ppdata.of_node = pdev->dev.of_node;
b7655bcb 1877 ret = mtd_device_parse_register(mtd, NULL,
1e7ba630 1878 &ppdata, pdata->parts[cs],
42d7fbe2 1879 pdata->nr_parts[cs]);
f3c8cfc2
LW
1880 if (!ret)
1881 probe_success = 1;
1882 }
1883
1884 if (!probe_success) {
e353a20a
LW
1885 pxa3xx_nand_remove(pdev);
1886 return -ENODEV;
1887 }
1888
f3c8cfc2 1889 return 0;
e353a20a
LW
1890}
1891
fe69af00 1892#ifdef CONFIG_PM
1893static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1894{
e353a20a 1895 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
f3c8cfc2
LW
1896 struct pxa3xx_nand_platform_data *pdata;
1897 struct mtd_info *mtd;
1898 int cs;
fe69af00 1899
453810b7 1900 pdata = dev_get_platdata(&pdev->dev);
f8155a40 1901 if (info->state) {
fe69af00 1902 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1903 return -EAGAIN;
1904 }
1905
f3c8cfc2
LW
1906 for (cs = 0; cs < pdata->num_cs; cs++) {
1907 mtd = info->host[cs]->mtd;
3fe4bae8 1908 mtd_suspend(mtd);
f3c8cfc2
LW
1909 }
1910
fe69af00 1911 return 0;
1912}
1913
1914static int pxa3xx_nand_resume(struct platform_device *pdev)
1915{
e353a20a 1916 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
f3c8cfc2
LW
1917 struct pxa3xx_nand_platform_data *pdata;
1918 struct mtd_info *mtd;
1919 int cs;
051fc41c 1920
453810b7 1921 pdata = dev_get_platdata(&pdev->dev);
051fc41c
LW
 1922	/* Don't handle any interrupt until the mtd resume routines have run */
1923 disable_int(info, NDCR_INT_MASK);
fe69af00 1924
f3c8cfc2
LW
1925 /*
 1926	 * Directly set the chip select to an invalid value so that
 1927	 * the driver reconfigures the timing for the current chip
 1928	 * select at the beginning of cmdfunc.
1929 */
1930 info->cs = 0xff;
fe69af00 1931
051fc41c
LW
1932 /*
 1933	 * As the spec says, NDSR is updated to 0x1800 when the
 1934	 * nand_clk is disabled and re-enabled.
 1935	 * To prevent this from corrupting the driver's state machine,
 1936	 * clear all status bits before resuming.
1937 */
1938 nand_writel(info, NDSR, NDSR_MASK);
f3c8cfc2
LW
1939 for (cs = 0; cs < pdata->num_cs; cs++) {
1940 mtd = info->host[cs]->mtd;
ead995f8 1941 mtd_resume(mtd);
f3c8cfc2
LW
1942 }
1943
18c81b18 1944 return 0;
fe69af00 1945}
1946#else
1947#define pxa3xx_nand_suspend NULL
1948#define pxa3xx_nand_resume NULL
1949#endif
1950
1951static struct platform_driver pxa3xx_nand_driver = {
1952 .driver = {
1953 .name = "pxa3xx-nand",
5576bc7b 1954 .of_match_table = pxa3xx_nand_dt_ids,
fe69af00 1955 },
1956 .probe = pxa3xx_nand_probe,
1957 .remove = pxa3xx_nand_remove,
1958 .suspend = pxa3xx_nand_suspend,
1959 .resume = pxa3xx_nand_resume,
1960};
1961
f99640de 1962module_platform_driver(pxa3xx_nand_driver);
fe69af00 1963
1964MODULE_LICENSE("GPL");
1965MODULE_DESCRIPTION("PXA3xx NAND controller driver");
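/*
 * Usage note (assuming the driver is built as a module and that "use_dma",
 * referenced in pxa3xx_nand_probe() above, is exposed via module_param(),
 * as in the mainline driver):
 *
 *	modprobe pxa3xx_nand use_dma=1
 */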