mtd: nand: make use of nand_set/get_controller_data() helpers
[deliverable/linux.git] / drivers / mtd / nand / pxa3xx_nand.c
CommitLineData
fe69af00 1/*
2 * drivers/mtd/nand/pxa3xx_nand.c
3 *
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
de484a38
EG
10 *
11 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
fe69af00 12 */
13
a88bdbb5 14#include <linux/kernel.h>
fe69af00 15#include <linux/module.h>
16#include <linux/interrupt.h>
17#include <linux/platform_device.h>
8f5ba31a 18#include <linux/dmaengine.h>
fe69af00 19#include <linux/dma-mapping.h>
8f5ba31a 20#include <linux/dma/pxa-dma.h>
fe69af00 21#include <linux/delay.h>
22#include <linux/clk.h>
23#include <linux/mtd/mtd.h>
24#include <linux/mtd/nand.h>
25#include <linux/mtd/partitions.h>
a1c06ee1 26#include <linux/io.h>
afca11ec 27#include <linux/iopoll.h>
a1c06ee1 28#include <linux/irq.h>
5a0e3ad6 29#include <linux/slab.h>
1e7ba630
DM
30#include <linux/of.h>
31#include <linux/of_device.h>
776f265e 32#include <linux/of_mtd.h>
293b2da1 33#include <linux/platform_data/mtd-nand-pxa3xx.h>
fe69af00 34
e5860c18
NMG
35#define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
36#define NAND_STOP_DELAY msecs_to_jiffies(40)
4eb2da89 37#define PAGE_CHUNK_SIZE (2048)
fe69af00 38
62e8b851
EG
39/*
40 * Define a buffer size for the initial command that detects the flash device:
c1634097
EG
41 * STATUS, READID and PARAM.
42 * ONFI param page is 256 bytes, and there are three redundant copies
43 * to be read. JEDEC param page is 512 bytes, and there are also three
44 * redundant copies to be read.
45 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
62e8b851 46 */
c1634097 47#define INIT_BUFFER_SIZE 2048
62e8b851 48
fe69af00 49/* registers and bit definitions */
50#define NDCR (0x00) /* Control register */
51#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
52#define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
53#define NDSR (0x14) /* Status Register */
54#define NDPCR (0x18) /* Page Count Register */
55#define NDBDR0 (0x1C) /* Bad Block Register 0 */
56#define NDBDR1 (0x20) /* Bad Block Register 1 */
43bcfd2b 57#define NDECCCTRL (0x28) /* ECC control */
fe69af00 58#define NDDB (0x40) /* Data Buffer */
59#define NDCB0 (0x48) /* Command Buffer0 */
60#define NDCB1 (0x4C) /* Command Buffer1 */
61#define NDCB2 (0x50) /* Command Buffer2 */
62
63#define NDCR_SPARE_EN (0x1 << 31)
64#define NDCR_ECC_EN (0x1 << 30)
65#define NDCR_DMA_EN (0x1 << 29)
66#define NDCR_ND_RUN (0x1 << 28)
67#define NDCR_DWIDTH_C (0x1 << 27)
68#define NDCR_DWIDTH_M (0x1 << 26)
69#define NDCR_PAGE_SZ (0x1 << 24)
70#define NDCR_NCSX (0x1 << 23)
71#define NDCR_ND_MODE (0x3 << 21)
72#define NDCR_NAND_MODE (0x0)
73#define NDCR_CLR_PG_CNT (0x1 << 20)
e971affa
RJ
74#define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
75#define NFCV2_NDCR_STOP_ON_UNCOR (0x1 << 19)
fe69af00 76#define NDCR_RD_ID_CNT_MASK (0x7 << 16)
77#define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
78
79#define NDCR_RA_START (0x1 << 15)
80#define NDCR_PG_PER_BLK (0x1 << 14)
81#define NDCR_ND_ARB_EN (0x1 << 12)
f8155a40 82#define NDCR_INT_MASK (0xFFF)
fe69af00 83
84#define NDSR_MASK (0xfff)
87f5336e
EG
85#define NDSR_ERR_CNT_OFF (16)
86#define NDSR_ERR_CNT_MASK (0x1f)
87#define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
f8155a40
LW
88#define NDSR_RDY (0x1 << 12)
89#define NDSR_FLASH_RDY (0x1 << 11)
fe69af00 90#define NDSR_CS0_PAGED (0x1 << 10)
91#define NDSR_CS1_PAGED (0x1 << 9)
92#define NDSR_CS0_CMDD (0x1 << 8)
93#define NDSR_CS1_CMDD (0x1 << 7)
94#define NDSR_CS0_BBD (0x1 << 6)
95#define NDSR_CS1_BBD (0x1 << 5)
87f5336e
EG
96#define NDSR_UNCORERR (0x1 << 4)
97#define NDSR_CORERR (0x1 << 3)
fe69af00 98#define NDSR_WRDREQ (0x1 << 2)
99#define NDSR_RDDREQ (0x1 << 1)
100#define NDSR_WRCMDREQ (0x1)
101
41a63430 102#define NDCB0_LEN_OVRD (0x1 << 28)
4eb2da89 103#define NDCB0_ST_ROW_EN (0x1 << 26)
fe69af00 104#define NDCB0_AUTO_RS (0x1 << 25)
105#define NDCB0_CSEL (0x1 << 24)
70ed8523
EG
106#define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
107#define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
fe69af00 108#define NDCB0_CMD_TYPE_MASK (0x7 << 21)
109#define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
110#define NDCB0_NC (0x1 << 20)
111#define NDCB0_DBC (0x1 << 19)
112#define NDCB0_ADDR_CYC_MASK (0x7 << 16)
113#define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
114#define NDCB0_CMD2_MASK (0xff << 8)
115#define NDCB0_CMD1_MASK (0xff)
116#define NDCB0_ADDR_CYC_SHIFT (16)
117
70ed8523
EG
118#define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
119#define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
120#define EXT_CMD_TYPE_READ 4 /* Read */
121#define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
122#define EXT_CMD_TYPE_FINAL 3 /* Final command */
123#define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
124#define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
125
b226eca2
EG
126/*
127 * This should be large enough to read 'ONFI' and 'JEDEC'.
128 * Let's use 7 bytes, which is the maximum ID count supported
129 * by the controller (see NDCR_RD_ID_CNT_MASK).
130 */
131#define READ_ID_BYTES 7
132
fe69af00 133/* macros for registers read/write */
134#define nand_writel(info, off, val) \
b7e46062 135 writel_relaxed((val), (info)->mmio_base + (off))
fe69af00 136
137#define nand_readl(info, off) \
b7e46062 138 readl_relaxed((info)->mmio_base + (off))
fe69af00 139
/* error codes (reported through info->retcode) and driver states */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,	/* DMA bus error */
	ERR_SENDCMD	= -2,	/* command issue failed */
	ERR_UNCORERR	= -3,	/* uncorrectable ECC error */
	ERR_BBERR	= -4,	/* bad block detected */
	ERR_CORERR	= -5,	/* correctable ECC error */
};

/* state machine for the command/data flow */
enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

/* controller flavours: NFCv1 (PXA SoCs) vs NFCv2 (Armada 370/XP) */
enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};
167
d456882b
LW
168struct pxa3xx_nand_host {
169 struct nand_chip chip;
d456882b
LW
170 void *info_data;
171
172 /* page size of attached chip */
d456882b 173 int use_ecc;
f3c8cfc2 174 int cs;
fe69af00 175
d456882b
LW
176 /* calculated from pxa3xx_nand_flash data */
177 unsigned int col_addr_cycles;
178 unsigned int row_addr_cycles;
d456882b
LW
179};
180
181struct pxa3xx_nand_info {
401e67e2 182 struct nand_hw_control controller;
fe69af00 183 struct platform_device *pdev;
fe69af00 184
185 struct clk *clk;
186 void __iomem *mmio_base;
8638fac8 187 unsigned long mmio_phys;
55d9fd6e 188 struct completion cmd_complete, dev_ready;
fe69af00 189
190 unsigned int buf_start;
191 unsigned int buf_count;
62e8b851 192 unsigned int buf_size;
fa543bef
EG
193 unsigned int data_buff_pos;
194 unsigned int oob_buff_pos;
fe69af00 195
196 /* DMA information */
8f5ba31a
RJ
197 struct scatterlist sg;
198 enum dma_data_direction dma_dir;
199 struct dma_chan *dma_chan;
200 dma_cookie_t dma_cookie;
fe69af00 201 int drcmr_dat;
202 int drcmr_cmd;
203
204 unsigned char *data_buff;
18c81b18 205 unsigned char *oob_buff;
fe69af00 206 dma_addr_t data_buff_phys;
fe69af00 207 int data_dma_ch;
fe69af00 208
f3c8cfc2 209 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
fe69af00 210 unsigned int state;
211
c0f3b864
EG
212 /*
213 * This driver supports NFCv1 (as found in PXA SoC)
214 * and NFCv2 (as found in Armada 370/XP SoC).
215 */
216 enum pxa3xx_nand_variant variant;
217
f3c8cfc2 218 int cs;
fe69af00 219 int use_ecc; /* use HW ECC ? */
43bcfd2b 220 int ecc_bch; /* using BCH ECC? */
fe69af00 221 int use_dma; /* use DMA ? */
5bb653e8 222 int use_spare; /* use spare ? */
55d9fd6e 223 int need_wait;
fe69af00 224
2128b08c 225 unsigned int data_size; /* data to be read from FIFO */
70ed8523 226 unsigned int chunk_size; /* split commands chunk size */
d456882b 227 unsigned int oob_size;
43bcfd2b
EG
228 unsigned int spare_size;
229 unsigned int ecc_size;
87f5336e
EG
230 unsigned int ecc_err_cnt;
231 unsigned int max_bitflips;
fe69af00 232 int retcode;
fe69af00 233
48cf7efa
EG
234 /* cached register value */
235 uint32_t reg_ndcr;
236 uint32_t ndtr0cs0;
237 uint32_t ndtr1cs0;
238
fe69af00 239 /* generated NDCBx register values */
240 uint32_t ndcb0;
241 uint32_t ndcb1;
242 uint32_t ndcb2;
3a1a344a 243 uint32_t ndcb3;
fe69af00 244};
245
90ab5ee9 246static bool use_dma = 1;
fe69af00 247module_param(use_dma, bool, 0444);
25985edc 248MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
fe69af00 249
a9cadf72
EG
250struct pxa3xx_nand_timing {
251 unsigned int tCH; /* Enable signal hold time */
252 unsigned int tCS; /* Enable signal setup time */
253 unsigned int tWH; /* ND_nWE high duration */
254 unsigned int tWP; /* ND_nWE pulse time */
255 unsigned int tRH; /* ND_nRE high duration */
256 unsigned int tRP; /* ND_nRE pulse width */
257 unsigned int tR; /* ND_nWE high to ND_nRE low for read */
258 unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */
259 unsigned int tAR; /* ND_ALE low to ND_nRE low delay */
260};
261
262struct pxa3xx_nand_flash {
a9cadf72 263 uint32_t chip_id;
a9cadf72
EG
264 unsigned int flash_width; /* Width of Flash memory (DWIDTH_M) */
265 unsigned int dfc_width; /* Width of flash controller(DWIDTH_C) */
a9cadf72
EG
266 struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
267};
268
c1f82478 269static struct pxa3xx_nand_timing timing[] = {
227a886c
LW
270 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
271 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
272 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
273 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
d3490dfd
HZ
274};
275
c1f82478 276static struct pxa3xx_nand_flash builtin_flash_types[] = {
89c1702d
AT
277 { 0x46ec, 16, 16, &timing[1] },
278 { 0xdaec, 8, 8, &timing[1] },
279 { 0xd7ec, 8, 8, &timing[1] },
280 { 0xa12c, 8, 8, &timing[2] },
281 { 0xb12c, 16, 16, &timing[2] },
282 { 0xdc2c, 8, 8, &timing[2] },
283 { 0xcc2c, 16, 16, &timing[2] },
284 { 0xba20, 16, 16, &timing[3] },
d3490dfd
HZ
285};
286
776f265e
EG
287static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
288static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
289
290static struct nand_bbt_descr bbt_main_descr = {
291 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
292 | NAND_BBT_2BIT | NAND_BBT_VERSION,
293 .offs = 8,
294 .len = 6,
295 .veroffs = 14,
296 .maxblocks = 8, /* Last 8 blocks in each chip */
297 .pattern = bbt_pattern
298};
299
300static struct nand_bbt_descr bbt_mirror_descr = {
301 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
302 | NAND_BBT_2BIT | NAND_BBT_VERSION,
303 .offs = 8,
304 .len = 6,
305 .veroffs = 14,
306 .maxblocks = 8, /* Last 8 blocks in each chip */
307 .pattern = bbt_mirror_pattern
308};
309
3db227b6
RG
310static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
311 .eccbytes = 32,
312 .eccpos = {
313 32, 33, 34, 35, 36, 37, 38, 39,
314 40, 41, 42, 43, 44, 45, 46, 47,
315 48, 49, 50, 51, 52, 53, 54, 55,
316 56, 57, 58, 59, 60, 61, 62, 63},
317 .oobfree = { {2, 30} }
318};
319
70ed8523
EG
320static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
321 .eccbytes = 64,
322 .eccpos = {
323 32, 33, 34, 35, 36, 37, 38, 39,
324 40, 41, 42, 43, 44, 45, 46, 47,
325 48, 49, 50, 51, 52, 53, 54, 55,
326 56, 57, 58, 59, 60, 61, 62, 63,
327 96, 97, 98, 99, 100, 101, 102, 103,
328 104, 105, 106, 107, 108, 109, 110, 111,
329 112, 113, 114, 115, 116, 117, 118, 119,
330 120, 121, 122, 123, 124, 125, 126, 127},
331 /* Bootrom looks in bytes 0 & 5 for bad blocks */
332 .oobfree = { {6, 26}, { 64, 32} }
333};
334
335static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
336 .eccbytes = 128,
337 .eccpos = {
338 32, 33, 34, 35, 36, 37, 38, 39,
339 40, 41, 42, 43, 44, 45, 46, 47,
340 48, 49, 50, 51, 52, 53, 54, 55,
341 56, 57, 58, 59, 60, 61, 62, 63},
342 .oobfree = { }
343};
344
fe69af00 345#define NDTR0_tCH(c) (min((c), 7) << 19)
346#define NDTR0_tCS(c) (min((c), 7) << 16)
347#define NDTR0_tWH(c) (min((c), 7) << 11)
348#define NDTR0_tWP(c) (min((c), 7) << 8)
349#define NDTR0_tRH(c) (min((c), 7) << 3)
350#define NDTR0_tRP(c) (min((c), 7) << 0)
351
352#define NDTR1_tR(c) (min((c), 65535) << 16)
353#define NDTR1_tWHR(c) (min((c), 15) << 4)
354#define NDTR1_tAR(c) (min((c), 15) << 0)
355
356/* convert nano-seconds to nand flash controller clock cycles */
93b352fc 357#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
fe69af00 358
17754ad6 359static const struct of_device_id pxa3xx_nand_dt_ids[] = {
c7e9c7e7
EG
360 {
361 .compatible = "marvell,pxa3xx-nand",
362 .data = (void *)PXA3XX_NAND_VARIANT_PXA,
363 },
1963ff97
EG
364 {
365 .compatible = "marvell,armada370-nand",
366 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
367 },
c7e9c7e7
EG
368 {}
369};
370MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
371
372static enum pxa3xx_nand_variant
373pxa3xx_nand_get_variant(struct platform_device *pdev)
374{
375 const struct of_device_id *of_id =
376 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
377 if (!of_id)
378 return PXA3XX_NAND_VARIANT_PXA;
379 return (enum pxa3xx_nand_variant)of_id->data;
380}
381
d456882b 382static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
7dad482e 383 const struct pxa3xx_nand_timing *t)
fe69af00 384{
d456882b 385 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 386 unsigned long nand_clk = clk_get_rate(info->clk);
387 uint32_t ndtr0, ndtr1;
388
389 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
390 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
391 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
392 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
393 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
394 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
395
396 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
397 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
398 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
399
48cf7efa
EG
400 info->ndtr0cs0 = ndtr0;
401 info->ndtr1cs0 = ndtr1;
fe69af00 402 nand_writel(info, NDTR0CS0, ndtr0);
403 nand_writel(info, NDTR1CS0, ndtr1);
404}
405
3f225b7f
AT
406static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
407 const struct nand_sdr_timings *t)
408{
409 struct pxa3xx_nand_info *info = host->info_data;
410 struct nand_chip *chip = &host->chip;
411 unsigned long nand_clk = clk_get_rate(info->clk);
412 uint32_t ndtr0, ndtr1;
413
414 u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
415 u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
416 u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
417 u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
418 u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
419 u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
420 u32 tR = chip->chip_delay * 1000;
421 u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
422 u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
423
424 /* fallback to a default value if tR = 0 */
425 if (!tR)
426 tR = 20000;
427
428 ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
429 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
430 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
431 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
432 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
433 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
434
435 ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
436 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
437 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
438
439 info->ndtr0cs0 = ndtr0;
440 info->ndtr1cs0 = ndtr1;
441 nand_writel(info, NDTR0CS0, ndtr0);
442 nand_writel(info, NDTR1CS0, ndtr1);
443}
444
445static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
446 unsigned int *flash_width,
447 unsigned int *dfc_width)
448{
449 struct nand_chip *chip = &host->chip;
450 struct pxa3xx_nand_info *info = host->info_data;
451 const struct pxa3xx_nand_flash *f = NULL;
063294a3 452 struct mtd_info *mtd = nand_to_mtd(&host->chip);
3f225b7f
AT
453 int i, id, ntypes;
454
455 ntypes = ARRAY_SIZE(builtin_flash_types);
456
063294a3 457 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
3f225b7f 458
063294a3
BB
459 id = chip->read_byte(mtd);
460 id |= chip->read_byte(mtd) << 0x8;
3f225b7f
AT
461
462 for (i = 0; i < ntypes; i++) {
463 f = &builtin_flash_types[i];
464
465 if (f->chip_id == id)
466 break;
467 }
468
469 if (i == ntypes) {
470 dev_err(&info->pdev->dev, "Error: timings not found\n");
471 return -EINVAL;
472 }
473
474 pxa3xx_nand_set_timing(host, f->timing);
475
476 *flash_width = f->flash_width;
477 *dfc_width = f->dfc_width;
478
479 return 0;
480}
481
/*
 * ONFI timing setup: pick the fastest mode advertised in the mode
 * bitfield and program the corresponding SDR timings.
 */
static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host *host,
					 int mode)
{
	const struct nand_sdr_timings *timings;

	/* highest set bit = fastest supported mode; clamp to mode 0 */
	mode = fls(mode) - 1;
	if (mode < 0)
		mode = 0;

	timings = onfi_async_timing_mode_to_sdr_timings(mode);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	return 0;
}
499
500static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
501{
502 struct nand_chip *chip = &host->chip;
503 struct pxa3xx_nand_info *info = host->info_data;
504 unsigned int flash_width = 0, dfc_width = 0;
505 int mode, err;
506
507 mode = onfi_get_async_timing_mode(chip);
508 if (mode == ONFI_TIMING_MODE_UNKNOWN) {
509 err = pxa3xx_nand_init_timings_compat(host, &flash_width,
510 &dfc_width);
511 if (err)
512 return err;
513
514 if (flash_width == 16) {
515 info->reg_ndcr |= NDCR_DWIDTH_M;
516 chip->options |= NAND_BUSWIDTH_16;
517 }
518
519 info->reg_ndcr |= (dfc_width == 16) ? NDCR_DWIDTH_C : 0;
520 } else {
521 err = pxa3xx_nand_init_timings_onfi(host, mode);
522 if (err)
523 return err;
524 }
525
526 return 0;
527}
528
6a3e4865
EG
529/*
530 * Set the data and OOB size, depending on the selected
531 * spare and ECC configuration.
532 * Only applicable to READ0, READOOB and PAGEPROG commands.
533 */
fa543bef
EG
534static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
535 struct mtd_info *mtd)
fe69af00 536{
48cf7efa 537 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
9d8b1043 538
fa543bef 539 info->data_size = mtd->writesize;
43bcfd2b 540 if (!oob_enable)
9d8b1043 541 return;
9d8b1043 542
43bcfd2b
EG
543 info->oob_size = info->spare_size;
544 if (!info->use_ecc)
545 info->oob_size += info->ecc_size;
18c81b18
LW
546}
547
f8155a40
LW
548/**
549 * NOTE: it is a must to set ND_RUN firstly, then write
550 * command buffer, otherwise, it does not work.
551 * We enable all the interrupt at the same time, and
552 * let pxa3xx_nand_irq to handle all logic.
553 */
554static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
555{
556 uint32_t ndcr;
557
48cf7efa 558 ndcr = info->reg_ndcr;
cd9d1182 559
43bcfd2b 560 if (info->use_ecc) {
cd9d1182 561 ndcr |= NDCR_ECC_EN;
43bcfd2b
EG
562 if (info->ecc_bch)
563 nand_writel(info, NDECCCTRL, 0x1);
564 } else {
cd9d1182 565 ndcr &= ~NDCR_ECC_EN;
43bcfd2b
EG
566 if (info->ecc_bch)
567 nand_writel(info, NDECCCTRL, 0x0);
568 }
cd9d1182
EG
569
570 if (info->use_dma)
571 ndcr |= NDCR_DMA_EN;
572 else
573 ndcr &= ~NDCR_DMA_EN;
574
5bb653e8
EG
575 if (info->use_spare)
576 ndcr |= NDCR_SPARE_EN;
577 else
578 ndcr &= ~NDCR_SPARE_EN;
579
f8155a40
LW
580 ndcr |= NDCR_ND_RUN;
581
582 /* clear status bits and run */
f8155a40 583 nand_writel(info, NDSR, NDSR_MASK);
0b14392d 584 nand_writel(info, NDCR, 0);
f8155a40
LW
585 nand_writel(info, NDCR, ndcr);
586}
587
588static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
589{
590 uint32_t ndcr;
591 int timeout = NAND_STOP_DELAY;
592
593 /* wait RUN bit in NDCR become 0 */
594 ndcr = nand_readl(info, NDCR);
595 while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
596 ndcr = nand_readl(info, NDCR);
597 udelay(1);
598 }
599
600 if (timeout <= 0) {
601 ndcr &= ~NDCR_ND_RUN;
602 nand_writel(info, NDCR, ndcr);
603 }
8f5ba31a
RJ
604 if (info->dma_chan)
605 dmaengine_terminate_all(info->dma_chan);
606
f8155a40
LW
607 /* clear status bits */
608 nand_writel(info, NDSR, NDSR_MASK);
609}
610
57ff88f0
EG
611static void __maybe_unused
612enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
fe69af00 613{
614 uint32_t ndcr;
615
616 ndcr = nand_readl(info, NDCR);
617 nand_writel(info, NDCR, ndcr & ~int_mask);
618}
619
620static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
621{
622 uint32_t ndcr;
623
624 ndcr = nand_readl(info, NDCR);
625 nand_writel(info, NDCR, ndcr | int_mask);
626}
627
8dad0386
MR
628static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
629{
630 if (info->ecc_bch) {
afca11ec
MR
631 u32 val;
632 int ret;
8dad0386
MR
633
634 /*
635 * According to the datasheet, when reading from NDDB
636 * with BCH enabled, after each 32 bytes reads, we
637 * have to make sure that the NDSR.RDDREQ bit is set.
638 *
639 * Drain the FIFO 8 32 bits reads at a time, and skip
640 * the polling on the last read.
641 */
642 while (len > 8) {
ab53a571 643 ioread32_rep(info->mmio_base + NDDB, data, 8);
8dad0386 644
afca11ec
MR
645 ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
646 val & NDSR_RDDREQ, 1000, 5000);
647 if (ret) {
648 dev_err(&info->pdev->dev,
649 "Timeout on RDDREQ while draining the FIFO\n");
650 return;
8dad0386
MR
651 }
652
653 data += 32;
654 len -= 8;
655 }
656 }
657
ab53a571 658 ioread32_rep(info->mmio_base + NDDB, data, len);
8dad0386
MR
659}
660
f8155a40 661static void handle_data_pio(struct pxa3xx_nand_info *info)
fe69af00 662{
70ed8523 663 unsigned int do_bytes = min(info->data_size, info->chunk_size);
fa543bef 664
fe69af00 665 switch (info->state) {
666 case STATE_PIO_WRITING:
ce914e6b
RH
667 writesl(info->mmio_base + NDDB,
668 info->data_buff + info->data_buff_pos,
669 DIV_ROUND_UP(do_bytes, 4));
fa543bef 670
9d8b1043 671 if (info->oob_size > 0)
ce914e6b
RH
672 writesl(info->mmio_base + NDDB,
673 info->oob_buff + info->oob_buff_pos,
674 DIV_ROUND_UP(info->oob_size, 4));
fe69af00 675 break;
676 case STATE_PIO_READING:
8dad0386
MR
677 drain_fifo(info,
678 info->data_buff + info->data_buff_pos,
679 DIV_ROUND_UP(do_bytes, 4));
fa543bef 680
9d8b1043 681 if (info->oob_size > 0)
8dad0386
MR
682 drain_fifo(info,
683 info->oob_buff + info->oob_buff_pos,
684 DIV_ROUND_UP(info->oob_size, 4));
fe69af00 685 break;
686 default:
da675b4e 687 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
fe69af00 688 info->state);
f8155a40 689 BUG();
fe69af00 690 }
fa543bef
EG
691
692 /* Update buffer pointers for multi-page read/write */
693 info->data_buff_pos += do_bytes;
694 info->oob_buff_pos += info->oob_size;
695 info->data_size -= do_bytes;
fe69af00 696}
697
8f5ba31a 698static void pxa3xx_nand_data_dma_irq(void *data)
fe69af00 699{
8f5ba31a
RJ
700 struct pxa3xx_nand_info *info = data;
701 struct dma_tx_state state;
702 enum dma_status status;
fe69af00 703
8f5ba31a
RJ
704 status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
705 if (likely(status == DMA_COMPLETE)) {
706 info->state = STATE_DMA_DONE;
707 } else {
708 dev_err(&info->pdev->dev, "DMA error on data channel\n");
709 info->retcode = ERR_DMABUSERR;
710 }
711 dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
712
713 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
714 enable_int(info, NDCR_INT_MASK);
715}
716
717static void start_data_dma(struct pxa3xx_nand_info *info)
718{
719 enum dma_transfer_direction direction;
720 struct dma_async_tx_descriptor *tx;
fe69af00 721
f8155a40
LW
722 switch (info->state) {
723 case STATE_DMA_WRITING:
8f5ba31a
RJ
724 info->dma_dir = DMA_TO_DEVICE;
725 direction = DMA_MEM_TO_DEV;
f8155a40
LW
726 break;
727 case STATE_DMA_READING:
8f5ba31a
RJ
728 info->dma_dir = DMA_FROM_DEVICE;
729 direction = DMA_DEV_TO_MEM;
f8155a40
LW
730 break;
731 default:
da675b4e 732 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
f8155a40
LW
733 info->state);
734 BUG();
fe69af00 735 }
8f5ba31a
RJ
736 info->sg.length = info->data_size +
737 (info->oob_size ? info->spare_size + info->ecc_size : 0);
738 dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
739
740 tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
741 DMA_PREP_INTERRUPT);
742 if (!tx) {
743 dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
744 return;
fe69af00 745 }
8f5ba31a
RJ
746 tx->callback = pxa3xx_nand_data_dma_irq;
747 tx->callback_param = info;
748 info->dma_cookie = dmaengine_submit(tx);
749 dma_async_issue_pending(info->dma_chan);
750 dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
751 __func__, direction, info->dma_cookie, info->sg.length);
fe69af00 752}
753
24542257
RJ
754static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
755{
756 struct pxa3xx_nand_info *info = data;
757
758 handle_data_pio(info);
759
760 info->state = STATE_CMD_DONE;
761 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
762
763 return IRQ_HANDLED;
764}
765
fe69af00 766static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
767{
768 struct pxa3xx_nand_info *info = devid;
55d9fd6e 769 unsigned int status, is_completed = 0, is_ready = 0;
f3c8cfc2 770 unsigned int ready, cmd_done;
24542257 771 irqreturn_t ret = IRQ_HANDLED;
f3c8cfc2
LW
772
773 if (info->cs == 0) {
774 ready = NDSR_FLASH_RDY;
775 cmd_done = NDSR_CS0_CMDD;
776 } else {
777 ready = NDSR_RDY;
778 cmd_done = NDSR_CS1_CMDD;
779 }
fe69af00 780
781 status = nand_readl(info, NDSR);
782
87f5336e
EG
783 if (status & NDSR_UNCORERR)
784 info->retcode = ERR_UNCORERR;
785 if (status & NDSR_CORERR) {
786 info->retcode = ERR_CORERR;
787 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
788 info->ecc_bch)
789 info->ecc_err_cnt = NDSR_ERR_CNT(status);
790 else
791 info->ecc_err_cnt = 1;
792
793 /*
794 * Each chunk composing a page is corrected independently,
795 * and we need to store maximum number of corrected bitflips
796 * to return it to the MTD layer in ecc.read_page().
797 */
798 info->max_bitflips = max_t(unsigned int,
799 info->max_bitflips,
800 info->ecc_err_cnt);
801 }
f8155a40
LW
802 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
803 /* whether use dma to transfer data */
fe69af00 804 if (info->use_dma) {
f8155a40
LW
805 disable_int(info, NDCR_INT_MASK);
806 info->state = (status & NDSR_RDDREQ) ?
807 STATE_DMA_READING : STATE_DMA_WRITING;
808 start_data_dma(info);
809 goto NORMAL_IRQ_EXIT;
fe69af00 810 } else {
f8155a40
LW
811 info->state = (status & NDSR_RDDREQ) ?
812 STATE_PIO_READING : STATE_PIO_WRITING;
24542257
RJ
813 ret = IRQ_WAKE_THREAD;
814 goto NORMAL_IRQ_EXIT;
fe69af00 815 }
fe69af00 816 }
f3c8cfc2 817 if (status & cmd_done) {
f8155a40
LW
818 info->state = STATE_CMD_DONE;
819 is_completed = 1;
fe69af00 820 }
f3c8cfc2 821 if (status & ready) {
f8155a40 822 info->state = STATE_READY;
55d9fd6e 823 is_ready = 1;
401e67e2 824 }
fe69af00 825
21fc0ef9
RJ
826 /*
827 * Clear all status bit before issuing the next command, which
828 * can and will alter the status bits and will deserve a new
829 * interrupt on its own. This lets the controller exit the IRQ
830 */
831 nand_writel(info, NDSR, status);
832
f8155a40 833 if (status & NDSR_WRCMDREQ) {
f8155a40
LW
834 status &= ~NDSR_WRCMDREQ;
835 info->state = STATE_CMD_HANDLE;
3a1a344a
EG
836
837 /*
838 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
839 * must be loaded by writing directly either 12 or 16
840 * bytes directly to NDCB0, four bytes at a time.
841 *
842 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
843 * but each NDCBx register can be read.
844 */
f8155a40
LW
845 nand_writel(info, NDCB0, info->ndcb0);
846 nand_writel(info, NDCB0, info->ndcb1);
847 nand_writel(info, NDCB0, info->ndcb2);
3a1a344a
EG
848
849 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
850 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
851 nand_writel(info, NDCB0, info->ndcb3);
fe69af00 852 }
853
f8155a40
LW
854 if (is_completed)
855 complete(&info->cmd_complete);
55d9fd6e
EG
856 if (is_ready)
857 complete(&info->dev_ready);
f8155a40 858NORMAL_IRQ_EXIT:
24542257 859 return ret;
fe69af00 860}
861
/* Return 1 if the buffer contains only 0xff (erased) bytes, else 0. */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (buf[i] != 0xff)
			return 0;
	}
	return 1;
}
869
86beebae
EG
870static void set_command_address(struct pxa3xx_nand_info *info,
871 unsigned int page_size, uint16_t column, int page_addr)
872{
873 /* small page addr setting */
874 if (page_size < PAGE_CHUNK_SIZE) {
875 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
876 | (column & 0xFF);
877
878 info->ndcb2 = 0;
879 } else {
880 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
881 | (column & 0xFFFF);
882
883 if (page_addr & 0xFF0000)
884 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
885 else
886 info->ndcb2 = 0;
887 }
888}
889
c39ff03a 890static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
fe69af00 891{
39f83d15 892 struct pxa3xx_nand_host *host = info->host[info->cs];
063294a3 893 struct mtd_info *mtd = nand_to_mtd(&host->chip);
39f83d15 894
4eb2da89 895 /* reset data and oob column point to handle data */
401e67e2
LW
896 info->buf_start = 0;
897 info->buf_count = 0;
4eb2da89 898 info->oob_size = 0;
fa543bef
EG
899 info->data_buff_pos = 0;
900 info->oob_buff_pos = 0;
4eb2da89 901 info->use_ecc = 0;
5bb653e8 902 info->use_spare = 1;
4eb2da89 903 info->retcode = ERR_NONE;
87f5336e 904 info->ecc_err_cnt = 0;
f0e6a32e 905 info->ndcb3 = 0;
d20d0a6c 906 info->need_wait = 0;
fe69af00 907
908 switch (command) {
4eb2da89
LW
909 case NAND_CMD_READ0:
910 case NAND_CMD_PAGEPROG:
911 info->use_ecc = 1;
fe69af00 912 case NAND_CMD_READOOB:
fa543bef 913 pxa3xx_set_datasize(info, mtd);
fe69af00 914 break;
41a63430
EG
915 case NAND_CMD_PARAM:
916 info->use_spare = 0;
917 break;
4eb2da89
LW
918 default:
919 info->ndcb1 = 0;
920 info->ndcb2 = 0;
921 break;
922 }
39f83d15
EG
923
924 /*
925 * If we are about to issue a read command, or about to set
926 * the write address, then clean the data buffer.
927 */
928 if (command == NAND_CMD_READ0 ||
929 command == NAND_CMD_READOOB ||
930 command == NAND_CMD_SEQIN) {
931
932 info->buf_count = mtd->writesize + mtd->oobsize;
933 memset(info->data_buff, 0xFF, info->buf_count);
934 }
935
c39ff03a
EG
936}
937
938static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
70ed8523 939 int ext_cmd_type, uint16_t column, int page_addr)
c39ff03a
EG
940{
941 int addr_cycle, exec_cmd;
942 struct pxa3xx_nand_host *host;
943 struct mtd_info *mtd;
944
945 host = info->host[info->cs];
063294a3 946 mtd = nand_to_mtd(&host->chip);
c39ff03a
EG
947 addr_cycle = 0;
948 exec_cmd = 1;
949
950 if (info->cs != 0)
951 info->ndcb0 = NDCB0_CSEL;
952 else
953 info->ndcb0 = 0;
954
955 if (command == NAND_CMD_SEQIN)
956 exec_cmd = 0;
4eb2da89 957
d456882b
LW
958 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
959 + host->col_addr_cycles);
fe69af00 960
4eb2da89
LW
961 switch (command) {
962 case NAND_CMD_READOOB:
fe69af00 963 case NAND_CMD_READ0:
ec82135a
EG
964 info->buf_start = column;
965 info->ndcb0 |= NDCB0_CMD_TYPE(0)
966 | addr_cycle
967 | NAND_CMD_READ0;
968
4eb2da89 969 if (command == NAND_CMD_READOOB)
ec82135a 970 info->buf_start += mtd->writesize;
4eb2da89 971
70ed8523
EG
972 /*
973 * Multiple page read needs an 'extended command type' field,
974 * which is either naked-read or last-read according to the
975 * state.
976 */
977 if (mtd->writesize == PAGE_CHUNK_SIZE) {
ec82135a 978 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
70ed8523
EG
979 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
980 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
981 | NDCB0_LEN_OVRD
982 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
983 info->ndcb3 = info->chunk_size +
984 info->oob_size;
985 }
fe69af00 986
01d9947e 987 set_command_address(info, mtd->writesize, column, page_addr);
01d9947e
EG
988 break;
989
fe69af00 990 case NAND_CMD_SEQIN:
4eb2da89 991
e7f9a6a4
EG
992 info->buf_start = column;
993 set_command_address(info, mtd->writesize, 0, page_addr);
535cb57a
EG
994
995 /*
996 * Multiple page programming needs to execute the initial
997 * SEQIN command that sets the page address.
998 */
999 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1000 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
1001 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
1002 | addr_cycle
1003 | command;
1004 /* No data transfer in this case */
1005 info->data_size = 0;
1006 exec_cmd = 1;
1007 }
fe69af00 1008 break;
4eb2da89 1009
fe69af00 1010 case NAND_CMD_PAGEPROG:
4eb2da89
LW
1011 if (is_buf_blank(info->data_buff,
1012 (mtd->writesize + mtd->oobsize))) {
1013 exec_cmd = 0;
1014 break;
1015 }
fe69af00 1016
535cb57a
EG
1017 /* Second command setting for large pages */
1018 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1019 /*
1020 * Multiple page write uses the 'extended command'
1021 * field. This can be used to issue a command dispatch
1022 * or a naked-write depending on the current stage.
1023 */
1024 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
1025 | NDCB0_LEN_OVRD
1026 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
1027 info->ndcb3 = info->chunk_size +
1028 info->oob_size;
1029
1030 /*
1031 * This is the command dispatch that completes a chunked
1032 * page program operation.
1033 */
1034 if (info->data_size == 0) {
1035 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
1036 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
1037 | command;
1038 info->ndcb1 = 0;
1039 info->ndcb2 = 0;
1040 info->ndcb3 = 0;
1041 }
1042 } else {
1043 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
1044 | NDCB0_AUTO_RS
1045 | NDCB0_ST_ROW_EN
1046 | NDCB0_DBC
1047 | (NAND_CMD_PAGEPROG << 8)
1048 | NAND_CMD_SEQIN
1049 | addr_cycle;
1050 }
fe69af00 1051 break;
4eb2da89 1052
ce0268f6 1053 case NAND_CMD_PARAM:
c1634097 1054 info->buf_count = INIT_BUFFER_SIZE;
ce0268f6
EG
1055 info->ndcb0 |= NDCB0_CMD_TYPE(0)
1056 | NDCB0_ADDR_CYC(1)
41a63430 1057 | NDCB0_LEN_OVRD
ec82135a 1058 | command;
ce0268f6 1059 info->ndcb1 = (column & 0xFF);
c1634097
EG
1060 info->ndcb3 = INIT_BUFFER_SIZE;
1061 info->data_size = INIT_BUFFER_SIZE;
ce0268f6
EG
1062 break;
1063
fe69af00 1064 case NAND_CMD_READID:
b226eca2 1065 info->buf_count = READ_ID_BYTES;
4eb2da89
LW
1066 info->ndcb0 |= NDCB0_CMD_TYPE(3)
1067 | NDCB0_ADDR_CYC(1)
ec82135a 1068 | command;
d14231f1 1069 info->ndcb1 = (column & 0xFF);
4eb2da89
LW
1070
1071 info->data_size = 8;
1072 break;
fe69af00 1073 case NAND_CMD_STATUS:
4eb2da89
LW
1074 info->buf_count = 1;
1075 info->ndcb0 |= NDCB0_CMD_TYPE(4)
1076 | NDCB0_ADDR_CYC(1)
ec82135a 1077 | command;
4eb2da89
LW
1078
1079 info->data_size = 8;
1080 break;
1081
1082 case NAND_CMD_ERASE1:
4eb2da89
LW
1083 info->ndcb0 |= NDCB0_CMD_TYPE(2)
1084 | NDCB0_AUTO_RS
1085 | NDCB0_ADDR_CYC(3)
1086 | NDCB0_DBC
ec82135a
EG
1087 | (NAND_CMD_ERASE2 << 8)
1088 | NAND_CMD_ERASE1;
4eb2da89
LW
1089 info->ndcb1 = page_addr;
1090 info->ndcb2 = 0;
1091
fe69af00 1092 break;
1093 case NAND_CMD_RESET:
4eb2da89 1094 info->ndcb0 |= NDCB0_CMD_TYPE(5)
ec82135a 1095 | command;
4eb2da89
LW
1096
1097 break;
1098
1099 case NAND_CMD_ERASE2:
1100 exec_cmd = 0;
fe69af00 1101 break;
4eb2da89 1102
fe69af00 1103 default:
4eb2da89 1104 exec_cmd = 0;
da675b4e
LW
1105 dev_err(&info->pdev->dev, "non-supported command %x\n",
1106 command);
fe69af00 1107 break;
1108 }
1109
4eb2da89
LW
1110 return exec_cmd;
1111}
1112
5cbbdc6a
EG
1113static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
1114 int column, int page_addr)
4eb2da89 1115{
4bd4ebcc 1116 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1117 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1118 struct pxa3xx_nand_info *info = host->info_data;
e5860c18 1119 int exec_cmd;
4eb2da89
LW
1120
1121 /*
1122 * if this is a x16 device ,then convert the input
1123 * "byte" address into a "word" address appropriate
1124 * for indexing a word-oriented device
1125 */
48cf7efa 1126 if (info->reg_ndcr & NDCR_DWIDTH_M)
4eb2da89
LW
1127 column /= 2;
1128
f3c8cfc2
LW
1129 /*
1130 * There may be different NAND chip hooked to
1131 * different chip select, so check whether
1132 * chip select has been changed, if yes, reset the timing
1133 */
1134 if (info->cs != host->cs) {
1135 info->cs = host->cs;
48cf7efa
EG
1136 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1137 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
f3c8cfc2
LW
1138 }
1139
c39ff03a
EG
1140 prepare_start_command(info, command);
1141
d456882b 1142 info->state = STATE_PREPARED;
70ed8523
EG
1143 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1144
f8155a40
LW
1145 if (exec_cmd) {
1146 init_completion(&info->cmd_complete);
55d9fd6e
EG
1147 init_completion(&info->dev_ready);
1148 info->need_wait = 1;
f8155a40
LW
1149 pxa3xx_nand_start(info);
1150
e5860c18
NMG
1151 if (!wait_for_completion_timeout(&info->cmd_complete,
1152 CHIP_DELAY_TIMEOUT)) {
da675b4e 1153 dev_err(&info->pdev->dev, "Wait time out!!!\n");
f8155a40
LW
1154 /* Stop State Machine for next command cycle */
1155 pxa3xx_nand_stop(info);
1156 }
f8155a40 1157 }
d456882b 1158 info->state = STATE_IDLE;
f8155a40
LW
1159}
1160
5cbbdc6a
EG
1161static void nand_cmdfunc_extended(struct mtd_info *mtd,
1162 const unsigned command,
1163 int column, int page_addr)
70ed8523 1164{
4bd4ebcc 1165 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1166 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
70ed8523 1167 struct pxa3xx_nand_info *info = host->info_data;
e5860c18 1168 int exec_cmd, ext_cmd_type;
70ed8523
EG
1169
1170 /*
1171 * if this is a x16 device then convert the input
1172 * "byte" address into a "word" address appropriate
1173 * for indexing a word-oriented device
1174 */
1175 if (info->reg_ndcr & NDCR_DWIDTH_M)
1176 column /= 2;
1177
1178 /*
1179 * There may be different NAND chip hooked to
1180 * different chip select, so check whether
1181 * chip select has been changed, if yes, reset the timing
1182 */
1183 if (info->cs != host->cs) {
1184 info->cs = host->cs;
1185 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1186 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1187 }
1188
1189 /* Select the extended command for the first command */
1190 switch (command) {
1191 case NAND_CMD_READ0:
1192 case NAND_CMD_READOOB:
1193 ext_cmd_type = EXT_CMD_TYPE_MONO;
1194 break;
535cb57a
EG
1195 case NAND_CMD_SEQIN:
1196 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1197 break;
1198 case NAND_CMD_PAGEPROG:
1199 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1200 break;
70ed8523
EG
1201 default:
1202 ext_cmd_type = 0;
535cb57a 1203 break;
70ed8523
EG
1204 }
1205
1206 prepare_start_command(info, command);
1207
1208 /*
1209 * Prepare the "is ready" completion before starting a command
1210 * transaction sequence. If the command is not executed the
1211 * completion will be completed, see below.
1212 *
1213 * We can do that inside the loop because the command variable
1214 * is invariant and thus so is the exec_cmd.
1215 */
1216 info->need_wait = 1;
1217 init_completion(&info->dev_ready);
1218 do {
1219 info->state = STATE_PREPARED;
1220 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1221 column, page_addr);
1222 if (!exec_cmd) {
1223 info->need_wait = 0;
1224 complete(&info->dev_ready);
1225 break;
1226 }
1227
1228 init_completion(&info->cmd_complete);
1229 pxa3xx_nand_start(info);
1230
e5860c18
NMG
1231 if (!wait_for_completion_timeout(&info->cmd_complete,
1232 CHIP_DELAY_TIMEOUT)) {
70ed8523
EG
1233 dev_err(&info->pdev->dev, "Wait time out!!!\n");
1234 /* Stop State Machine for next command cycle */
1235 pxa3xx_nand_stop(info);
1236 break;
1237 }
1238
1239 /* Check if the sequence is complete */
535cb57a
EG
1240 if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
1241 break;
1242
1243 /*
1244 * After a splitted program command sequence has issued
1245 * the command dispatch, the command sequence is complete.
1246 */
1247 if (info->data_size == 0 &&
1248 command == NAND_CMD_PAGEPROG &&
1249 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
70ed8523
EG
1250 break;
1251
1252 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1253 /* Last read: issue a 'last naked read' */
1254 if (info->data_size == info->chunk_size)
1255 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1256 else
1257 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
535cb57a
EG
1258
1259 /*
1260 * If a splitted program command has no more data to transfer,
1261 * the command dispatch must be issued to complete.
1262 */
1263 } else if (command == NAND_CMD_PAGEPROG &&
1264 info->data_size == 0) {
1265 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
70ed8523
EG
1266 }
1267 } while (1);
1268
1269 info->state = STATE_IDLE;
1270}
1271
fdbad98d 1272static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
45aaeff9
BB
1273 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1274 int page)
f8155a40
LW
1275{
1276 chip->write_buf(mtd, buf, mtd->writesize);
1277 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
fdbad98d
JW
1278
1279 return 0;
f8155a40
LW
1280}
1281
1282static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1fbb938d
BN
1283 struct nand_chip *chip, uint8_t *buf, int oob_required,
1284 int page)
f8155a40 1285{
d699ed25 1286 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1287 struct pxa3xx_nand_info *info = host->info_data;
f8155a40
LW
1288
1289 chip->read_buf(mtd, buf, mtd->writesize);
1290 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1291
87f5336e
EG
1292 if (info->retcode == ERR_CORERR && info->use_ecc) {
1293 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1294
1295 } else if (info->retcode == ERR_UNCORERR) {
f8155a40
LW
1296 /*
1297 * for blank page (all 0xff), HW will calculate its ECC as
1298 * 0, which is different from the ECC information within
87f5336e 1299 * OOB, ignore such uncorrectable errors
f8155a40
LW
1300 */
1301 if (is_buf_blank(buf, mtd->writesize))
543e32d5
DM
1302 info->retcode = ERR_NONE;
1303 else
f8155a40 1304 mtd->ecc_stats.failed++;
fe69af00 1305 }
f8155a40 1306
87f5336e 1307 return info->max_bitflips;
fe69af00 1308}
1309
1310static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1311{
4bd4ebcc 1312 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1313 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1314 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1315 char retval = 0xFF;
1316
1317 if (info->buf_start < info->buf_count)
1318 /* Has just send a new command? */
1319 retval = info->data_buff[info->buf_start++];
1320
1321 return retval;
1322}
1323
1324static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1325{
4bd4ebcc 1326 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1327 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1328 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1329 u16 retval = 0xFFFF;
1330
1331 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1332 retval = *((u16 *)(info->data_buff+info->buf_start));
1333 info->buf_start += 2;
1334 }
1335 return retval;
1336}
1337
1338static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1339{
4bd4ebcc 1340 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1341 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1342 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1343 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1344
1345 memcpy(buf, info->data_buff + info->buf_start, real_len);
1346 info->buf_start += real_len;
1347}
1348
1349static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1350 const uint8_t *buf, int len)
1351{
4bd4ebcc 1352 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1353 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1354 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1355 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1356
1357 memcpy(info->data_buff + info->buf_start, buf, real_len);
1358 info->buf_start += real_len;
1359}
1360
/*
 * select_chip hook: intentionally a no-op — chip-select switching is
 * handled in the cmdfunc paths by reprogramming the timing registers.
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}
1365
1366static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1367{
4bd4ebcc 1368 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1369 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1370 struct pxa3xx_nand_info *info = host->info_data;
55d9fd6e
EG
1371
1372 if (info->need_wait) {
55d9fd6e 1373 info->need_wait = 0;
e5860c18
NMG
1374 if (!wait_for_completion_timeout(&info->dev_ready,
1375 CHIP_DELAY_TIMEOUT)) {
55d9fd6e
EG
1376 dev_err(&info->pdev->dev, "Ready time out!!!\n");
1377 return NAND_STATUS_FAIL;
1378 }
1379 }
fe69af00 1380
1381 /* pxa3xx_nand_send_command has waited for command complete */
1382 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1383 if (info->retcode == ERR_NONE)
1384 return 0;
55d9fd6e
EG
1385 else
1386 return NAND_STATUS_FAIL;
fe69af00 1387 }
1388
55d9fd6e 1389 return NAND_STATUS_READY;
fe69af00 1390}
1391
66e8e47e 1392static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
fe69af00 1393{
b1e48577 1394 struct pxa3xx_nand_host *host = info->host[info->cs];
fe69af00 1395 struct platform_device *pdev = info->pdev;
453810b7 1396 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
b1e48577 1397 const struct nand_sdr_timings *timings;
fe69af00 1398
66e8e47e
EG
1399 /* Configure default flash values */
1400 info->chunk_size = PAGE_CHUNK_SIZE;
f19fe983
AT
1401 info->reg_ndcr = 0x0; /* enable all interrupts */
1402 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1403 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
66e8e47e
EG
1404 info->reg_ndcr |= NDCR_SPARE_EN;
1405
b1e48577
EG
1406 /* use the common timing to make a try */
1407 timings = onfi_async_timing_mode_to_sdr_timings(0);
1408 if (IS_ERR(timings))
1409 return PTR_ERR(timings);
1410
1411 pxa3xx_nand_set_sdr_timing(host, timings);
66e8e47e
EG
1412 return 0;
1413}
1414
1415static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1416{
1417 struct pxa3xx_nand_host *host = info->host[info->cs];
063294a3
BB
1418 struct nand_chip *chip = &host->chip;
1419 struct mtd_info *mtd = nand_to_mtd(chip);
66e8e47e 1420
f19fe983
AT
1421 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1422 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1423 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
fe69af00 1424}
1425
154f50fb 1426static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
f271049e 1427{
66e8e47e
EG
1428 struct platform_device *pdev = info->pdev;
1429 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
f271049e 1430 uint32_t ndcr = nand_readl(info, NDCR);
f271049e 1431
70ed8523 1432 /* Set an initial chunk size */
b226eca2 1433 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
e971affa
RJ
1434 info->reg_ndcr = ndcr &
1435 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
66e8e47e 1436 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
48cf7efa
EG
1437 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1438 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
f271049e
MR
1439}
1440
fe69af00 1441static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1442{
1443 struct platform_device *pdev = info->pdev;
8f5ba31a
RJ
1444 struct dma_slave_config config;
1445 dma_cap_mask_t mask;
1446 struct pxad_param param;
1447 int ret;
fe69af00 1448
8f5ba31a
RJ
1449 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1450 if (info->data_buff == NULL)
1451 return -ENOMEM;
1452 if (use_dma == 0)
fe69af00 1453 return 0;
fe69af00 1454
8f5ba31a
RJ
1455 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1456 if (ret)
1457 return ret;
fe69af00 1458
8f5ba31a
RJ
1459 sg_init_one(&info->sg, info->data_buff, info->buf_size);
1460 dma_cap_zero(mask);
1461 dma_cap_set(DMA_SLAVE, mask);
1462 param.prio = PXAD_PRIO_LOWEST;
1463 param.drcmr = info->drcmr_dat;
1464 info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
1465 &param, &pdev->dev,
1466 "data");
1467 if (!info->dma_chan) {
1468 dev_err(&pdev->dev, "unable to request data dma channel\n");
1469 return -ENODEV;
1470 }
fe69af00 1471
8f5ba31a
RJ
1472 memset(&config, 0, sizeof(config));
1473 config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1474 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1475 config.src_addr = info->mmio_phys + NDDB;
1476 config.dst_addr = info->mmio_phys + NDDB;
1477 config.src_maxburst = 32;
1478 config.dst_maxburst = 32;
1479 ret = dmaengine_slave_config(info->dma_chan, &config);
1480 if (ret < 0) {
1481 dev_err(&info->pdev->dev,
1482 "dma channel configuration failed: %d\n",
1483 ret);
1484 return ret;
fe69af00 1485 }
1486
95b26563
EG
1487 /*
1488 * Now that DMA buffers are allocated we turn on
1489 * DMA proper for I/O operations.
1490 */
1491 info->use_dma = 1;
fe69af00 1492 return 0;
1493}
1494
498b6145
EG
1495static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1496{
15b540c7 1497 if (info->use_dma) {
8f5ba31a
RJ
1498 dmaengine_terminate_all(info->dma_chan);
1499 dma_release_channel(info->dma_chan);
498b6145 1500 }
f4db2e3a
EG
1501 kfree(info->data_buff);
1502}
498b6145 1503
43bcfd2b
EG
1504static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1505 struct nand_ecc_ctrl *ecc,
30b2afc8 1506 int strength, int ecc_stepsize, int page_size)
43bcfd2b 1507{
30b2afc8 1508 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
70ed8523 1509 info->chunk_size = 2048;
43bcfd2b
EG
1510 info->spare_size = 40;
1511 info->ecc_size = 24;
1512 ecc->mode = NAND_ECC_HW;
1513 ecc->size = 512;
1514 ecc->strength = 1;
43bcfd2b 1515
30b2afc8 1516 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
70ed8523 1517 info->chunk_size = 512;
43bcfd2b
EG
1518 info->spare_size = 8;
1519 info->ecc_size = 8;
1520 ecc->mode = NAND_ECC_HW;
1521 ecc->size = 512;
1522 ecc->strength = 1;
43bcfd2b 1523
6033a949
BN
1524 /*
1525 * Required ECC: 4-bit correction per 512 bytes
1526 * Select: 16-bit correction per 2048 bytes
1527 */
3db227b6
RG
1528 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1529 info->ecc_bch = 1;
1530 info->chunk_size = 2048;
1531 info->spare_size = 32;
1532 info->ecc_size = 32;
1533 ecc->mode = NAND_ECC_HW;
1534 ecc->size = info->chunk_size;
1535 ecc->layout = &ecc_layout_2KB_bch4bit;
1536 ecc->strength = 16;
3db227b6 1537
30b2afc8 1538 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
70ed8523
EG
1539 info->ecc_bch = 1;
1540 info->chunk_size = 2048;
1541 info->spare_size = 32;
1542 info->ecc_size = 32;
1543 ecc->mode = NAND_ECC_HW;
1544 ecc->size = info->chunk_size;
1545 ecc->layout = &ecc_layout_4KB_bch4bit;
1546 ecc->strength = 16;
70ed8523 1547
6033a949
BN
1548 /*
1549 * Required ECC: 8-bit correction per 512 bytes
1550 * Select: 16-bit correction per 1024 bytes
1551 */
1552 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
70ed8523
EG
1553 info->ecc_bch = 1;
1554 info->chunk_size = 1024;
1555 info->spare_size = 0;
1556 info->ecc_size = 32;
1557 ecc->mode = NAND_ECC_HW;
1558 ecc->size = info->chunk_size;
1559 ecc->layout = &ecc_layout_4KB_bch8bit;
1560 ecc->strength = 16;
eee0166d
EG
1561 } else {
1562 dev_err(&info->pdev->dev,
1563 "ECC strength %d at page size %d is not supported\n",
1564 strength, page_size);
1565 return -ENODEV;
70ed8523 1566 }
eee0166d
EG
1567
1568 dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
1569 ecc->strength, ecc->size);
43bcfd2b
EG
1570 return 0;
1571}
1572
401e67e2 1573static int pxa3xx_nand_scan(struct mtd_info *mtd)
fe69af00 1574{
4bd4ebcc 1575 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1576 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1577 struct pxa3xx_nand_info *info = host->info_data;
401e67e2 1578 struct platform_device *pdev = info->pdev;
453810b7 1579 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
f19fe983 1580 int ret;
30b2afc8 1581 uint16_t ecc_strength, ecc_step;
401e67e2 1582
154f50fb
EG
1583 if (pdata->keep_config) {
1584 pxa3xx_nand_detect_config(info);
1585 } else {
1586 ret = pxa3xx_nand_config_ident(info);
1587 if (ret)
1588 return ret;
401e67e2
LW
1589 }
1590
48cf7efa 1591 if (info->reg_ndcr & NDCR_DWIDTH_M)
d456882b
LW
1592 chip->options |= NAND_BUSWIDTH_16;
1593
43bcfd2b
EG
1594 /* Device detection must be done with ECC disabled */
1595 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1596 nand_writel(info, NDECCCTRL, 0x0);
1597
f19fe983 1598 if (nand_scan_ident(mtd, 1, NULL))
4332c116 1599 return -ENODEV;
776f265e 1600
f19fe983
AT
1601 if (!pdata->keep_config) {
1602 ret = pxa3xx_nand_init(host);
1603 if (ret) {
1604 dev_err(&info->pdev->dev, "Failed to init nand: %d\n",
1605 ret);
1606 return ret;
1607 }
1608 }
1609
776f265e
EG
1610 if (pdata->flash_bbt) {
1611 /*
1612 * We'll use a bad block table stored in-flash and don't
1613 * allow writing the bad block marker to the flash.
1614 */
1615 chip->bbt_options |= NAND_BBT_USE_FLASH |
1616 NAND_BBT_NO_OOB_BBM;
1617 chip->bbt_td = &bbt_main_descr;
1618 chip->bbt_md = &bbt_mirror_descr;
1619 }
1620
5cbbdc6a
EG
1621 /*
1622 * If the page size is bigger than the FIFO size, let's check
1623 * we are given the right variant and then switch to the extended
1624 * (aka splitted) command handling,
1625 */
1626 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1627 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1628 chip->cmdfunc = nand_cmdfunc_extended;
1629 } else {
1630 dev_err(&info->pdev->dev,
1631 "unsupported page size on this variant\n");
1632 return -ENODEV;
1633 }
1634 }
1635
5b3e5078
EG
1636 if (pdata->ecc_strength && pdata->ecc_step_size) {
1637 ecc_strength = pdata->ecc_strength;
1638 ecc_step = pdata->ecc_step_size;
1639 } else {
1640 ecc_strength = chip->ecc_strength_ds;
1641 ecc_step = chip->ecc_step_ds;
1642 }
30b2afc8
EG
1643
1644 /* Set default ECC strength requirements on non-ONFI devices */
1645 if (ecc_strength < 1 && ecc_step < 1) {
1646 ecc_strength = 1;
1647 ecc_step = 512;
1648 }
1649
1650 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1651 ecc_step, mtd->writesize);
eee0166d
EG
1652 if (ret)
1653 return ret;
43bcfd2b 1654
4332c116 1655 /* calculate addressing information */
d456882b
LW
1656 if (mtd->writesize >= 2048)
1657 host->col_addr_cycles = 2;
1658 else
1659 host->col_addr_cycles = 1;
1660
62e8b851
EG
1661 /* release the initial buffer */
1662 kfree(info->data_buff);
1663
1664 /* allocate the real data + oob buffer */
1665 info->buf_size = mtd->writesize + mtd->oobsize;
1666 ret = pxa3xx_nand_init_buff(info);
1667 if (ret)
1668 return ret;
4332c116 1669 info->oob_buff = info->data_buff + mtd->writesize;
62e8b851 1670
4332c116 1671 if ((mtd->size >> chip->page_shift) > 65536)
d456882b 1672 host->row_addr_cycles = 3;
4332c116 1673 else
d456882b 1674 host->row_addr_cycles = 2;
66e8e47e
EG
1675
1676 if (!pdata->keep_config)
1677 pxa3xx_nand_config_tail(info);
1678
401e67e2 1679 return nand_scan_tail(mtd);
fe69af00 1680}
1681
d456882b 1682static int alloc_nand_resource(struct platform_device *pdev)
fe69af00 1683{
a61ae81a 1684 struct device_node *np = pdev->dev.of_node;
f3c8cfc2 1685 struct pxa3xx_nand_platform_data *pdata;
fe69af00 1686 struct pxa3xx_nand_info *info;
d456882b 1687 struct pxa3xx_nand_host *host;
6e308f87 1688 struct nand_chip *chip = NULL;
fe69af00 1689 struct mtd_info *mtd;
1690 struct resource *r;
f3c8cfc2 1691 int ret, irq, cs;
fe69af00 1692
453810b7 1693 pdata = dev_get_platdata(&pdev->dev);
e423c90a
RJ
1694 if (pdata->num_cs <= 0)
1695 return -ENODEV;
063294a3
BB
1696 info = devm_kzalloc(&pdev->dev,
1697 sizeof(*info) + sizeof(*host) * pdata->num_cs,
1698 GFP_KERNEL);
4c073cd2 1699 if (!info)
d456882b 1700 return -ENOMEM;
fe69af00 1701
fe69af00 1702 info->pdev = pdev;
c7e9c7e7 1703 info->variant = pxa3xx_nand_get_variant(pdev);
f3c8cfc2 1704 for (cs = 0; cs < pdata->num_cs; cs++) {
063294a3
BB
1705 host = (void *)&info[1] + sizeof(*host) * cs;
1706 chip = &host->chip;
d699ed25 1707 nand_set_controller_data(chip, host);
063294a3 1708 mtd = nand_to_mtd(chip);
f3c8cfc2 1709 info->host[cs] = host;
f3c8cfc2
LW
1710 host->cs = cs;
1711 host->info_data = info;
550dab5b 1712 mtd->dev.parent = &pdev->dev;
a61ae81a
BN
1713 /* FIXME: all chips use the same device tree partitions */
1714 nand_set_flash_node(chip, np);
f3c8cfc2 1715
d699ed25 1716 nand_set_controller_data(chip, host);
f3c8cfc2
LW
1717 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1718 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1719 chip->controller = &info->controller;
1720 chip->waitfunc = pxa3xx_nand_waitfunc;
1721 chip->select_chip = pxa3xx_nand_select_chip;
f3c8cfc2
LW
1722 chip->read_word = pxa3xx_nand_read_word;
1723 chip->read_byte = pxa3xx_nand_read_byte;
1724 chip->read_buf = pxa3xx_nand_read_buf;
1725 chip->write_buf = pxa3xx_nand_write_buf;
664c7f5e 1726 chip->options |= NAND_NO_SUBPAGE_WRITE;
5cbbdc6a 1727 chip->cmdfunc = nand_cmdfunc;
f3c8cfc2 1728 }
401e67e2
LW
1729
1730 spin_lock_init(&chip->controller->lock);
1731 init_waitqueue_head(&chip->controller->wq);
9ca7944d 1732 info->clk = devm_clk_get(&pdev->dev, NULL);
fe69af00 1733 if (IS_ERR(info->clk)) {
1734 dev_err(&pdev->dev, "failed to get nand clock\n");
4c073cd2 1735 return PTR_ERR(info->clk);
fe69af00 1736 }
1f8eaff2
EG
1737 ret = clk_prepare_enable(info->clk);
1738 if (ret < 0)
1739 return ret;
fe69af00 1740
6b45c1ee 1741 if (use_dma) {
8f5ba31a
RJ
1742 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1743 if (r == NULL) {
1744 dev_err(&pdev->dev,
1745 "no resource defined for data DMA\n");
1746 ret = -ENXIO;
1747 goto fail_disable_clk;
1e7ba630 1748 }
8f5ba31a
RJ
1749 info->drcmr_dat = r->start;
1750
1751 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1752 if (r == NULL) {
1753 dev_err(&pdev->dev,
1754 "no resource defined for cmd DMA\n");
1755 ret = -ENXIO;
1756 goto fail_disable_clk;
1757 }
1758 info->drcmr_cmd = r->start;
fe69af00 1759 }
fe69af00 1760
1761 irq = platform_get_irq(pdev, 0);
1762 if (irq < 0) {
1763 dev_err(&pdev->dev, "no IRQ resource defined\n");
1764 ret = -ENXIO;
9ca7944d 1765 goto fail_disable_clk;
fe69af00 1766 }
1767
1768 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
0ddd846f
EG
1769 info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1770 if (IS_ERR(info->mmio_base)) {
1771 ret = PTR_ERR(info->mmio_base);
9ca7944d 1772 goto fail_disable_clk;
fe69af00 1773 }
8638fac8 1774 info->mmio_phys = r->start;
fe69af00 1775
62e8b851
EG
1776 /* Allocate a buffer to allow flash detection */
1777 info->buf_size = INIT_BUFFER_SIZE;
1778 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1779 if (info->data_buff == NULL) {
1780 ret = -ENOMEM;
9ca7944d 1781 goto fail_disable_clk;
62e8b851 1782 }
fe69af00 1783
346e1259
HZ
1784 /* initialize all interrupts to be disabled */
1785 disable_int(info, NDSR_MASK);
1786
24542257
RJ
1787 ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1788 pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1789 pdev->name, info);
fe69af00 1790 if (ret < 0) {
1791 dev_err(&pdev->dev, "failed to request IRQ\n");
1792 goto fail_free_buf;
1793 }
1794
e353a20a 1795 platform_set_drvdata(pdev, info);
fe69af00 1796
d456882b 1797 return 0;
fe69af00 1798
fe69af00 1799fail_free_buf:
401e67e2 1800 free_irq(irq, info);
62e8b851 1801 kfree(info->data_buff);
9ca7944d 1802fail_disable_clk:
fb32061f 1803 clk_disable_unprepare(info->clk);
d456882b 1804 return ret;
fe69af00 1805}
1806
1807static int pxa3xx_nand_remove(struct platform_device *pdev)
1808{
e353a20a 1809 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
f3c8cfc2 1810 struct pxa3xx_nand_platform_data *pdata;
f3c8cfc2 1811 int irq, cs;
fe69af00 1812
d456882b
LW
1813 if (!info)
1814 return 0;
1815
453810b7 1816 pdata = dev_get_platdata(&pdev->dev);
fe69af00 1817
dbf5986a
HZ
1818 irq = platform_get_irq(pdev, 0);
1819 if (irq >= 0)
1820 free_irq(irq, info);
498b6145 1821 pxa3xx_nand_free_buff(info);
82a72d10 1822
e971affa
RJ
1823 /*
1824 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
1825 * In order to prevent a lockup of the system bus, the DFI bus
1826 * arbitration is granted to SMC upon driver removal. This is done by
1827 * setting the x_ARB_CNTL bit, which also prevents the NAND to have
1828 * access to the bus anymore.
1829 */
1830 nand_writel(info, NDCR,
1831 (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
1832 NFCV1_NDCR_ARB_CNTL);
fb32061f 1833 clk_disable_unprepare(info->clk);
82a72d10 1834
f3c8cfc2 1835 for (cs = 0; cs < pdata->num_cs; cs++)
063294a3 1836 nand_release(nand_to_mtd(&info->host[cs]->chip));
fe69af00 1837 return 0;
1838}
1839
1e7ba630
DM
1840static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1841{
1842 struct pxa3xx_nand_platform_data *pdata;
1843 struct device_node *np = pdev->dev.of_node;
1844 const struct of_device_id *of_id =
1845 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1846
1847 if (!of_id)
1848 return 0;
1849
1850 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1851 if (!pdata)
1852 return -ENOMEM;
1853
1854 if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1855 pdata->enable_arbiter = 1;
1856 if (of_get_property(np, "marvell,nand-keep-config", NULL))
1857 pdata->keep_config = 1;
1858 of_property_read_u32(np, "num-cs", &pdata->num_cs);
776f265e 1859 pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1e7ba630 1860
5b3e5078
EG
1861 pdata->ecc_strength = of_get_nand_ecc_strength(np);
1862 if (pdata->ecc_strength < 0)
1863 pdata->ecc_strength = 0;
1864
1865 pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
1866 if (pdata->ecc_step_size < 0)
1867 pdata->ecc_step_size = 0;
1868
1e7ba630
DM
1869 pdev->dev.platform_data = pdata;
1870
1871 return 0;
1872}
1e7ba630 1873
e353a20a
LW
1874static int pxa3xx_nand_probe(struct platform_device *pdev)
1875{
1876 struct pxa3xx_nand_platform_data *pdata;
1877 struct pxa3xx_nand_info *info;
8f5ba31a 1878 int ret, cs, probe_success, dma_available;
e353a20a 1879
8f5ba31a
RJ
1880 dma_available = IS_ENABLED(CONFIG_ARM) &&
1881 (IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
1882 if (use_dma && !dma_available) {
f4db2e3a
EG
1883 use_dma = 0;
1884 dev_warn(&pdev->dev,
1885 "This platform can't do DMA on this device\n");
1886 }
8f5ba31a 1887
1e7ba630
DM
1888 ret = pxa3xx_nand_probe_dt(pdev);
1889 if (ret)
1890 return ret;
1891
453810b7 1892 pdata = dev_get_platdata(&pdev->dev);
e353a20a
LW
1893 if (!pdata) {
1894 dev_err(&pdev->dev, "no platform data defined\n");
1895 return -ENODEV;
1896 }
1897
d456882b
LW
1898 ret = alloc_nand_resource(pdev);
1899 if (ret) {
1900 dev_err(&pdev->dev, "alloc nand resource failed\n");
1901 return ret;
1902 }
e353a20a 1903
d456882b 1904 info = platform_get_drvdata(pdev);
f3c8cfc2
LW
1905 probe_success = 0;
1906 for (cs = 0; cs < pdata->num_cs; cs++) {
063294a3 1907 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
f455578d 1908
18a84e93
EG
1909 /*
1910 * The mtd name matches the one used in 'mtdparts' kernel
1911 * parameter. This name cannot be changed or otherwise
1912 * user's mtd partitions configuration would get broken.
1913 */
1914 mtd->name = "pxa3xx_nand-0";
f3c8cfc2 1915 info->cs = cs;
b7655bcb 1916 ret = pxa3xx_nand_scan(mtd);
f3c8cfc2
LW
1917 if (ret) {
1918 dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1919 cs);
1920 continue;
1921 }
1922
a61ae81a
BN
1923 ret = mtd_device_register(mtd, pdata->parts[cs],
1924 pdata->nr_parts[cs]);
f3c8cfc2
LW
1925 if (!ret)
1926 probe_success = 1;
1927 }
1928
1929 if (!probe_success) {
e353a20a
LW
1930 pxa3xx_nand_remove(pdev);
1931 return -ENODEV;
1932 }
1933
f3c8cfc2 1934 return 0;
e353a20a
LW
1935}
1936
fe69af00 1937#ifdef CONFIG_PM
d3e94f3f 1938static int pxa3xx_nand_suspend(struct device *dev)
fe69af00 1939{
d3e94f3f 1940 struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
fe69af00 1941
f8155a40 1942 if (info->state) {
d3e94f3f 1943 dev_err(dev, "driver busy, state = %d\n", info->state);
fe69af00 1944 return -EAGAIN;
1945 }
1946
d55d31a6 1947 clk_disable(info->clk);
fe69af00 1948 return 0;
1949}
1950
d3e94f3f 1951static int pxa3xx_nand_resume(struct device *dev)
fe69af00 1952{
d3e94f3f 1953 struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
d55d31a6
EG
1954 int ret;
1955
1956 ret = clk_enable(info->clk);
1957 if (ret < 0)
1958 return ret;
051fc41c
LW
1959
1960 /* We don't want to handle interrupt without calling mtd routine */
1961 disable_int(info, NDCR_INT_MASK);
fe69af00 1962
f3c8cfc2
LW
1963 /*
1964 * Directly set the chip select to a invalid value,
1965 * then the driver would reset the timing according
1966 * to current chip select at the beginning of cmdfunc
1967 */
1968 info->cs = 0xff;
fe69af00 1969
051fc41c
LW
1970 /*
1971 * As the spec says, the NDSR would be updated to 0x1800 when
1972 * doing the nand_clk disable/enable.
1973 * To prevent it damaging state machine of the driver, clear
1974 * all status before resume
1975 */
1976 nand_writel(info, NDSR, NDSR_MASK);
f3c8cfc2 1977
18c81b18 1978 return 0;
fe69af00 1979}
1980#else
1981#define pxa3xx_nand_suspend NULL
1982#define pxa3xx_nand_resume NULL
1983#endif
1984
d3e94f3f
BN
1985static const struct dev_pm_ops pxa3xx_nand_pm_ops = {
1986 .suspend = pxa3xx_nand_suspend,
1987 .resume = pxa3xx_nand_resume,
1988};
1989
fe69af00 1990static struct platform_driver pxa3xx_nand_driver = {
1991 .driver = {
1992 .name = "pxa3xx-nand",
5576bc7b 1993 .of_match_table = pxa3xx_nand_dt_ids,
d3e94f3f 1994 .pm = &pxa3xx_nand_pm_ops,
fe69af00 1995 },
1996 .probe = pxa3xx_nand_probe,
1997 .remove = pxa3xx_nand_remove,
fe69af00 1998};
1999
f99640de 2000module_platform_driver(pxa3xx_nand_driver);
fe69af00 2001
2002MODULE_LICENSE("GPL");
2003MODULE_DESCRIPTION("PXA3xx NAND controller driver");
This page took 0.56279 seconds and 5 git commands to generate.