mtd: nand: pxa3xx: NAND controller driver (note: the commit title above this
export referred to a different file; this source is drivers/mtd/nand/pxa3xx_nand.c)
[deliverable/linux.git] / drivers / mtd / nand / pxa3xx_nand.c
CommitLineData
fe69af00 1/*
2 * drivers/mtd/nand/pxa3xx_nand.c
3 *
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
de484a38
EG
10 *
11 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
fe69af00 12 */
13
a88bdbb5 14#include <linux/kernel.h>
fe69af00 15#include <linux/module.h>
16#include <linux/interrupt.h>
17#include <linux/platform_device.h>
8f5ba31a 18#include <linux/dmaengine.h>
fe69af00 19#include <linux/dma-mapping.h>
8f5ba31a 20#include <linux/dma/pxa-dma.h>
fe69af00 21#include <linux/delay.h>
22#include <linux/clk.h>
23#include <linux/mtd/mtd.h>
24#include <linux/mtd/nand.h>
25#include <linux/mtd/partitions.h>
a1c06ee1 26#include <linux/io.h>
afca11ec 27#include <linux/iopoll.h>
a1c06ee1 28#include <linux/irq.h>
5a0e3ad6 29#include <linux/slab.h>
1e7ba630
DM
30#include <linux/of.h>
31#include <linux/of_device.h>
776f265e 32#include <linux/of_mtd.h>
293b2da1 33#include <linux/platform_data/mtd-nand-pxa3xx.h>
fe69af00 34
e5860c18
NMG
35#define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
36#define NAND_STOP_DELAY msecs_to_jiffies(40)
4eb2da89 37#define PAGE_CHUNK_SIZE (2048)
fe69af00 38
62e8b851
EG
39/*
40 * Define a buffer size for the initial command that detects the flash device:
c1634097
EG
41 * STATUS, READID and PARAM.
42 * ONFI param page is 256 bytes, and there are three redundant copies
43 * to be read. JEDEC param page is 512 bytes, and there are also three
44 * redundant copies to be read.
45 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
62e8b851 46 */
c1634097 47#define INIT_BUFFER_SIZE 2048
62e8b851 48
fe69af00 49/* registers and bit definitions */
50#define NDCR (0x00) /* Control register */
51#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
52#define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
53#define NDSR (0x14) /* Status Register */
54#define NDPCR (0x18) /* Page Count Register */
55#define NDBDR0 (0x1C) /* Bad Block Register 0 */
56#define NDBDR1 (0x20) /* Bad Block Register 1 */
43bcfd2b 57#define NDECCCTRL (0x28) /* ECC control */
fe69af00 58#define NDDB (0x40) /* Data Buffer */
59#define NDCB0 (0x48) /* Command Buffer0 */
60#define NDCB1 (0x4C) /* Command Buffer1 */
61#define NDCB2 (0x50) /* Command Buffer2 */
62
63#define NDCR_SPARE_EN (0x1 << 31)
64#define NDCR_ECC_EN (0x1 << 30)
65#define NDCR_DMA_EN (0x1 << 29)
66#define NDCR_ND_RUN (0x1 << 28)
67#define NDCR_DWIDTH_C (0x1 << 27)
68#define NDCR_DWIDTH_M (0x1 << 26)
69#define NDCR_PAGE_SZ (0x1 << 24)
70#define NDCR_NCSX (0x1 << 23)
71#define NDCR_ND_MODE (0x3 << 21)
72#define NDCR_NAND_MODE (0x0)
73#define NDCR_CLR_PG_CNT (0x1 << 20)
e971affa
RJ
74#define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
75#define NFCV2_NDCR_STOP_ON_UNCOR (0x1 << 19)
fe69af00 76#define NDCR_RD_ID_CNT_MASK (0x7 << 16)
77#define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
78
79#define NDCR_RA_START (0x1 << 15)
80#define NDCR_PG_PER_BLK (0x1 << 14)
81#define NDCR_ND_ARB_EN (0x1 << 12)
f8155a40 82#define NDCR_INT_MASK (0xFFF)
fe69af00 83
84#define NDSR_MASK (0xfff)
87f5336e
EG
85#define NDSR_ERR_CNT_OFF (16)
86#define NDSR_ERR_CNT_MASK (0x1f)
87#define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
f8155a40
LW
88#define NDSR_RDY (0x1 << 12)
89#define NDSR_FLASH_RDY (0x1 << 11)
fe69af00 90#define NDSR_CS0_PAGED (0x1 << 10)
91#define NDSR_CS1_PAGED (0x1 << 9)
92#define NDSR_CS0_CMDD (0x1 << 8)
93#define NDSR_CS1_CMDD (0x1 << 7)
94#define NDSR_CS0_BBD (0x1 << 6)
95#define NDSR_CS1_BBD (0x1 << 5)
87f5336e
EG
96#define NDSR_UNCORERR (0x1 << 4)
97#define NDSR_CORERR (0x1 << 3)
fe69af00 98#define NDSR_WRDREQ (0x1 << 2)
99#define NDSR_RDDREQ (0x1 << 1)
100#define NDSR_WRCMDREQ (0x1)
101
41a63430 102#define NDCB0_LEN_OVRD (0x1 << 28)
4eb2da89 103#define NDCB0_ST_ROW_EN (0x1 << 26)
fe69af00 104#define NDCB0_AUTO_RS (0x1 << 25)
105#define NDCB0_CSEL (0x1 << 24)
70ed8523
EG
106#define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
107#define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
fe69af00 108#define NDCB0_CMD_TYPE_MASK (0x7 << 21)
109#define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
110#define NDCB0_NC (0x1 << 20)
111#define NDCB0_DBC (0x1 << 19)
112#define NDCB0_ADDR_CYC_MASK (0x7 << 16)
113#define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
114#define NDCB0_CMD2_MASK (0xff << 8)
115#define NDCB0_CMD1_MASK (0xff)
116#define NDCB0_ADDR_CYC_SHIFT (16)
117
70ed8523
EG
118#define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
119#define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
120#define EXT_CMD_TYPE_READ 4 /* Read */
121#define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
122#define EXT_CMD_TYPE_FINAL 3 /* Final command */
123#define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
124#define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
125
b226eca2
EG
126/*
127 * This should be large enough to read 'ONFI' and 'JEDEC'.
128 * Let's use 7 bytes, which is the maximum ID count supported
129 * by the controller (see NDCR_RD_ID_CNT_MASK).
130 */
131#define READ_ID_BYTES 7
132
fe69af00 133/* macros for registers read/write */
26d072e3
RJ
134#define nand_writel(info, off, val) \
135 do { \
136 dev_vdbg(&info->pdev->dev, \
137 "%s():%d nand_writel(0x%x, 0x%04x)\n", \
138 __func__, __LINE__, (val), (off)); \
139 writel_relaxed((val), (info)->mmio_base + (off)); \
140 } while (0)
fe69af00 141
26d072e3
RJ
142#define nand_readl(info, off) \
143 ({ \
144 unsigned int _v; \
145 _v = readl_relaxed((info)->mmio_base + (off)); \
146 dev_vdbg(&info->pdev->dev, \
147 "%s():%d nand_readl(0x%04x) = 0x%x\n", \
148 __func__, __LINE__, (off), _v); \
149 _v; \
150 })
fe69af00 151
152/* error code and state */
153enum {
154 ERR_NONE = 0,
155 ERR_DMABUSERR = -1,
156 ERR_SENDCMD = -2,
87f5336e 157 ERR_UNCORERR = -3,
fe69af00 158 ERR_BBERR = -4,
87f5336e 159 ERR_CORERR = -5,
fe69af00 160};
161
162enum {
f8155a40 163 STATE_IDLE = 0,
d456882b 164 STATE_PREPARED,
fe69af00 165 STATE_CMD_HANDLE,
166 STATE_DMA_READING,
167 STATE_DMA_WRITING,
168 STATE_DMA_DONE,
169 STATE_PIO_READING,
170 STATE_PIO_WRITING,
f8155a40
LW
171 STATE_CMD_DONE,
172 STATE_READY,
fe69af00 173};
174
c0f3b864
EG
175enum pxa3xx_nand_variant {
176 PXA3XX_NAND_VARIANT_PXA,
177 PXA3XX_NAND_VARIANT_ARMADA370,
178};
179
d456882b
LW
180struct pxa3xx_nand_host {
181 struct nand_chip chip;
d456882b
LW
182 void *info_data;
183
184 /* page size of attached chip */
d456882b 185 int use_ecc;
f3c8cfc2 186 int cs;
fe69af00 187
d456882b
LW
188 /* calculated from pxa3xx_nand_flash data */
189 unsigned int col_addr_cycles;
190 unsigned int row_addr_cycles;
d456882b
LW
191};
192
193struct pxa3xx_nand_info {
401e67e2 194 struct nand_hw_control controller;
fe69af00 195 struct platform_device *pdev;
fe69af00 196
197 struct clk *clk;
198 void __iomem *mmio_base;
8638fac8 199 unsigned long mmio_phys;
55d9fd6e 200 struct completion cmd_complete, dev_ready;
fe69af00 201
202 unsigned int buf_start;
203 unsigned int buf_count;
62e8b851 204 unsigned int buf_size;
fa543bef
EG
205 unsigned int data_buff_pos;
206 unsigned int oob_buff_pos;
fe69af00 207
208 /* DMA information */
8f5ba31a
RJ
209 struct scatterlist sg;
210 enum dma_data_direction dma_dir;
211 struct dma_chan *dma_chan;
212 dma_cookie_t dma_cookie;
fe69af00 213 int drcmr_dat;
214 int drcmr_cmd;
215
216 unsigned char *data_buff;
18c81b18 217 unsigned char *oob_buff;
fe69af00 218 dma_addr_t data_buff_phys;
fe69af00 219 int data_dma_ch;
fe69af00 220
f3c8cfc2 221 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
fe69af00 222 unsigned int state;
223
c0f3b864
EG
224 /*
225 * This driver supports NFCv1 (as found in PXA SoC)
226 * and NFCv2 (as found in Armada 370/XP SoC).
227 */
228 enum pxa3xx_nand_variant variant;
229
f3c8cfc2 230 int cs;
fe69af00 231 int use_ecc; /* use HW ECC ? */
43bcfd2b 232 int ecc_bch; /* using BCH ECC? */
fe69af00 233 int use_dma; /* use DMA ? */
5bb653e8 234 int use_spare; /* use spare ? */
55d9fd6e 235 int need_wait;
fe69af00 236
c2cdace7
TP
237 /* Amount of real data per full chunk */
238 unsigned int chunk_size;
239
240 /* Amount of spare data per full chunk */
43bcfd2b 241 unsigned int spare_size;
c2cdace7
TP
242
243 /* Number of full chunks (i.e chunk_size + spare_size) */
244 unsigned int nfullchunks;
245
246 /*
247 * Total number of chunks. If equal to nfullchunks, then there
248 * are only full chunks. Otherwise, there is one last chunk of
249 * size (last_chunk_size + last_spare_size)
250 */
251 unsigned int ntotalchunks;
252
253 /* Amount of real data in the last chunk */
254 unsigned int last_chunk_size;
255
256 /* Amount of spare data in the last chunk */
257 unsigned int last_spare_size;
258
43bcfd2b 259 unsigned int ecc_size;
87f5336e
EG
260 unsigned int ecc_err_cnt;
261 unsigned int max_bitflips;
fe69af00 262 int retcode;
fe69af00 263
c2cdace7
TP
264 /*
265 * Variables only valid during command
266 * execution. step_chunk_size and step_spare_size is the
267 * amount of real data and spare data in the current
268 * chunk. cur_chunk is the current chunk being
269 * read/programmed.
270 */
271 unsigned int step_chunk_size;
272 unsigned int step_spare_size;
273 unsigned int cur_chunk;
274
48cf7efa
EG
275 /* cached register value */
276 uint32_t reg_ndcr;
277 uint32_t ndtr0cs0;
278 uint32_t ndtr1cs0;
279
fe69af00 280 /* generated NDCBx register values */
281 uint32_t ndcb0;
282 uint32_t ndcb1;
283 uint32_t ndcb2;
3a1a344a 284 uint32_t ndcb3;
fe69af00 285};
286
90ab5ee9 287static bool use_dma = 1;
fe69af00 288module_param(use_dma, bool, 0444);
25985edc 289MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
fe69af00 290
a9cadf72
EG
291struct pxa3xx_nand_timing {
292 unsigned int tCH; /* Enable signal hold time */
293 unsigned int tCS; /* Enable signal setup time */
294 unsigned int tWH; /* ND_nWE high duration */
295 unsigned int tWP; /* ND_nWE pulse time */
296 unsigned int tRH; /* ND_nRE high duration */
297 unsigned int tRP; /* ND_nRE pulse width */
298 unsigned int tR; /* ND_nWE high to ND_nRE low for read */
299 unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */
300 unsigned int tAR; /* ND_ALE low to ND_nRE low delay */
301};
302
303struct pxa3xx_nand_flash {
a9cadf72 304 uint32_t chip_id;
a9cadf72
EG
305 unsigned int flash_width; /* Width of Flash memory (DWIDTH_M) */
306 unsigned int dfc_width; /* Width of flash controller(DWIDTH_C) */
a9cadf72
EG
307 struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
308};
309
c1f82478 310static struct pxa3xx_nand_timing timing[] = {
227a886c
LW
311 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
312 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
313 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
314 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
d3490dfd
HZ
315};
316
c1f82478 317static struct pxa3xx_nand_flash builtin_flash_types[] = {
89c1702d
AT
318 { 0x46ec, 16, 16, &timing[1] },
319 { 0xdaec, 8, 8, &timing[1] },
320 { 0xd7ec, 8, 8, &timing[1] },
321 { 0xa12c, 8, 8, &timing[2] },
322 { 0xb12c, 16, 16, &timing[2] },
323 { 0xdc2c, 8, 8, &timing[2] },
324 { 0xcc2c, 16, 16, &timing[2] },
325 { 0xba20, 16, 16, &timing[3] },
d3490dfd
HZ
326};
327
776f265e
EG
328static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
329static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
330
331static struct nand_bbt_descr bbt_main_descr = {
332 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
333 | NAND_BBT_2BIT | NAND_BBT_VERSION,
334 .offs = 8,
335 .len = 6,
336 .veroffs = 14,
337 .maxblocks = 8, /* Last 8 blocks in each chip */
338 .pattern = bbt_pattern
339};
340
341static struct nand_bbt_descr bbt_mirror_descr = {
342 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
343 | NAND_BBT_2BIT | NAND_BBT_VERSION,
344 .offs = 8,
345 .len = 6,
346 .veroffs = 14,
347 .maxblocks = 8, /* Last 8 blocks in each chip */
348 .pattern = bbt_mirror_pattern
349};
350
3db227b6
RG
351static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
352 .eccbytes = 32,
353 .eccpos = {
354 32, 33, 34, 35, 36, 37, 38, 39,
355 40, 41, 42, 43, 44, 45, 46, 47,
356 48, 49, 50, 51, 52, 53, 54, 55,
357 56, 57, 58, 59, 60, 61, 62, 63},
358 .oobfree = { {2, 30} }
359};
360
70ed8523
EG
361static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
362 .eccbytes = 64,
363 .eccpos = {
364 32, 33, 34, 35, 36, 37, 38, 39,
365 40, 41, 42, 43, 44, 45, 46, 47,
366 48, 49, 50, 51, 52, 53, 54, 55,
367 56, 57, 58, 59, 60, 61, 62, 63,
368 96, 97, 98, 99, 100, 101, 102, 103,
369 104, 105, 106, 107, 108, 109, 110, 111,
370 112, 113, 114, 115, 116, 117, 118, 119,
371 120, 121, 122, 123, 124, 125, 126, 127},
372 /* Bootrom looks in bytes 0 & 5 for bad blocks */
373 .oobfree = { {6, 26}, { 64, 32} }
374};
375
376static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
377 .eccbytes = 128,
378 .eccpos = {
379 32, 33, 34, 35, 36, 37, 38, 39,
380 40, 41, 42, 43, 44, 45, 46, 47,
381 48, 49, 50, 51, 52, 53, 54, 55,
382 56, 57, 58, 59, 60, 61, 62, 63},
383 .oobfree = { }
384};
385
fe69af00 386#define NDTR0_tCH(c) (min((c), 7) << 19)
387#define NDTR0_tCS(c) (min((c), 7) << 16)
388#define NDTR0_tWH(c) (min((c), 7) << 11)
389#define NDTR0_tWP(c) (min((c), 7) << 8)
390#define NDTR0_tRH(c) (min((c), 7) << 3)
391#define NDTR0_tRP(c) (min((c), 7) << 0)
392
393#define NDTR1_tR(c) (min((c), 65535) << 16)
394#define NDTR1_tWHR(c) (min((c), 15) << 4)
395#define NDTR1_tAR(c) (min((c), 15) << 0)
396
397/* convert nano-seconds to nand flash controller clock cycles */
93b352fc 398#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
fe69af00 399
17754ad6 400static const struct of_device_id pxa3xx_nand_dt_ids[] = {
c7e9c7e7
EG
401 {
402 .compatible = "marvell,pxa3xx-nand",
403 .data = (void *)PXA3XX_NAND_VARIANT_PXA,
404 },
1963ff97
EG
405 {
406 .compatible = "marvell,armada370-nand",
407 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
408 },
c7e9c7e7
EG
409 {}
410};
411MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
412
413static enum pxa3xx_nand_variant
414pxa3xx_nand_get_variant(struct platform_device *pdev)
415{
416 const struct of_device_id *of_id =
417 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
418 if (!of_id)
419 return PXA3XX_NAND_VARIANT_PXA;
420 return (enum pxa3xx_nand_variant)of_id->data;
421}
422
/*
 * Program the CS0 timing registers (NDTR0CS0/NDTR1CS0) from a legacy
 * pxa3xx_nand_timing description. All inputs are nanoseconds; they are
 * converted to controller clock cycles with ns2cycle(), and each
 * NDTR0_*/NDTR1_* field macro clamps the value to its field width.
 */
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	/* Cache the programmed values (presumably for later restore — confirm) */
	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
446
3f225b7f
AT
/*
 * Program the CS0 timing registers from ONFI SDR timings.
 * nand_sdr_timings values are expressed in picoseconds, hence the
 * DIV_ROUND_UP(..., 1000) conversions to nanoseconds before the
 * ns2cycle() translation. tWP/tRP are derived from cycle time minus
 * the high phase (tWC - tWH, tRC - tREH).
 */
static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
				       const struct nand_sdr_timings *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct nand_chip *chip = &host->chip;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
	/* chip_delay * 1000 — assumes chip_delay is in us, giving ns; confirm */
	u32 tR = chip->chip_delay * 1000;
	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);

	/* fallback to a default value if tR = 0 */
	if (!tR)
		tR = 20000;

	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));

	/* Cache the programmed values alongside the hardware write */
	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
485
/*
 * Fallback timing setup for chips without usable ONFI timing modes:
 * issue READID, build a 16-bit id from the first two ID bytes (first
 * byte in the low 8 bits), look it up in builtin_flash_types, and
 * program that entry's legacy timings. On success the entry's bus
 * widths are returned through @flash_width / @dfc_width.
 *
 * Returns 0 on success, -EINVAL when the ID is not in the table.
 */
static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
					   unsigned int *flash_width,
					   unsigned int *dfc_width)
{
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	const struct pxa3xx_nand_flash *f = NULL;
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
	int i, id, ntypes;

	ntypes = ARRAY_SIZE(builtin_flash_types);

	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);

	id = chip->read_byte(mtd);
	id |= chip->read_byte(mtd) << 0x8;

	for (i = 0; i < ntypes; i++) {
		f = &builtin_flash_types[i];

		if (f->chip_id == id)
			break;
	}

	if (i == ntypes) {
		dev_err(&info->pdev->dev, "Error: timings not found\n");
		return -EINVAL;
	}

	pxa3xx_nand_set_timing(host, f->timing);

	*flash_width = f->flash_width;
	*dfc_width = f->dfc_width;

	return 0;
}
522
/*
 * Derive SDR timings from an ONFI async timing-mode bitmask: pick the
 * highest advertised mode (mode 0 when the mask is empty) and program
 * the corresponding timings.
 *
 * Returns 0 on success or the PTR_ERR code from the timings lookup.
 */
static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host *host,
					 int mode)
{
	const struct nand_sdr_timings *sdr;
	int best_mode = fls(mode) - 1;

	if (best_mode < 0)
		best_mode = 0;

	sdr = onfi_async_timing_mode_to_sdr_timings(best_mode);
	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	pxa3xx_nand_set_sdr_timing(host, sdr);
	return 0;
}
540
/*
 * Per-chip controller timing/bus-width initialization. Prefer the
 * chip's ONFI-advertised async timing modes; for non-ONFI parts fall
 * back to the builtin chip-ID table, in which case the detected bus
 * widths are also folded into the cached NDCR value (NDCR_DWIDTH_M /
 * NDCR_DWIDTH_C) and the chip options.
 *
 * Returns 0 on success or a negative errno from the timing setup.
 */
static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned int flash_width = 0, dfc_width = 0;
	int mode, err;

	mode = onfi_get_async_timing_mode(chip);
	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		err = pxa3xx_nand_init_timings_compat(host, &flash_width,
						      &dfc_width);
		if (err)
			return err;

		if (flash_width == 16) {
			info->reg_ndcr |= NDCR_DWIDTH_M;
			chip->options |= NAND_BUSWIDTH_16;
		}

		info->reg_ndcr |= (dfc_width == 16) ? NDCR_DWIDTH_C : 0;
	} else {
		err = pxa3xx_nand_init_timings_onfi(host, mode);
		if (err)
			return err;
	}

	return 0;
}
569
f8155a40
LW
/**
 * NOTE: it is a must to set ND_RUN firstly, then write
 * command buffer, otherwise, it does not work.
 * We enable all the interrupt at the same time, and
 * let pxa3xx_nand_irq to handle all logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	/* Start from the cached NDCR and apply per-command settings */
	ndcr = info->reg_ndcr;

	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		/* NDECCCTRL selects BCH mode when the chunk needs BCH ECC */
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	if (info->use_dma)
		ndcr |= NDCR_DMA_EN;
	else
		ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDCR, ndcr);
}
609
/*
 * Stop the controller: poll for ND_RUN to clear (forcing it clear on
 * timeout), terminate any in-flight DMA transfer, and ack all status
 * bits.
 *
 * NOTE(review): NAND_STOP_DELAY is msecs_to_jiffies(40) but is used
 * here as a bare iteration count around udelay(1), so the real wait
 * depends on HZ rather than being 40 ms — confirm intent.
 */
static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	int timeout = NAND_STOP_DELAY;

	/* wait RUN bit in NDCR become 0 */
	ndcr = nand_readl(info, NDCR);
	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
		ndcr = nand_readl(info, NDCR);
		udelay(1);
	}

	if (timeout <= 0) {
		/* Controller did not stop on its own: clear ND_RUN by hand */
		ndcr &= ~NDCR_ND_RUN;
		nand_writel(info, NDCR, ndcr);
	}
	if (info->dma_chan)
		dmaengine_terminate_all(info->dma_chan);

	/* clear status bits */
	nand_writel(info, NDSR, NDSR_MASK);
}
632
57ff88f0
EG
633static void __maybe_unused
634enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
fe69af00 635{
636 uint32_t ndcr;
637
638 ndcr = nand_readl(info, NDCR);
639 nand_writel(info, NDCR, ndcr & ~int_mask);
640}
641
642static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
643{
644 uint32_t ndcr;
645
646 ndcr = nand_readl(info, NDCR);
647 nand_writel(info, NDCR, ndcr | int_mask);
648}
649
8dad0386
MR
/*
 * Read @len 32-bit words from the controller data FIFO (NDDB) into
 * @data. With BCH ECC enabled the transfer is split into 8-word
 * (32-byte) bursts with a polled RDDREQ check between bursts, as the
 * datasheet requires; the poll is skipped for the final burst.
 * On poll timeout the remaining data is abandoned.
 */
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		u32 val;
		int ret;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32 bytes reads, we
		 * have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO 8 32 bits reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			ioread32_rep(info->mmio_base + NDDB, data, 8);

			ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
							 val & NDSR_RDDREQ, 1000, 5000);
			if (ret) {
				dev_err(&info->pdev->dev,
					"Timeout on RDDREQ while draining the FIFO\n");
				return;
			}

			/* advance 32 bytes (8 words) per burst */
			data += 32;
			len -= 8;
		}
	}

	ioread32_rep(info->mmio_base + NDDB, data, len);
}
682
/*
 * Perform the PIO transfer for the current chunk: push or drain
 * step_chunk_size data bytes and step_spare_size OOB bytes (rounded up
 * to whole 32-bit words) through NDDB, then advance the data/OOB buffer
 * offsets for multi-chunk pages. Any state other than PIO read/write
 * is a driver bug.
 */
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	switch (info->state) {
	case STATE_PIO_WRITING:
		if (info->step_chunk_size)
			writesl(info->mmio_base + NDDB,
				info->data_buff + info->data_buff_pos,
				DIV_ROUND_UP(info->step_chunk_size, 4));

		if (info->step_spare_size)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	case STATE_PIO_READING:
		if (info->step_chunk_size)
			drain_fifo(info,
				   info->data_buff + info->data_buff_pos,
				   DIV_ROUND_UP(info->step_chunk_size, 4));

		if (info->step_spare_size)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += info->step_chunk_size;
	info->oob_buff_pos += info->step_spare_size;
}
718
/*
 * DMA completion callback: record success (STATE_DMA_DONE) or flag a
 * bus error, unmap the scatterlist, ack the data-request status bits,
 * and re-enable controller interrupts so the command-done interrupt
 * can be delivered.
 */
static void pxa3xx_nand_data_dma_irq(void *data)
{
	struct pxa3xx_nand_info *info = data;
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
	if (likely(status == DMA_COMPLETE)) {
		info->state = STATE_DMA_DONE;
	} else {
		dev_err(&info->pdev->dev, "DMA error on data channel\n");
		info->retcode = ERR_DMABUSERR;
	}
	dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
	enable_int(info, NDCR_INT_MASK);
}
737
/*
 * Kick off a DMA transfer for the current chunk. The direction comes
 * from info->state (set by the IRQ handler). The scatterlist length
 * covers chunk_size plus, when the spare area is in use, the spare and
 * ECC bytes. On descriptor-prep failure the error is logged and the
 * transfer is abandoned. Completion is signalled through
 * pxa3xx_nand_data_dma_irq().
 */
static void start_data_dma(struct pxa3xx_nand_info *info)
{
	enum dma_transfer_direction direction;
	struct dma_async_tx_descriptor *tx;

	switch (info->state) {
	case STATE_DMA_WRITING:
		info->dma_dir = DMA_TO_DEVICE;
		direction = DMA_MEM_TO_DEV;
		break;
	case STATE_DMA_READING:
		info->dma_dir = DMA_FROM_DEVICE;
		direction = DMA_DEV_TO_MEM;
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}
	info->sg.length = info->chunk_size;
	if (info->use_spare)
		info->sg.length += info->spare_size + info->ecc_size;
	dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
		return;
	}
	tx->callback = pxa3xx_nand_data_dma_irq;
	tx->callback_param = info;
	info->dma_cookie = dmaengine_submit(tx);
	dma_async_issue_pending(info->dma_chan);
	dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
		__func__, direction, info->dma_cookie, info->sg.length);
}
775
24542257
RJ
/*
 * Threaded half of the NAND interrupt: performs the PIO data transfer
 * requested by the top half, then marks the command phase done and
 * acks the data-request status bits.
 */
static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
{
	struct pxa3xx_nand_info *info = data;

	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);

	return IRQ_HANDLED;
}
787
/*
 * Top-half NAND interrupt handler. Decodes NDSR and advances the
 * command state machine:
 *  - ECC status: latch uncorrectable/correctable error results; on
 *    NFCv2 with BCH the hardware reports a real bitflip count,
 *    otherwise assume one. The running maximum is kept for the MTD
 *    layer (returned from ecc.read_page()).
 *  - Data requests (RDDREQ/WRDREQ): hand off to DMA, or wake the
 *    threaded handler for PIO.
 *  - Command-done / ready: complete the corresponding waiters.
 *  - WRCMDREQ: load NDCB0..NDCB2 (and NDCB3 on NFCv2) via successive
 *    writes to NDCB0, as the hardware requires.
 * Note CS0 and CS1 use different ready and command-done status bits.
 */
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	if (info->cs == 0) {
		ready = NDSR_FLASH_RDY;
		cmd_done = NDSR_CS0_CMDD;
	} else {
		ready = NDSR_RDY;
		cmd_done = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store maximum number of corrected bitflips
		 * to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* whether use dma to transfer data */
		if (info->use_dma) {
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			goto NORMAL_IRQ_EXIT;
		} else {
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_PIO_READING : STATE_PIO_WRITING;
			ret = IRQ_WAKE_THREAD;
			goto NORMAL_IRQ_EXIT;
		}
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	/*
	 * Clear all status bit before issuing the next command, which
	 * can and will alter the status bits and will deserve a new
	 * interrupt on its own. This lets the controller exit the IRQ
	 */
	nand_writel(info, NDSR, status);

	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing directly either 12 or 16
		 * bytes directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	if (is_completed)
		complete(&info->cmd_complete);
	if (is_ready)
		complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
	return ret;
}
883
/*
 * Return 1 if the first @len bytes of @buf are all 0xFF (the erased
 * flash value), 0 otherwise. A zero-length buffer counts as blank.
 */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (buf[i] != 0xff)
			return 0;
	}
	return 1;
}
891
86beebae
EG
892static void set_command_address(struct pxa3xx_nand_info *info,
893 unsigned int page_size, uint16_t column, int page_addr)
894{
895 /* small page addr setting */
896 if (page_size < PAGE_CHUNK_SIZE) {
897 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
898 | (column & 0xFF);
899
900 info->ndcb2 = 0;
901 } else {
902 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
903 | (column & 0xFFFF);
904
905 if (page_addr & 0xFF0000)
906 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
907 else
908 info->ndcb2 = 0;
909 }
910}
911
/*
 * Reset all per-command bookkeeping before issuing @command and apply
 * command-specific defaults: ECC is enabled for page read/program,
 * spare-area transfer is disabled for PARAM reads, and the cached
 * address registers are cleared for every other command. Commands that
 * will read data or set a write address get the data buffer pre-filled
 * with 0xFF (the erased-flash value).
 */
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = nand_to_mtd(&host->chip);

	/* reset data and oob column point to handle data */
	info->buf_start = 0;
	info->buf_count = 0;
	info->data_buff_pos = 0;
	info->oob_buff_pos = 0;
	info->step_chunk_size = 0;
	info->step_spare_size = 0;
	info->cur_chunk = 0;
	info->use_ecc = 0;
	info->use_spare = 1;
	info->retcode = ERR_NONE;
	info->ecc_err_cnt = 0;
	info->ndcb3 = 0;
	info->need_wait = 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
		break;
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {

		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}

}
959
960static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
70ed8523 961 int ext_cmd_type, uint16_t column, int page_addr)
c39ff03a
EG
962{
963 int addr_cycle, exec_cmd;
964 struct pxa3xx_nand_host *host;
965 struct mtd_info *mtd;
966
967 host = info->host[info->cs];
063294a3 968 mtd = nand_to_mtd(&host->chip);
c39ff03a
EG
969 addr_cycle = 0;
970 exec_cmd = 1;
971
972 if (info->cs != 0)
973 info->ndcb0 = NDCB0_CSEL;
974 else
975 info->ndcb0 = 0;
976
977 if (command == NAND_CMD_SEQIN)
978 exec_cmd = 0;
4eb2da89 979
d456882b
LW
980 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
981 + host->col_addr_cycles);
fe69af00 982
4eb2da89
LW
983 switch (command) {
984 case NAND_CMD_READOOB:
fe69af00 985 case NAND_CMD_READ0:
ec82135a
EG
986 info->buf_start = column;
987 info->ndcb0 |= NDCB0_CMD_TYPE(0)
988 | addr_cycle
989 | NAND_CMD_READ0;
990
4eb2da89 991 if (command == NAND_CMD_READOOB)
ec82135a 992 info->buf_start += mtd->writesize;
4eb2da89 993
c2cdace7
TP
994 if (info->cur_chunk < info->nfullchunks) {
995 info->step_chunk_size = info->chunk_size;
996 info->step_spare_size = info->spare_size;
997 } else {
998 info->step_chunk_size = info->last_chunk_size;
999 info->step_spare_size = info->last_spare_size;
1000 }
1001
70ed8523
EG
1002 /*
1003 * Multiple page read needs an 'extended command type' field,
1004 * which is either naked-read or last-read according to the
1005 * state.
1006 */
1007 if (mtd->writesize == PAGE_CHUNK_SIZE) {
ec82135a 1008 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
70ed8523
EG
1009 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
1010 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
1011 | NDCB0_LEN_OVRD
1012 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
c2cdace7
TP
1013 info->ndcb3 = info->step_chunk_size +
1014 info->step_spare_size;
70ed8523 1015 }
fe69af00 1016
01d9947e 1017 set_command_address(info, mtd->writesize, column, page_addr);
01d9947e
EG
1018 break;
1019
fe69af00 1020 case NAND_CMD_SEQIN:
4eb2da89 1021
e7f9a6a4
EG
1022 info->buf_start = column;
1023 set_command_address(info, mtd->writesize, 0, page_addr);
535cb57a
EG
1024
1025 /*
1026 * Multiple page programming needs to execute the initial
1027 * SEQIN command that sets the page address.
1028 */
1029 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1030 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
1031 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
1032 | addr_cycle
1033 | command;
535cb57a
EG
1034 exec_cmd = 1;
1035 }
fe69af00 1036 break;
4eb2da89 1037
fe69af00 1038 case NAND_CMD_PAGEPROG:
4eb2da89
LW
1039 if (is_buf_blank(info->data_buff,
1040 (mtd->writesize + mtd->oobsize))) {
1041 exec_cmd = 0;
1042 break;
1043 }
fe69af00 1044
c2cdace7
TP
1045 if (info->cur_chunk < info->nfullchunks) {
1046 info->step_chunk_size = info->chunk_size;
1047 info->step_spare_size = info->spare_size;
1048 } else {
1049 info->step_chunk_size = info->last_chunk_size;
1050 info->step_spare_size = info->last_spare_size;
1051 }
1052
535cb57a
EG
1053 /* Second command setting for large pages */
1054 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1055 /*
1056 * Multiple page write uses the 'extended command'
1057 * field. This can be used to issue a command dispatch
1058 * or a naked-write depending on the current stage.
1059 */
1060 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
1061 | NDCB0_LEN_OVRD
1062 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
c2cdace7
TP
1063 info->ndcb3 = info->step_chunk_size +
1064 info->step_spare_size;
535cb57a
EG
1065
1066 /*
1067 * This is the command dispatch that completes a chunked
1068 * page program operation.
1069 */
c2cdace7 1070 if (info->cur_chunk == info->ntotalchunks) {
535cb57a
EG
1071 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
1072 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
1073 | command;
1074 info->ndcb1 = 0;
1075 info->ndcb2 = 0;
1076 info->ndcb3 = 0;
1077 }
1078 } else {
1079 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
1080 | NDCB0_AUTO_RS
1081 | NDCB0_ST_ROW_EN
1082 | NDCB0_DBC
1083 | (NAND_CMD_PAGEPROG << 8)
1084 | NAND_CMD_SEQIN
1085 | addr_cycle;
1086 }
fe69af00 1087 break;
4eb2da89 1088
ce0268f6 1089 case NAND_CMD_PARAM:
c1634097 1090 info->buf_count = INIT_BUFFER_SIZE;
ce0268f6
EG
1091 info->ndcb0 |= NDCB0_CMD_TYPE(0)
1092 | NDCB0_ADDR_CYC(1)
41a63430 1093 | NDCB0_LEN_OVRD
ec82135a 1094 | command;
ce0268f6 1095 info->ndcb1 = (column & 0xFF);
c1634097 1096 info->ndcb3 = INIT_BUFFER_SIZE;
c2cdace7 1097 info->step_chunk_size = INIT_BUFFER_SIZE;
ce0268f6
EG
1098 break;
1099
fe69af00 1100 case NAND_CMD_READID:
b226eca2 1101 info->buf_count = READ_ID_BYTES;
4eb2da89
LW
1102 info->ndcb0 |= NDCB0_CMD_TYPE(3)
1103 | NDCB0_ADDR_CYC(1)
ec82135a 1104 | command;
d14231f1 1105 info->ndcb1 = (column & 0xFF);
4eb2da89 1106
c2cdace7 1107 info->step_chunk_size = 8;
4eb2da89 1108 break;
fe69af00 1109 case NAND_CMD_STATUS:
4eb2da89
LW
1110 info->buf_count = 1;
1111 info->ndcb0 |= NDCB0_CMD_TYPE(4)
1112 | NDCB0_ADDR_CYC(1)
ec82135a 1113 | command;
4eb2da89 1114
c2cdace7 1115 info->step_chunk_size = 8;
4eb2da89
LW
1116 break;
1117
1118 case NAND_CMD_ERASE1:
4eb2da89
LW
1119 info->ndcb0 |= NDCB0_CMD_TYPE(2)
1120 | NDCB0_AUTO_RS
1121 | NDCB0_ADDR_CYC(3)
1122 | NDCB0_DBC
ec82135a
EG
1123 | (NAND_CMD_ERASE2 << 8)
1124 | NAND_CMD_ERASE1;
4eb2da89
LW
1125 info->ndcb1 = page_addr;
1126 info->ndcb2 = 0;
1127
fe69af00 1128 break;
1129 case NAND_CMD_RESET:
4eb2da89 1130 info->ndcb0 |= NDCB0_CMD_TYPE(5)
ec82135a 1131 | command;
4eb2da89
LW
1132
1133 break;
1134
1135 case NAND_CMD_ERASE2:
1136 exec_cmd = 0;
fe69af00 1137 break;
4eb2da89 1138
fe69af00 1139 default:
4eb2da89 1140 exec_cmd = 0;
da675b4e
LW
1141 dev_err(&info->pdev->dev, "non-supported command %x\n",
1142 command);
fe69af00 1143 break;
1144 }
1145
4eb2da89
LW
1146 return exec_cmd;
1147}
1148
5cbbdc6a
EG
1149static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
1150 int column, int page_addr)
4eb2da89 1151{
4bd4ebcc 1152 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1153 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1154 struct pxa3xx_nand_info *info = host->info_data;
e5860c18 1155 int exec_cmd;
4eb2da89
LW
1156
1157 /*
1158 * if this is a x16 device ,then convert the input
1159 * "byte" address into a "word" address appropriate
1160 * for indexing a word-oriented device
1161 */
48cf7efa 1162 if (info->reg_ndcr & NDCR_DWIDTH_M)
4eb2da89
LW
1163 column /= 2;
1164
f3c8cfc2
LW
1165 /*
1166 * There may be different NAND chip hooked to
1167 * different chip select, so check whether
1168 * chip select has been changed, if yes, reset the timing
1169 */
1170 if (info->cs != host->cs) {
1171 info->cs = host->cs;
48cf7efa
EG
1172 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1173 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
f3c8cfc2
LW
1174 }
1175
c39ff03a
EG
1176 prepare_start_command(info, command);
1177
d456882b 1178 info->state = STATE_PREPARED;
70ed8523
EG
1179 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1180
f8155a40
LW
1181 if (exec_cmd) {
1182 init_completion(&info->cmd_complete);
55d9fd6e
EG
1183 init_completion(&info->dev_ready);
1184 info->need_wait = 1;
f8155a40
LW
1185 pxa3xx_nand_start(info);
1186
e5860c18
NMG
1187 if (!wait_for_completion_timeout(&info->cmd_complete,
1188 CHIP_DELAY_TIMEOUT)) {
da675b4e 1189 dev_err(&info->pdev->dev, "Wait time out!!!\n");
f8155a40
LW
1190 /* Stop State Machine for next command cycle */
1191 pxa3xx_nand_stop(info);
1192 }
f8155a40 1193 }
d456882b 1194 info->state = STATE_IDLE;
f8155a40
LW
1195}
1196
5cbbdc6a
EG
1197static void nand_cmdfunc_extended(struct mtd_info *mtd,
1198 const unsigned command,
1199 int column, int page_addr)
70ed8523 1200{
4bd4ebcc 1201 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1202 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
70ed8523 1203 struct pxa3xx_nand_info *info = host->info_data;
e5860c18 1204 int exec_cmd, ext_cmd_type;
70ed8523
EG
1205
1206 /*
1207 * if this is a x16 device then convert the input
1208 * "byte" address into a "word" address appropriate
1209 * for indexing a word-oriented device
1210 */
1211 if (info->reg_ndcr & NDCR_DWIDTH_M)
1212 column /= 2;
1213
1214 /*
1215 * There may be different NAND chip hooked to
1216 * different chip select, so check whether
1217 * chip select has been changed, if yes, reset the timing
1218 */
1219 if (info->cs != host->cs) {
1220 info->cs = host->cs;
1221 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1222 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1223 }
1224
1225 /* Select the extended command for the first command */
1226 switch (command) {
1227 case NAND_CMD_READ0:
1228 case NAND_CMD_READOOB:
1229 ext_cmd_type = EXT_CMD_TYPE_MONO;
1230 break;
535cb57a
EG
1231 case NAND_CMD_SEQIN:
1232 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1233 break;
1234 case NAND_CMD_PAGEPROG:
1235 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1236 break;
70ed8523
EG
1237 default:
1238 ext_cmd_type = 0;
535cb57a 1239 break;
70ed8523
EG
1240 }
1241
1242 prepare_start_command(info, command);
1243
1244 /*
1245 * Prepare the "is ready" completion before starting a command
1246 * transaction sequence. If the command is not executed the
1247 * completion will be completed, see below.
1248 *
1249 * We can do that inside the loop because the command variable
1250 * is invariant and thus so is the exec_cmd.
1251 */
1252 info->need_wait = 1;
1253 init_completion(&info->dev_ready);
1254 do {
1255 info->state = STATE_PREPARED;
c2cdace7 1256
70ed8523
EG
1257 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1258 column, page_addr);
1259 if (!exec_cmd) {
1260 info->need_wait = 0;
1261 complete(&info->dev_ready);
1262 break;
1263 }
1264
1265 init_completion(&info->cmd_complete);
1266 pxa3xx_nand_start(info);
1267
e5860c18
NMG
1268 if (!wait_for_completion_timeout(&info->cmd_complete,
1269 CHIP_DELAY_TIMEOUT)) {
70ed8523
EG
1270 dev_err(&info->pdev->dev, "Wait time out!!!\n");
1271 /* Stop State Machine for next command cycle */
1272 pxa3xx_nand_stop(info);
1273 break;
1274 }
1275
c2cdace7
TP
1276 /* Only a few commands need several steps */
1277 if (command != NAND_CMD_PAGEPROG &&
1278 command != NAND_CMD_READ0 &&
1279 command != NAND_CMD_READOOB)
1280 break;
1281
1282 info->cur_chunk++;
1283
70ed8523 1284 /* Check if the sequence is complete */
c2cdace7 1285 if (info->cur_chunk == info->ntotalchunks && command != NAND_CMD_PAGEPROG)
535cb57a
EG
1286 break;
1287
1288 /*
1289 * After a splitted program command sequence has issued
1290 * the command dispatch, the command sequence is complete.
1291 */
c2cdace7 1292 if (info->cur_chunk == (info->ntotalchunks + 1) &&
535cb57a
EG
1293 command == NAND_CMD_PAGEPROG &&
1294 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
70ed8523
EG
1295 break;
1296
1297 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1298 /* Last read: issue a 'last naked read' */
c2cdace7 1299 if (info->cur_chunk == info->ntotalchunks - 1)
70ed8523
EG
1300 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1301 else
1302 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
535cb57a
EG
1303
1304 /*
1305 * If a splitted program command has no more data to transfer,
1306 * the command dispatch must be issued to complete.
1307 */
1308 } else if (command == NAND_CMD_PAGEPROG &&
c2cdace7 1309 info->cur_chunk == info->ntotalchunks) {
535cb57a 1310 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
70ed8523
EG
1311 }
1312 } while (1);
1313
1314 info->state = STATE_IDLE;
1315}
1316
fdbad98d 1317static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
45aaeff9
BB
1318 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1319 int page)
f8155a40
LW
1320{
1321 chip->write_buf(mtd, buf, mtd->writesize);
1322 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
fdbad98d
JW
1323
1324 return 0;
f8155a40
LW
1325}
1326
1327static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1fbb938d
BN
1328 struct nand_chip *chip, uint8_t *buf, int oob_required,
1329 int page)
f8155a40 1330{
d699ed25 1331 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1332 struct pxa3xx_nand_info *info = host->info_data;
f8155a40
LW
1333
1334 chip->read_buf(mtd, buf, mtd->writesize);
1335 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1336
87f5336e
EG
1337 if (info->retcode == ERR_CORERR && info->use_ecc) {
1338 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1339
1340 } else if (info->retcode == ERR_UNCORERR) {
f8155a40
LW
1341 /*
1342 * for blank page (all 0xff), HW will calculate its ECC as
1343 * 0, which is different from the ECC information within
87f5336e 1344 * OOB, ignore such uncorrectable errors
f8155a40
LW
1345 */
1346 if (is_buf_blank(buf, mtd->writesize))
543e32d5
DM
1347 info->retcode = ERR_NONE;
1348 else
f8155a40 1349 mtd->ecc_stats.failed++;
fe69af00 1350 }
f8155a40 1351
87f5336e 1352 return info->max_bitflips;
fe69af00 1353}
1354
1355static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1356{
4bd4ebcc 1357 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1358 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1359 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1360 char retval = 0xFF;
1361
1362 if (info->buf_start < info->buf_count)
1363 /* Has just send a new command? */
1364 retval = info->data_buff[info->buf_start++];
1365
1366 return retval;
1367}
1368
1369static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1370{
4bd4ebcc 1371 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1372 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1373 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1374 u16 retval = 0xFFFF;
1375
1376 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1377 retval = *((u16 *)(info->data_buff+info->buf_start));
1378 info->buf_start += 2;
1379 }
1380 return retval;
1381}
1382
1383static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1384{
4bd4ebcc 1385 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1386 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1387 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1388 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1389
1390 memcpy(buf, info->data_buff + info->buf_start, real_len);
1391 info->buf_start += real_len;
1392}
1393
1394static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1395 const uint8_t *buf, int len)
1396{
4bd4ebcc 1397 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1398 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1399 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1400 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1401
1402 memcpy(info->data_buff + info->buf_start, buf, real_len);
1403 info->buf_start += real_len;
1404}
1405
/* Chip selection is encoded per-command (info->cs); nothing to do here. */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}
1410
1411static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1412{
4bd4ebcc 1413 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1414 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1415 struct pxa3xx_nand_info *info = host->info_data;
55d9fd6e
EG
1416
1417 if (info->need_wait) {
55d9fd6e 1418 info->need_wait = 0;
e5860c18
NMG
1419 if (!wait_for_completion_timeout(&info->dev_ready,
1420 CHIP_DELAY_TIMEOUT)) {
55d9fd6e
EG
1421 dev_err(&info->pdev->dev, "Ready time out!!!\n");
1422 return NAND_STATUS_FAIL;
1423 }
1424 }
fe69af00 1425
1426 /* pxa3xx_nand_send_command has waited for command complete */
1427 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1428 if (info->retcode == ERR_NONE)
1429 return 0;
55d9fd6e
EG
1430 else
1431 return NAND_STATUS_FAIL;
fe69af00 1432 }
1433
55d9fd6e 1434 return NAND_STATUS_READY;
fe69af00 1435}
1436
66e8e47e 1437static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
fe69af00 1438{
b1e48577 1439 struct pxa3xx_nand_host *host = info->host[info->cs];
fe69af00 1440 struct platform_device *pdev = info->pdev;
453810b7 1441 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
b1e48577 1442 const struct nand_sdr_timings *timings;
fe69af00 1443
66e8e47e
EG
1444 /* Configure default flash values */
1445 info->chunk_size = PAGE_CHUNK_SIZE;
f19fe983
AT
1446 info->reg_ndcr = 0x0; /* enable all interrupts */
1447 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1448 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
66e8e47e
EG
1449 info->reg_ndcr |= NDCR_SPARE_EN;
1450
b1e48577
EG
1451 /* use the common timing to make a try */
1452 timings = onfi_async_timing_mode_to_sdr_timings(0);
1453 if (IS_ERR(timings))
1454 return PTR_ERR(timings);
1455
1456 pxa3xx_nand_set_sdr_timing(host, timings);
66e8e47e
EG
1457 return 0;
1458}
1459
1460static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1461{
1462 struct pxa3xx_nand_host *host = info->host[info->cs];
063294a3
BB
1463 struct nand_chip *chip = &host->chip;
1464 struct mtd_info *mtd = nand_to_mtd(chip);
66e8e47e 1465
f19fe983
AT
1466 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1467 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1468 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
fe69af00 1469}
1470
154f50fb 1471static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
f271049e 1472{
66e8e47e
EG
1473 struct platform_device *pdev = info->pdev;
1474 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
f271049e 1475 uint32_t ndcr = nand_readl(info, NDCR);
f271049e 1476
70ed8523 1477 /* Set an initial chunk size */
b226eca2 1478 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
e971affa
RJ
1479 info->reg_ndcr = ndcr &
1480 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
66e8e47e 1481 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
48cf7efa
EG
1482 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1483 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
f271049e
MR
1484}
1485
fe69af00 1486static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1487{
1488 struct platform_device *pdev = info->pdev;
8f5ba31a
RJ
1489 struct dma_slave_config config;
1490 dma_cap_mask_t mask;
1491 struct pxad_param param;
1492 int ret;
fe69af00 1493
8f5ba31a
RJ
1494 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1495 if (info->data_buff == NULL)
1496 return -ENOMEM;
1497 if (use_dma == 0)
fe69af00 1498 return 0;
fe69af00 1499
8f5ba31a
RJ
1500 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1501 if (ret)
1502 return ret;
fe69af00 1503
8f5ba31a
RJ
1504 sg_init_one(&info->sg, info->data_buff, info->buf_size);
1505 dma_cap_zero(mask);
1506 dma_cap_set(DMA_SLAVE, mask);
1507 param.prio = PXAD_PRIO_LOWEST;
1508 param.drcmr = info->drcmr_dat;
1509 info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
1510 &param, &pdev->dev,
1511 "data");
1512 if (!info->dma_chan) {
1513 dev_err(&pdev->dev, "unable to request data dma channel\n");
1514 return -ENODEV;
1515 }
fe69af00 1516
8f5ba31a
RJ
1517 memset(&config, 0, sizeof(config));
1518 config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1519 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1520 config.src_addr = info->mmio_phys + NDDB;
1521 config.dst_addr = info->mmio_phys + NDDB;
1522 config.src_maxburst = 32;
1523 config.dst_maxburst = 32;
1524 ret = dmaengine_slave_config(info->dma_chan, &config);
1525 if (ret < 0) {
1526 dev_err(&info->pdev->dev,
1527 "dma channel configuration failed: %d\n",
1528 ret);
1529 return ret;
fe69af00 1530 }
1531
95b26563
EG
1532 /*
1533 * Now that DMA buffers are allocated we turn on
1534 * DMA proper for I/O operations.
1535 */
1536 info->use_dma = 1;
fe69af00 1537 return 0;
1538}
1539
498b6145
EG
1540static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1541{
15b540c7 1542 if (info->use_dma) {
8f5ba31a
RJ
1543 dmaengine_terminate_all(info->dma_chan);
1544 dma_release_channel(info->dma_chan);
498b6145 1545 }
f4db2e3a
EG
1546 kfree(info->data_buff);
1547}
498b6145 1548
43bcfd2b
EG
1549static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1550 struct nand_ecc_ctrl *ecc,
30b2afc8 1551 int strength, int ecc_stepsize, int page_size)
43bcfd2b 1552{
30b2afc8 1553 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
c2cdace7
TP
1554 info->nfullchunks = 1;
1555 info->ntotalchunks = 1;
70ed8523 1556 info->chunk_size = 2048;
43bcfd2b
EG
1557 info->spare_size = 40;
1558 info->ecc_size = 24;
1559 ecc->mode = NAND_ECC_HW;
1560 ecc->size = 512;
1561 ecc->strength = 1;
43bcfd2b 1562
30b2afc8 1563 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
c2cdace7
TP
1564 info->nfullchunks = 1;
1565 info->ntotalchunks = 1;
70ed8523 1566 info->chunk_size = 512;
43bcfd2b
EG
1567 info->spare_size = 8;
1568 info->ecc_size = 8;
1569 ecc->mode = NAND_ECC_HW;
1570 ecc->size = 512;
1571 ecc->strength = 1;
43bcfd2b 1572
6033a949
BN
1573 /*
1574 * Required ECC: 4-bit correction per 512 bytes
1575 * Select: 16-bit correction per 2048 bytes
1576 */
3db227b6
RG
1577 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1578 info->ecc_bch = 1;
c2cdace7
TP
1579 info->nfullchunks = 1;
1580 info->ntotalchunks = 1;
3db227b6
RG
1581 info->chunk_size = 2048;
1582 info->spare_size = 32;
1583 info->ecc_size = 32;
1584 ecc->mode = NAND_ECC_HW;
1585 ecc->size = info->chunk_size;
1586 ecc->layout = &ecc_layout_2KB_bch4bit;
1587 ecc->strength = 16;
3db227b6 1588
30b2afc8 1589 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
70ed8523 1590 info->ecc_bch = 1;
c2cdace7
TP
1591 info->nfullchunks = 2;
1592 info->ntotalchunks = 2;
70ed8523
EG
1593 info->chunk_size = 2048;
1594 info->spare_size = 32;
1595 info->ecc_size = 32;
1596 ecc->mode = NAND_ECC_HW;
1597 ecc->size = info->chunk_size;
1598 ecc->layout = &ecc_layout_4KB_bch4bit;
1599 ecc->strength = 16;
70ed8523 1600
6033a949
BN
1601 /*
1602 * Required ECC: 8-bit correction per 512 bytes
1603 * Select: 16-bit correction per 1024 bytes
1604 */
1605 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
70ed8523 1606 info->ecc_bch = 1;
c2cdace7
TP
1607 info->nfullchunks = 4;
1608 info->ntotalchunks = 5;
70ed8523
EG
1609 info->chunk_size = 1024;
1610 info->spare_size = 0;
c2cdace7
TP
1611 info->last_chunk_size = 0;
1612 info->last_spare_size = 64;
70ed8523
EG
1613 info->ecc_size = 32;
1614 ecc->mode = NAND_ECC_HW;
1615 ecc->size = info->chunk_size;
1616 ecc->layout = &ecc_layout_4KB_bch8bit;
1617 ecc->strength = 16;
eee0166d
EG
1618 } else {
1619 dev_err(&info->pdev->dev,
1620 "ECC strength %d at page size %d is not supported\n",
1621 strength, page_size);
1622 return -ENODEV;
70ed8523 1623 }
eee0166d
EG
1624
1625 dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
1626 ecc->strength, ecc->size);
43bcfd2b
EG
1627 return 0;
1628}
1629
401e67e2 1630static int pxa3xx_nand_scan(struct mtd_info *mtd)
fe69af00 1631{
4bd4ebcc 1632 struct nand_chip *chip = mtd_to_nand(mtd);
d699ed25 1633 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
d456882b 1634 struct pxa3xx_nand_info *info = host->info_data;
401e67e2 1635 struct platform_device *pdev = info->pdev;
453810b7 1636 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
f19fe983 1637 int ret;
30b2afc8 1638 uint16_t ecc_strength, ecc_step;
401e67e2 1639
154f50fb
EG
1640 if (pdata->keep_config) {
1641 pxa3xx_nand_detect_config(info);
1642 } else {
1643 ret = pxa3xx_nand_config_ident(info);
1644 if (ret)
1645 return ret;
401e67e2
LW
1646 }
1647
48cf7efa 1648 if (info->reg_ndcr & NDCR_DWIDTH_M)
d456882b
LW
1649 chip->options |= NAND_BUSWIDTH_16;
1650
43bcfd2b
EG
1651 /* Device detection must be done with ECC disabled */
1652 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1653 nand_writel(info, NDECCCTRL, 0x0);
1654
f19fe983 1655 if (nand_scan_ident(mtd, 1, NULL))
4332c116 1656 return -ENODEV;
776f265e 1657
f19fe983
AT
1658 if (!pdata->keep_config) {
1659 ret = pxa3xx_nand_init(host);
1660 if (ret) {
1661 dev_err(&info->pdev->dev, "Failed to init nand: %d\n",
1662 ret);
1663 return ret;
1664 }
1665 }
1666
776f265e
EG
1667 if (pdata->flash_bbt) {
1668 /*
1669 * We'll use a bad block table stored in-flash and don't
1670 * allow writing the bad block marker to the flash.
1671 */
1672 chip->bbt_options |= NAND_BBT_USE_FLASH |
1673 NAND_BBT_NO_OOB_BBM;
1674 chip->bbt_td = &bbt_main_descr;
1675 chip->bbt_md = &bbt_mirror_descr;
1676 }
1677
5cbbdc6a
EG
1678 /*
1679 * If the page size is bigger than the FIFO size, let's check
1680 * we are given the right variant and then switch to the extended
1681 * (aka splitted) command handling,
1682 */
1683 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1684 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1685 chip->cmdfunc = nand_cmdfunc_extended;
1686 } else {
1687 dev_err(&info->pdev->dev,
1688 "unsupported page size on this variant\n");
1689 return -ENODEV;
1690 }
1691 }
1692
5b3e5078
EG
1693 if (pdata->ecc_strength && pdata->ecc_step_size) {
1694 ecc_strength = pdata->ecc_strength;
1695 ecc_step = pdata->ecc_step_size;
1696 } else {
1697 ecc_strength = chip->ecc_strength_ds;
1698 ecc_step = chip->ecc_step_ds;
1699 }
30b2afc8
EG
1700
1701 /* Set default ECC strength requirements on non-ONFI devices */
1702 if (ecc_strength < 1 && ecc_step < 1) {
1703 ecc_strength = 1;
1704 ecc_step = 512;
1705 }
1706
1707 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1708 ecc_step, mtd->writesize);
eee0166d
EG
1709 if (ret)
1710 return ret;
43bcfd2b 1711
4332c116 1712 /* calculate addressing information */
d456882b
LW
1713 if (mtd->writesize >= 2048)
1714 host->col_addr_cycles = 2;
1715 else
1716 host->col_addr_cycles = 1;
1717
62e8b851
EG
1718 /* release the initial buffer */
1719 kfree(info->data_buff);
1720
1721 /* allocate the real data + oob buffer */
1722 info->buf_size = mtd->writesize + mtd->oobsize;
1723 ret = pxa3xx_nand_init_buff(info);
1724 if (ret)
1725 return ret;
4332c116 1726 info->oob_buff = info->data_buff + mtd->writesize;
62e8b851 1727
4332c116 1728 if ((mtd->size >> chip->page_shift) > 65536)
d456882b 1729 host->row_addr_cycles = 3;
4332c116 1730 else
d456882b 1731 host->row_addr_cycles = 2;
66e8e47e
EG
1732
1733 if (!pdata->keep_config)
1734 pxa3xx_nand_config_tail(info);
1735
401e67e2 1736 return nand_scan_tail(mtd);
fe69af00 1737}
1738
d456882b 1739static int alloc_nand_resource(struct platform_device *pdev)
fe69af00 1740{
a61ae81a 1741 struct device_node *np = pdev->dev.of_node;
f3c8cfc2 1742 struct pxa3xx_nand_platform_data *pdata;
fe69af00 1743 struct pxa3xx_nand_info *info;
d456882b 1744 struct pxa3xx_nand_host *host;
6e308f87 1745 struct nand_chip *chip = NULL;
fe69af00 1746 struct mtd_info *mtd;
1747 struct resource *r;
f3c8cfc2 1748 int ret, irq, cs;
fe69af00 1749
453810b7 1750 pdata = dev_get_platdata(&pdev->dev);
e423c90a
RJ
1751 if (pdata->num_cs <= 0)
1752 return -ENODEV;
063294a3
BB
1753 info = devm_kzalloc(&pdev->dev,
1754 sizeof(*info) + sizeof(*host) * pdata->num_cs,
1755 GFP_KERNEL);
4c073cd2 1756 if (!info)
d456882b 1757 return -ENOMEM;
fe69af00 1758
fe69af00 1759 info->pdev = pdev;
c7e9c7e7 1760 info->variant = pxa3xx_nand_get_variant(pdev);
f3c8cfc2 1761 for (cs = 0; cs < pdata->num_cs; cs++) {
063294a3
BB
1762 host = (void *)&info[1] + sizeof(*host) * cs;
1763 chip = &host->chip;
d699ed25 1764 nand_set_controller_data(chip, host);
063294a3 1765 mtd = nand_to_mtd(chip);
f3c8cfc2 1766 info->host[cs] = host;
f3c8cfc2
LW
1767 host->cs = cs;
1768 host->info_data = info;
550dab5b 1769 mtd->dev.parent = &pdev->dev;
a61ae81a
BN
1770 /* FIXME: all chips use the same device tree partitions */
1771 nand_set_flash_node(chip, np);
f3c8cfc2 1772
d699ed25 1773 nand_set_controller_data(chip, host);
f3c8cfc2
LW
1774 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1775 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1776 chip->controller = &info->controller;
1777 chip->waitfunc = pxa3xx_nand_waitfunc;
1778 chip->select_chip = pxa3xx_nand_select_chip;
f3c8cfc2
LW
1779 chip->read_word = pxa3xx_nand_read_word;
1780 chip->read_byte = pxa3xx_nand_read_byte;
1781 chip->read_buf = pxa3xx_nand_read_buf;
1782 chip->write_buf = pxa3xx_nand_write_buf;
664c7f5e 1783 chip->options |= NAND_NO_SUBPAGE_WRITE;
5cbbdc6a 1784 chip->cmdfunc = nand_cmdfunc;
f3c8cfc2 1785 }
401e67e2
LW
1786
1787 spin_lock_init(&chip->controller->lock);
1788 init_waitqueue_head(&chip->controller->wq);
9ca7944d 1789 info->clk = devm_clk_get(&pdev->dev, NULL);
fe69af00 1790 if (IS_ERR(info->clk)) {
1791 dev_err(&pdev->dev, "failed to get nand clock\n");
4c073cd2 1792 return PTR_ERR(info->clk);
fe69af00 1793 }
1f8eaff2
EG
1794 ret = clk_prepare_enable(info->clk);
1795 if (ret < 0)
1796 return ret;
fe69af00 1797
6b45c1ee 1798 if (use_dma) {
8f5ba31a
RJ
1799 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1800 if (r == NULL) {
1801 dev_err(&pdev->dev,
1802 "no resource defined for data DMA\n");
1803 ret = -ENXIO;
1804 goto fail_disable_clk;
1e7ba630 1805 }
8f5ba31a
RJ
1806 info->drcmr_dat = r->start;
1807
1808 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1809 if (r == NULL) {
1810 dev_err(&pdev->dev,
1811 "no resource defined for cmd DMA\n");
1812 ret = -ENXIO;
1813 goto fail_disable_clk;
1814 }
1815 info->drcmr_cmd = r->start;
fe69af00 1816 }
fe69af00 1817
1818 irq = platform_get_irq(pdev, 0);
1819 if (irq < 0) {
1820 dev_err(&pdev->dev, "no IRQ resource defined\n");
1821 ret = -ENXIO;
9ca7944d 1822 goto fail_disable_clk;
fe69af00 1823 }
1824
1825 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
0ddd846f
EG
1826 info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1827 if (IS_ERR(info->mmio_base)) {
1828 ret = PTR_ERR(info->mmio_base);
9ca7944d 1829 goto fail_disable_clk;
fe69af00 1830 }
8638fac8 1831 info->mmio_phys = r->start;
fe69af00 1832
62e8b851
EG
1833 /* Allocate a buffer to allow flash detection */
1834 info->buf_size = INIT_BUFFER_SIZE;
1835 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1836 if (info->data_buff == NULL) {
1837 ret = -ENOMEM;
9ca7944d 1838 goto fail_disable_clk;
62e8b851 1839 }
fe69af00 1840
346e1259
HZ
1841 /* initialize all interrupts to be disabled */
1842 disable_int(info, NDSR_MASK);
1843
24542257
RJ
1844 ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1845 pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1846 pdev->name, info);
fe69af00 1847 if (ret < 0) {
1848 dev_err(&pdev->dev, "failed to request IRQ\n");
1849 goto fail_free_buf;
1850 }
1851
e353a20a 1852 platform_set_drvdata(pdev, info);
fe69af00 1853
d456882b 1854 return 0;
fe69af00 1855
fe69af00 1856fail_free_buf:
401e67e2 1857 free_irq(irq, info);
62e8b851 1858 kfree(info->data_buff);
9ca7944d 1859fail_disable_clk:
fb32061f 1860 clk_disable_unprepare(info->clk);
d456882b 1861 return ret;
fe69af00 1862}
1863
1864static int pxa3xx_nand_remove(struct platform_device *pdev)
1865{
e353a20a 1866 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
f3c8cfc2 1867 struct pxa3xx_nand_platform_data *pdata;
f3c8cfc2 1868 int irq, cs;
fe69af00 1869
d456882b
LW
1870 if (!info)
1871 return 0;
1872
453810b7 1873 pdata = dev_get_platdata(&pdev->dev);
fe69af00 1874
dbf5986a
HZ
1875 irq = platform_get_irq(pdev, 0);
1876 if (irq >= 0)
1877 free_irq(irq, info);
498b6145 1878 pxa3xx_nand_free_buff(info);
82a72d10 1879
e971affa
RJ
1880 /*
1881 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
1882 * In order to prevent a lockup of the system bus, the DFI bus
1883 * arbitration is granted to SMC upon driver removal. This is done by
1884 * setting the x_ARB_CNTL bit, which also prevents the NAND to have
1885 * access to the bus anymore.
1886 */
1887 nand_writel(info, NDCR,
1888 (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
1889 NFCV1_NDCR_ARB_CNTL);
fb32061f 1890 clk_disable_unprepare(info->clk);
82a72d10 1891
f3c8cfc2 1892 for (cs = 0; cs < pdata->num_cs; cs++)
063294a3 1893 nand_release(nand_to_mtd(&info->host[cs]->chip));
fe69af00 1894 return 0;
1895}
1896
1e7ba630
DM
1897static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1898{
1899 struct pxa3xx_nand_platform_data *pdata;
1900 struct device_node *np = pdev->dev.of_node;
1901 const struct of_device_id *of_id =
1902 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1903
1904 if (!of_id)
1905 return 0;
1906
1907 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1908 if (!pdata)
1909 return -ENOMEM;
1910
1911 if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1912 pdata->enable_arbiter = 1;
1913 if (of_get_property(np, "marvell,nand-keep-config", NULL))
1914 pdata->keep_config = 1;
1915 of_property_read_u32(np, "num-cs", &pdata->num_cs);
776f265e 1916 pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1e7ba630 1917
5b3e5078
EG
1918 pdata->ecc_strength = of_get_nand_ecc_strength(np);
1919 if (pdata->ecc_strength < 0)
1920 pdata->ecc_strength = 0;
1921
1922 pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
1923 if (pdata->ecc_step_size < 0)
1924 pdata->ecc_step_size = 0;
1925
1e7ba630
DM
1926 pdev->dev.platform_data = pdata;
1927
1928 return 0;
1929}
1e7ba630 1930
e353a20a
LW
1931static int pxa3xx_nand_probe(struct platform_device *pdev)
1932{
1933 struct pxa3xx_nand_platform_data *pdata;
1934 struct pxa3xx_nand_info *info;
8f5ba31a 1935 int ret, cs, probe_success, dma_available;
e353a20a 1936
8f5ba31a
RJ
1937 dma_available = IS_ENABLED(CONFIG_ARM) &&
1938 (IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
1939 if (use_dma && !dma_available) {
f4db2e3a
EG
1940 use_dma = 0;
1941 dev_warn(&pdev->dev,
1942 "This platform can't do DMA on this device\n");
1943 }
8f5ba31a 1944
1e7ba630
DM
1945 ret = pxa3xx_nand_probe_dt(pdev);
1946 if (ret)
1947 return ret;
1948
453810b7 1949 pdata = dev_get_platdata(&pdev->dev);
e353a20a
LW
1950 if (!pdata) {
1951 dev_err(&pdev->dev, "no platform data defined\n");
1952 return -ENODEV;
1953 }
1954
d456882b
LW
1955 ret = alloc_nand_resource(pdev);
1956 if (ret) {
1957 dev_err(&pdev->dev, "alloc nand resource failed\n");
1958 return ret;
1959 }
e353a20a 1960
d456882b 1961 info = platform_get_drvdata(pdev);
f3c8cfc2
LW
1962 probe_success = 0;
1963 for (cs = 0; cs < pdata->num_cs; cs++) {
063294a3 1964 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
f455578d 1965
18a84e93
EG
1966 /*
1967 * The mtd name matches the one used in 'mtdparts' kernel
1968 * parameter. This name cannot be changed or otherwise
1969 * user's mtd partitions configuration would get broken.
1970 */
1971 mtd->name = "pxa3xx_nand-0";
f3c8cfc2 1972 info->cs = cs;
b7655bcb 1973 ret = pxa3xx_nand_scan(mtd);
f3c8cfc2
LW
1974 if (ret) {
1975 dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1976 cs);
1977 continue;
1978 }
1979
a61ae81a
BN
1980 ret = mtd_device_register(mtd, pdata->parts[cs],
1981 pdata->nr_parts[cs]);
f3c8cfc2
LW
1982 if (!ret)
1983 probe_success = 1;
1984 }
1985
1986 if (!probe_success) {
e353a20a
LW
1987 pxa3xx_nand_remove(pdev);
1988 return -ENODEV;
1989 }
1990
f3c8cfc2 1991 return 0;
e353a20a
LW
1992}
1993
fe69af00 1994#ifdef CONFIG_PM
d3e94f3f 1995static int pxa3xx_nand_suspend(struct device *dev)
fe69af00 1996{
d3e94f3f 1997 struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
fe69af00 1998
f8155a40 1999 if (info->state) {
d3e94f3f 2000 dev_err(dev, "driver busy, state = %d\n", info->state);
fe69af00 2001 return -EAGAIN;
2002 }
2003
d55d31a6 2004 clk_disable(info->clk);
fe69af00 2005 return 0;
2006}
2007
/*
 * PM resume hook: re-enable the controller clock and bring the driver's
 * software state back in sync with the hardware.  The statement order
 * matters: interrupts are masked and stale status is cleared before any
 * command can run again.
 */
static int pxa3xx_nand_resume(struct device *dev)
{
	struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
	int ret;

	ret = clk_enable(info->clk);
	if (ret < 0)
		return ret;

	/* We don't want to handle interrupt without calling mtd routine */
	disable_int(info, NDCR_INT_MASK);

	/*
	 * Directly set the chip select to an invalid value, so that the
	 * driver resets the timing according to the current chip select
	 * at the beginning of cmdfunc.
	 */
	info->cs = 0xff;

	/*
	 * As the spec says, the NDSR would be updated to 0x1800 when
	 * doing the nand_clk disable/enable.
	 * To prevent it from damaging the state machine of the driver,
	 * clear all status bits before resuming.
	 */
	nand_writel(info, NDSR, NDSR_MASK);

	return 0;
}
2037#else
2038#define pxa3xx_nand_suspend NULL
2039#define pxa3xx_nand_resume NULL
2040#endif
2041
d3e94f3f
BN
/* Power-management callbacks (compiled out to NULL when !CONFIG_PM). */
static const struct dev_pm_ops pxa3xx_nand_pm_ops = {
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};
2046
/* Platform driver glue: matched by name or by the DT compatible table. */
static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
		.of_match_table = pxa3xx_nand_dt_ids,
		.pm	= &pxa3xx_nand_pm_ops,
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
};
2056
/* Register the driver and generate module init/exit boilerplate. */
module_platform_driver(pxa3xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");
This page took 0.527007 seconds and 5 git commands to generate.