mtd: nand: pass page number to ecc->write_xxx() methods
1 /*
2 * drivers/mtd/nand/pxa3xx_nand.c
3 *
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
12 */
13
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/dmaengine.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/dma/pxa-dma.h>
21 #include <linux/delay.h>
22 #include <linux/clk.h>
23 #include <linux/mtd/mtd.h>
24 #include <linux/mtd/nand.h>
25 #include <linux/mtd/partitions.h>
26 #include <linux/io.h>
27 #include <linux/iopoll.h>
28 #include <linux/irq.h>
29 #include <linux/slab.h>
30 #include <linux/of.h>
31 #include <linux/of_device.h>
32 #include <linux/of_mtd.h>
33
34 #if defined(CONFIG_ARM) && (defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP))
35 #define ARCH_HAS_DMA
36 #endif
37
38 #include <linux/platform_data/mtd-nand-pxa3xx.h>
39
40 #define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
41 #define NAND_STOP_DELAY msecs_to_jiffies(40)
42 #define PAGE_CHUNK_SIZE (2048)
43
44 /*
45 * Define a buffer size for the initial command that detects the flash device:
46 * STATUS, READID and PARAM.
47 * ONFI param page is 256 bytes, and there are three redundant copies
48 * to be read. JEDEC param page is 512 bytes, and there are also three
49 * redundant copies to be read.
50 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
51 */
52 #define INIT_BUFFER_SIZE 2048
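A quick check of the sizing argument above: three copies of the larger JEDEC parameter page need 3 x 512 = 1536 bytes, so 2048 leaves headroom. A compile-time guard along these lines could encode that invariant (sketch only; BUILD_BUG_ON would have to live inside a function, e.g. somewhere in the probe path):

/* Sketch: 3 * 512 = 1536 <= 2048, so this would never trigger. */
BUILD_BUG_ON(INIT_BUFFER_SIZE < 3 * 512);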
53
54 /* registers and bit definitions */
55 #define NDCR (0x00) /* Control register */
56 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
57 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
58 #define NDSR (0x14) /* Status Register */
59 #define NDPCR (0x18) /* Page Count Register */
60 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
61 #define NDBDR1 (0x20) /* Bad Block Register 1 */
62 #define NDECCCTRL (0x28) /* ECC control */
63 #define NDDB (0x40) /* Data Buffer */
64 #define NDCB0 (0x48) /* Command Buffer0 */
65 #define NDCB1 (0x4C) /* Command Buffer1 */
66 #define NDCB2 (0x50) /* Command Buffer2 */
67
68 #define NDCR_SPARE_EN (0x1 << 31)
69 #define NDCR_ECC_EN (0x1 << 30)
70 #define NDCR_DMA_EN (0x1 << 29)
71 #define NDCR_ND_RUN (0x1 << 28)
72 #define NDCR_DWIDTH_C (0x1 << 27)
73 #define NDCR_DWIDTH_M (0x1 << 26)
74 #define NDCR_PAGE_SZ (0x1 << 24)
75 #define NDCR_NCSX (0x1 << 23)
76 #define NDCR_ND_MODE (0x3 << 21)
77 #define NDCR_NAND_MODE (0x0)
78 #define NDCR_CLR_PG_CNT (0x1 << 20)
79 #define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
80 #define NFCV2_NDCR_STOP_ON_UNCOR (0x1 << 19)
81 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
82 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
83
84 #define NDCR_RA_START (0x1 << 15)
85 #define NDCR_PG_PER_BLK (0x1 << 14)
86 #define NDCR_ND_ARB_EN (0x1 << 12)
87 #define NDCR_INT_MASK (0xFFF)
88
89 #define NDSR_MASK (0xfff)
90 #define NDSR_ERR_CNT_OFF (16)
91 #define NDSR_ERR_CNT_MASK (0x1f)
92 #define NDSR_ERR_CNT(sr) (((sr) >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
93 #define NDSR_RDY (0x1 << 12)
94 #define NDSR_FLASH_RDY (0x1 << 11)
95 #define NDSR_CS0_PAGED (0x1 << 10)
96 #define NDSR_CS1_PAGED (0x1 << 9)
97 #define NDSR_CS0_CMDD (0x1 << 8)
98 #define NDSR_CS1_CMDD (0x1 << 7)
99 #define NDSR_CS0_BBD (0x1 << 6)
100 #define NDSR_CS1_BBD (0x1 << 5)
101 #define NDSR_UNCORERR (0x1 << 4)
102 #define NDSR_CORERR (0x1 << 3)
103 #define NDSR_WRDREQ (0x1 << 2)
104 #define NDSR_RDDREQ (0x1 << 1)
105 #define NDSR_WRCMDREQ (0x1)
106
107 #define NDCB0_LEN_OVRD (0x1 << 28)
108 #define NDCB0_ST_ROW_EN (0x1 << 26)
109 #define NDCB0_AUTO_RS (0x1 << 25)
110 #define NDCB0_CSEL (0x1 << 24)
111 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
112 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
113 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
114 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
115 #define NDCB0_NC (0x1 << 20)
116 #define NDCB0_DBC (0x1 << 19)
117 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
118 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
119 #define NDCB0_CMD2_MASK (0xff << 8)
120 #define NDCB0_CMD1_MASK (0xff)
121 #define NDCB0_ADDR_CYC_SHIFT (16)
122
123 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
124 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
125 #define EXT_CMD_TYPE_READ 4 /* Read */
126 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
127 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
128 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
129 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
130
131 /*
132 * This should be large enough to read 'ONFI' and 'JEDEC'.
133 * Let's use 7 bytes, which is the maximum ID count supported
134 * by the controller (see NDCR_RD_ID_CNT_MASK).
135 */
136 #define READ_ID_BYTES 7
137
138 /* macros for registers read/write */
139 #define nand_writel(info, off, val) \
140 writel_relaxed((val), (info)->mmio_base + (off))
141
142 #define nand_readl(info, off) \
143 readl_relaxed((info)->mmio_base + (off))
144
145 /* error code and state */
146 enum {
147 ERR_NONE = 0,
148 ERR_DMABUSERR = -1,
149 ERR_SENDCMD = -2,
150 ERR_UNCORERR = -3,
151 ERR_BBERR = -4,
152 ERR_CORERR = -5,
153 };
154
155 enum {
156 STATE_IDLE = 0,
157 STATE_PREPARED,
158 STATE_CMD_HANDLE,
159 STATE_DMA_READING,
160 STATE_DMA_WRITING,
161 STATE_DMA_DONE,
162 STATE_PIO_READING,
163 STATE_PIO_WRITING,
164 STATE_CMD_DONE,
165 STATE_READY,
166 };
167
168 enum pxa3xx_nand_variant {
169 PXA3XX_NAND_VARIANT_PXA,
170 PXA3XX_NAND_VARIANT_ARMADA370,
171 };
172
173 struct pxa3xx_nand_host {
174 struct nand_chip chip;
175 struct mtd_info *mtd;
176 void *info_data;
177
178 /* per-chip settings */
179 int use_ecc;
180 int cs;
181
182 /* calculated from pxa3xx_nand_flash data */
183 unsigned int col_addr_cycles;
184 unsigned int row_addr_cycles;
185 };
186
187 struct pxa3xx_nand_info {
188 struct nand_hw_control controller;
189 struct platform_device *pdev;
190
191 struct clk *clk;
192 void __iomem *mmio_base;
193 unsigned long mmio_phys;
194 struct completion cmd_complete, dev_ready;
195
196 unsigned int buf_start;
197 unsigned int buf_count;
198 unsigned int buf_size;
199 unsigned int data_buff_pos;
200 unsigned int oob_buff_pos;
201
202 /* DMA information */
203 struct scatterlist sg;
204 enum dma_data_direction dma_dir;
205 struct dma_chan *dma_chan;
206 dma_cookie_t dma_cookie;
207 int drcmr_dat;
208 int drcmr_cmd;
209
210 unsigned char *data_buff;
211 unsigned char *oob_buff;
212 dma_addr_t data_buff_phys;
213 int data_dma_ch;
214
215 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
216 unsigned int state;
217
218 /*
219 * This driver supports NFCv1 (as found in PXA SoC)
220 * and NFCv2 (as found in Armada 370/XP SoC).
221 */
222 enum pxa3xx_nand_variant variant;
223
224 int cs;
225 int use_ecc; /* use HW ECC ? */
226 int ecc_bch; /* using BCH ECC? */
227 int use_dma; /* use DMA ? */
228 int use_spare; /* use spare ? */
229 int need_wait;
230
231 unsigned int data_size; /* data to be read from FIFO */
232 unsigned int chunk_size; /* split commands chunk size */
233 unsigned int oob_size;
234 unsigned int spare_size;
235 unsigned int ecc_size;
236 unsigned int ecc_err_cnt;
237 unsigned int max_bitflips;
238 int retcode;
239
240 /* cached register value */
241 uint32_t reg_ndcr;
242 uint32_t ndtr0cs0;
243 uint32_t ndtr1cs0;
244
245 /* generated NDCBx register values */
246 uint32_t ndcb0;
247 uint32_t ndcb1;
248 uint32_t ndcb2;
249 uint32_t ndcb3;
250 };
251
252 static bool use_dma = true;
253 module_param(use_dma, bool, 0444);
254 MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
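Because the parameter is registered with mode 0444 it can only be set at load time (e.g. modprobe pxa3xx_nand use_dma=0) and read back afterwards from /sys/module/pxa3xx_nand/parameters/use_dma; the module and sysfs names here assume the default KBUILD_MODNAME for this file.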
255
256 struct pxa3xx_nand_timing {
257 unsigned int tCH; /* Enable signal hold time */
258 unsigned int tCS; /* Enable signal setup time */
259 unsigned int tWH; /* ND_nWE high duration */
260 unsigned int tWP; /* ND_nWE pulse time */
261 unsigned int tRH; /* ND_nRE high duration */
262 unsigned int tRP; /* ND_nRE pulse width */
263 unsigned int tR; /* ND_nWE high to ND_nRE low for read */
264 unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */
265 unsigned int tAR; /* ND_ALE low to ND_nRE low delay */
266 };
267
268 struct pxa3xx_nand_flash {
269 char *name;
270 uint32_t chip_id;
271 unsigned int page_per_block; /* Pages per block (PG_PER_BLK) */
272 unsigned int page_size; /* Page size in bytes (PAGE_SZ) */
273 unsigned int flash_width; /* Width of Flash memory (DWIDTH_M) */
274 unsigned int dfc_width; /* Width of flash controller(DWIDTH_C) */
275 unsigned int num_blocks; /* Number of physical blocks in Flash */
276
277 struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
278 };
279
280 static struct pxa3xx_nand_timing timing[] = {
281 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
282 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
283 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
284 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
285 };
286
287 static struct pxa3xx_nand_flash builtin_flash_types[] = {
288 { "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing[0] },
289 { "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing[1] },
290 { "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing[1] },
291 { "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing[1] },
292 { "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing[2] },
293 { "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing[2] },
294 { "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing[2] },
295 { "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing[2] },
296 { "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] },
297 };
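The chip_id values above pack the first two READID bytes as a little-endian u16, so 0xdaec means manufacturer byte 0xec (Samsung) followed by device byte 0xda. That matches how the detection code further down builds the id it compares against this table:

/* From pxa3xx_nand_scan(): ID bytes ec, da read back as 0xdaec
 * on a little-endian CPU. */
id = *((uint16_t *)(info->data_buff));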
298
299 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
300 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
301
302 static struct nand_bbt_descr bbt_main_descr = {
303 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
304 | NAND_BBT_2BIT | NAND_BBT_VERSION,
305 .offs = 8,
306 .len = 6,
307 .veroffs = 14,
308 .maxblocks = 8, /* Last 8 blocks in each chip */
309 .pattern = bbt_pattern
310 };
311
312 static struct nand_bbt_descr bbt_mirror_descr = {
313 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
314 | NAND_BBT_2BIT | NAND_BBT_VERSION,
315 .offs = 8,
316 .len = 6,
317 .veroffs = 14,
318 .maxblocks = 8, /* Last 8 blocks in each chip */
319 .pattern = bbt_mirror_pattern
320 };
321
322 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
323 .eccbytes = 32,
324 .eccpos = {
325 32, 33, 34, 35, 36, 37, 38, 39,
326 40, 41, 42, 43, 44, 45, 46, 47,
327 48, 49, 50, 51, 52, 53, 54, 55,
328 56, 57, 58, 59, 60, 61, 62, 63},
329 .oobfree = { {2, 30} }
330 };
331
332 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
333 .eccbytes = 64,
334 .eccpos = {
335 32, 33, 34, 35, 36, 37, 38, 39,
336 40, 41, 42, 43, 44, 45, 46, 47,
337 48, 49, 50, 51, 52, 53, 54, 55,
338 56, 57, 58, 59, 60, 61, 62, 63,
339 96, 97, 98, 99, 100, 101, 102, 103,
340 104, 105, 106, 107, 108, 109, 110, 111,
341 112, 113, 114, 115, 116, 117, 118, 119,
342 120, 121, 122, 123, 124, 125, 126, 127},
343 /* Bootrom looks in bytes 0 & 5 for bad blocks */
344 .oobfree = { {6, 26}, { 64, 32} }
345 };
346
347 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
348 .eccbytes = 128,
349 .eccpos = {
350 32, 33, 34, 35, 36, 37, 38, 39,
351 40, 41, 42, 43, 44, 45, 46, 47,
352 48, 49, 50, 51, 52, 53, 54, 55,
353 56, 57, 58, 59, 60, 61, 62, 63},
354 .oobfree = { }
355 };
356
357 /* Define a default flash type, used only for initial flash detection */
358 #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
359
360 #define NDTR0_tCH(c) (min((c), 7) << 19)
361 #define NDTR0_tCS(c) (min((c), 7) << 16)
362 #define NDTR0_tWH(c) (min((c), 7) << 11)
363 #define NDTR0_tWP(c) (min((c), 7) << 8)
364 #define NDTR0_tRH(c) (min((c), 7) << 3)
365 #define NDTR0_tRP(c) (min((c), 7) << 0)
366
367 #define NDTR1_tR(c) (min((c), 65535) << 16)
368 #define NDTR1_tWHR(c) (min((c), 15) << 4)
369 #define NDTR1_tAR(c) (min((c), 15) << 0)
370
371 /* convert nanoseconds to NAND flash controller clock cycles */
372 #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
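Worked example: clk / 1000000 is the controller clock in MHz, so ns2cycle(ns, clk) computes ns * MHz / 1000 with integer truncation. Assuming an illustrative 156 MHz clock (not from any specific datasheet), a 25 ns pulse maps to 25 * 156 / 1000 = 3 cycles (3.9 truncated down), which NDTR0_tRP() then clamps into its 3-bit field; note the truncation rounds down, so marginal timings can come out one cycle short:

/* Illustrative numbers only. */
int cycles = ns2cycle(25, 156000000);   /* (25 * 156) / 1000 = 3 */
uint32_t trp = NDTR0_tRP(cycles);       /* min(3, 7) << 0 = 3 */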
373
374 static const struct of_device_id pxa3xx_nand_dt_ids[] = {
375 {
376 .compatible = "marvell,pxa3xx-nand",
377 .data = (void *)PXA3XX_NAND_VARIANT_PXA,
378 },
379 {
380 .compatible = "marvell,armada370-nand",
381 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
382 },
383 {}
384 };
385 MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
386
387 static enum pxa3xx_nand_variant
388 pxa3xx_nand_get_variant(struct platform_device *pdev)
389 {
390 const struct of_device_id *of_id =
391 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
392 if (!of_id)
393 return PXA3XX_NAND_VARIANT_PXA;
394 return (enum pxa3xx_nand_variant)of_id->data;
395 }
396
397 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
398 const struct pxa3xx_nand_timing *t)
399 {
400 struct pxa3xx_nand_info *info = host->info_data;
401 unsigned long nand_clk = clk_get_rate(info->clk);
402 uint32_t ndtr0, ndtr1;
403
404 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
405 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
406 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
407 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
408 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
409 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
410
411 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
412 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
413 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
414
415 info->ndtr0cs0 = ndtr0;
416 info->ndtr1cs0 = ndtr1;
417 nand_writel(info, NDTR0CS0, ndtr0);
418 nand_writel(info, NDTR1CS0, ndtr1);
419 }
420
421 /*
422 * Set the data and OOB size, depending on the selected
423 * spare and ECC configuration.
424 * Only applicable to READ0, READOOB and PAGEPROG commands.
425 */
426 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
427 struct mtd_info *mtd)
428 {
429 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
430
431 info->data_size = mtd->writesize;
432 if (!oob_enable)
433 return;
434
435 info->oob_size = info->spare_size;
436 if (!info->use_ecc)
437 info->oob_size += info->ecc_size;
438 }
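To make this concrete, take the 4-bit BCH setup for a 2048-byte page established in pxa_ecc_init() below (spare_size = 32, ecc_size = 32). A READ0 or PAGEPROG runs with use_ecc set, so only the spare area is transferred; a plain READOOB leaves use_ecc clear, so the raw ECC bytes are transferred as well:

/*
 * READ0/PAGEPROG (use_ecc = 1): data_size = 2048, oob_size = 32
 * READOOB (use_ecc = 0):        data_size = 2048, oob_size = 32 + 32 = 64
 * With NDCR_SPARE_EN clear, oob_size stays 0 in both cases.
 */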
439
440 /*
441 * NOTE: ND_RUN must be set first, and only then may the
442 * command buffer be written; otherwise the controller does not work.
443 * We enable all the interrupts at the same time, and
444 * let pxa3xx_nand_irq() handle all the logic.
445 */
446 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
447 {
448 uint32_t ndcr;
449
450 ndcr = info->reg_ndcr;
451
452 if (info->use_ecc) {
453 ndcr |= NDCR_ECC_EN;
454 if (info->ecc_bch)
455 nand_writel(info, NDECCCTRL, 0x1);
456 } else {
457 ndcr &= ~NDCR_ECC_EN;
458 if (info->ecc_bch)
459 nand_writel(info, NDECCCTRL, 0x0);
460 }
461
462 if (info->use_dma)
463 ndcr |= NDCR_DMA_EN;
464 else
465 ndcr &= ~NDCR_DMA_EN;
466
467 if (info->use_spare)
468 ndcr |= NDCR_SPARE_EN;
469 else
470 ndcr &= ~NDCR_SPARE_EN;
471
472 ndcr |= NDCR_ND_RUN;
473
474 /* clear status bits and run */
475 nand_writel(info, NDSR, NDSR_MASK);
476 nand_writel(info, NDCR, 0);
477 nand_writel(info, NDCR, ndcr);
478 }
479
480 static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
481 {
482 uint32_t ndcr;
483 int timeout = NAND_STOP_DELAY;
484
485 /* wait for the RUN bit in NDCR to become 0 */
486 ndcr = nand_readl(info, NDCR);
487 while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
488 ndcr = nand_readl(info, NDCR);
489 udelay(1);
490 }
491
492 if (timeout <= 0) {
493 ndcr &= ~NDCR_ND_RUN;
494 nand_writel(info, NDCR, ndcr);
495 }
496 if (info->dma_chan)
497 dmaengine_terminate_all(info->dma_chan);
498
499 /* clear status bits */
500 nand_writel(info, NDSR, NDSR_MASK);
501 }
502
503 static void __maybe_unused
504 enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
505 {
506 uint32_t ndcr;
507
508 ndcr = nand_readl(info, NDCR);
509 nand_writel(info, NDCR, ndcr & ~int_mask);
510 }
511
512 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
513 {
514 uint32_t ndcr;
515
516 ndcr = nand_readl(info, NDCR);
517 nand_writel(info, NDCR, ndcr | int_mask);
518 }
519
520 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
521 {
522 if (info->ecc_bch) {
523 u32 val;
524 int ret;
525
526 /*
527 * According to the datasheet, when reading from NDDB
528 * with BCH enabled, after each 32-byte read we
529 * have to make sure that the NDSR.RDDREQ bit is set.
530 *
531 * Drain the FIFO eight 32-bit reads at a time, and skip
532 * the polling on the last read.
533 */
534 while (len > 8) {
535 readsl(info->mmio_base + NDDB, data, 8);
536
537 ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
538 val & NDSR_RDDREQ, 1000, 5000);
539 if (ret) {
540 dev_err(&info->pdev->dev,
541 "Timeout on RDDREQ while draining the FIFO\n");
542 return;
543 }
544
545 data += 32;
546 len -= 8;
547 }
548 }
549
550 readsl(info->mmio_base + NDDB, data, len);
551 }
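Note that len counts 32-bit words, which is why data advances by 32 bytes while len drops by 8 per iteration. As a worked example, a 2048-byte chunk arrives as len = 512 words: the loop performs 63 polled bursts of 8 words (504 words in total) and the trailing readsl() takes the last 8 words without polling, matching the datasheet rule quoted above.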
552
553 static void handle_data_pio(struct pxa3xx_nand_info *info)
554 {
555 unsigned int do_bytes = min(info->data_size, info->chunk_size);
556
557 switch (info->state) {
558 case STATE_PIO_WRITING:
559 writesl(info->mmio_base + NDDB,
560 info->data_buff + info->data_buff_pos,
561 DIV_ROUND_UP(do_bytes, 4));
562
563 if (info->oob_size > 0)
564 writesl(info->mmio_base + NDDB,
565 info->oob_buff + info->oob_buff_pos,
566 DIV_ROUND_UP(info->oob_size, 4));
567 break;
568 case STATE_PIO_READING:
569 drain_fifo(info,
570 info->data_buff + info->data_buff_pos,
571 DIV_ROUND_UP(do_bytes, 4));
572
573 if (info->oob_size > 0)
574 drain_fifo(info,
575 info->oob_buff + info->oob_buff_pos,
576 DIV_ROUND_UP(info->oob_size, 4));
577 break;
578 default:
579 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
580 info->state);
581 BUG();
582 }
583
584 /* Update buffer pointers for multi-page read/write */
585 info->data_buff_pos += do_bytes;
586 info->oob_buff_pos += info->oob_size;
587 info->data_size -= do_bytes;
588 }
589
590 static void pxa3xx_nand_data_dma_irq(void *data)
591 {
592 struct pxa3xx_nand_info *info = data;
593 struct dma_tx_state state;
594 enum dma_status status;
595
596 status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
597 if (likely(status == DMA_COMPLETE)) {
598 info->state = STATE_DMA_DONE;
599 } else {
600 dev_err(&info->pdev->dev, "DMA error on data channel\n");
601 info->retcode = ERR_DMABUSERR;
602 }
603 dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
604
605 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
606 enable_int(info, NDCR_INT_MASK);
607 }
608
609 static void start_data_dma(struct pxa3xx_nand_info *info)
610 {
611 enum dma_transfer_direction direction;
612 struct dma_async_tx_descriptor *tx;
613
614 switch (info->state) {
615 case STATE_DMA_WRITING:
616 info->dma_dir = DMA_TO_DEVICE;
617 direction = DMA_MEM_TO_DEV;
618 break;
619 case STATE_DMA_READING:
620 info->dma_dir = DMA_FROM_DEVICE;
621 direction = DMA_DEV_TO_MEM;
622 break;
623 default:
624 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
625 info->state);
626 BUG();
627 }
628 info->sg.length = info->data_size +
629 (info->oob_size ? info->spare_size + info->ecc_size : 0);
630 dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
631
632 tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
633 DMA_PREP_INTERRUPT);
634 if (!tx) {
635 dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
636 return;
637 }
638 tx->callback = pxa3xx_nand_data_dma_irq;
639 tx->callback_param = info;
640 info->dma_cookie = dmaengine_submit(tx);
641 dma_async_issue_pending(info->dma_chan);
642 dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
643 __func__, direction, info->dma_cookie, info->sg.length);
644 }
645
646 static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
647 {
648 struct pxa3xx_nand_info *info = data;
649
650 handle_data_pio(info);
651
652 info->state = STATE_CMD_DONE;
653 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
654
655 return IRQ_HANDLED;
656 }
657
658 static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
659 {
660 struct pxa3xx_nand_info *info = devid;
661 unsigned int status, is_completed = 0, is_ready = 0;
662 unsigned int ready, cmd_done;
663 irqreturn_t ret = IRQ_HANDLED;
664
665 if (info->cs == 0) {
666 ready = NDSR_FLASH_RDY;
667 cmd_done = NDSR_CS0_CMDD;
668 } else {
669 ready = NDSR_RDY;
670 cmd_done = NDSR_CS1_CMDD;
671 }
672
673 status = nand_readl(info, NDSR);
674
675 if (status & NDSR_UNCORERR)
676 info->retcode = ERR_UNCORERR;
677 if (status & NDSR_CORERR) {
678 info->retcode = ERR_CORERR;
679 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
680 info->ecc_bch)
681 info->ecc_err_cnt = NDSR_ERR_CNT(status);
682 else
683 info->ecc_err_cnt = 1;
684
685 /*
686 * Each chunk composing a page is corrected independently,
687 * and we need to store maximum number of corrected bitflips
688 * to return it to the MTD layer in ecc.read_page().
689 */
690 info->max_bitflips = max_t(unsigned int,
691 info->max_bitflips,
692 info->ecc_err_cnt);
693 }
694 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
695 /* decide whether to use DMA for the data transfer */
696 if (info->use_dma) {
697 disable_int(info, NDCR_INT_MASK);
698 info->state = (status & NDSR_RDDREQ) ?
699 STATE_DMA_READING : STATE_DMA_WRITING;
700 start_data_dma(info);
701 goto NORMAL_IRQ_EXIT;
702 } else {
703 info->state = (status & NDSR_RDDREQ) ?
704 STATE_PIO_READING : STATE_PIO_WRITING;
705 ret = IRQ_WAKE_THREAD;
706 goto NORMAL_IRQ_EXIT;
707 }
708 }
709 if (status & cmd_done) {
710 info->state = STATE_CMD_DONE;
711 is_completed = 1;
712 }
713 if (status & ready) {
714 info->state = STATE_READY;
715 is_ready = 1;
716 }
717
718 /*
719 * Clear all status bits before issuing the next command; the
720 * next command can and will alter the status bits and deserves
721 * a new interrupt of its own, letting the controller exit the IRQ.
722 */
723 nand_writel(info, NDSR, status);
724
725 if (status & NDSR_WRCMDREQ) {
726 status &= ~NDSR_WRCMDREQ;
727 info->state = STATE_CMD_HANDLE;
728
729 /*
730 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
731 * must be loaded by writing either 12 or 16
732 * bytes directly to NDCB0, four bytes at a time.
733 *
734 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
735 * but each NDCBx register can be read.
736 */
737 nand_writel(info, NDCB0, info->ndcb0);
738 nand_writel(info, NDCB0, info->ndcb1);
739 nand_writel(info, NDCB0, info->ndcb2);
740
741 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
742 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
743 nand_writel(info, NDCB0, info->ndcb3);
744 }
745
746 if (is_completed)
747 complete(&info->cmd_complete);
748 if (is_ready)
749 complete(&info->dev_ready);
750 NORMAL_IRQ_EXIT:
751 return ret;
752 }
753
754 static inline int is_buf_blank(uint8_t *buf, size_t len)
755 {
756 for (; len > 0; len--)
757 if (*buf++ != 0xff)
758 return 0;
759 return 1;
760 }
761
762 static void set_command_address(struct pxa3xx_nand_info *info,
763 unsigned int page_size, uint16_t column, int page_addr)
764 {
765 /* small-page address setting */
766 if (page_size < PAGE_CHUNK_SIZE) {
767 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
768 | (column & 0xFF);
769
770 info->ndcb2 = 0;
771 } else {
772 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
773 | (column & 0xFFFF);
774
775 if (page_addr & 0xFF0000)
776 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
777 else
778 info->ndcb2 = 0;
779 }
780 }
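A worked example of the large-page branch: with page_addr = 0x12345 and column = 0 on a 2048-byte-page device, the column occupies the low half of NDCB1, the low 16 bits of the page address the high half, and the third row-address byte spills into NDCB2:

/*
 * ndcb1 = (0x2345 << 16) | 0x0000 = 0x23450000
 * ndcb2 = (0x12345 & 0xFF0000) >> 16 = 0x01
 */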
781
782 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
783 {
784 struct pxa3xx_nand_host *host = info->host[info->cs];
785 struct mtd_info *mtd = host->mtd;
786
787 /* reset the data and OOB column pointers before handling data */
788 info->buf_start = 0;
789 info->buf_count = 0;
790 info->oob_size = 0;
791 info->data_buff_pos = 0;
792 info->oob_buff_pos = 0;
793 info->use_ecc = 0;
794 info->use_spare = 1;
795 info->retcode = ERR_NONE;
796 info->ecc_err_cnt = 0;
797 info->ndcb3 = 0;
798 info->need_wait = 0;
799
800 switch (command) {
801 case NAND_CMD_READ0:
802 case NAND_CMD_PAGEPROG:
803 info->use_ecc = 1; /* fall through */
804 case NAND_CMD_READOOB:
805 pxa3xx_set_datasize(info, mtd);
806 break;
807 case NAND_CMD_PARAM:
808 info->use_spare = 0;
809 break;
810 default:
811 info->ndcb1 = 0;
812 info->ndcb2 = 0;
813 break;
814 }
815
816 /*
817 * If we are about to issue a read command, or about to set
818 * the write address, then clean the data buffer.
819 */
820 if (command == NAND_CMD_READ0 ||
821 command == NAND_CMD_READOOB ||
822 command == NAND_CMD_SEQIN) {
823
824 info->buf_count = mtd->writesize + mtd->oobsize;
825 memset(info->data_buff, 0xFF, info->buf_count);
826 }
827
828 }
829
830 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
831 int ext_cmd_type, uint16_t column, int page_addr)
832 {
833 int addr_cycle, exec_cmd;
834 struct pxa3xx_nand_host *host;
835 struct mtd_info *mtd;
836
837 host = info->host[info->cs];
838 mtd = host->mtd;
839 addr_cycle = 0;
840 exec_cmd = 1;
841
842 if (info->cs != 0)
843 info->ndcb0 = NDCB0_CSEL;
844 else
845 info->ndcb0 = 0;
846
847 if (command == NAND_CMD_SEQIN)
848 exec_cmd = 0;
849
850 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
851 + host->col_addr_cycles);
852
853 switch (command) {
854 case NAND_CMD_READOOB:
855 case NAND_CMD_READ0:
856 info->buf_start = column;
857 info->ndcb0 |= NDCB0_CMD_TYPE(0)
858 | addr_cycle
859 | NAND_CMD_READ0;
860
861 if (command == NAND_CMD_READOOB)
862 info->buf_start += mtd->writesize;
863
864 /*
865 * Multiple page read needs an 'extended command type' field,
866 * which is either naked-read or last-read according to the
867 * state.
868 */
869 if (mtd->writesize == PAGE_CHUNK_SIZE) {
870 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
871 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
872 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
873 | NDCB0_LEN_OVRD
874 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
875 info->ndcb3 = info->chunk_size +
876 info->oob_size;
877 }
878
879 set_command_address(info, mtd->writesize, column, page_addr);
880 break;
881
882 case NAND_CMD_SEQIN:
883
884 info->buf_start = column;
885 set_command_address(info, mtd->writesize, 0, page_addr);
886
887 /*
888 * Multiple page programming needs to execute the initial
889 * SEQIN command that sets the page address.
890 */
891 if (mtd->writesize > PAGE_CHUNK_SIZE) {
892 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
893 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
894 | addr_cycle
895 | command;
896 /* No data transfer in this case */
897 info->data_size = 0;
898 exec_cmd = 1;
899 }
900 break;
901
902 case NAND_CMD_PAGEPROG:
903 if (is_buf_blank(info->data_buff,
904 (mtd->writesize + mtd->oobsize))) {
905 exec_cmd = 0;
906 break;
907 }
908
909 /* Second command setting for large pages */
910 if (mtd->writesize > PAGE_CHUNK_SIZE) {
911 /*
912 * Multiple page write uses the 'extended command'
913 * field. This can be used to issue a command dispatch
914 * or a naked-write depending on the current stage.
915 */
916 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
917 | NDCB0_LEN_OVRD
918 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
919 info->ndcb3 = info->chunk_size +
920 info->oob_size;
921
922 /*
923 * This is the command dispatch that completes a chunked
924 * page program operation.
925 */
926 if (info->data_size == 0) {
927 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
928 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
929 | command;
930 info->ndcb1 = 0;
931 info->ndcb2 = 0;
932 info->ndcb3 = 0;
933 }
934 } else {
935 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
936 | NDCB0_AUTO_RS
937 | NDCB0_ST_ROW_EN
938 | NDCB0_DBC
939 | (NAND_CMD_PAGEPROG << 8)
940 | NAND_CMD_SEQIN
941 | addr_cycle;
942 }
943 break;
944
945 case NAND_CMD_PARAM:
946 info->buf_count = INIT_BUFFER_SIZE;
947 info->ndcb0 |= NDCB0_CMD_TYPE(0)
948 | NDCB0_ADDR_CYC(1)
949 | NDCB0_LEN_OVRD
950 | command;
951 info->ndcb1 = (column & 0xFF);
952 info->ndcb3 = INIT_BUFFER_SIZE;
953 info->data_size = INIT_BUFFER_SIZE;
954 break;
955
956 case NAND_CMD_READID:
957 info->buf_count = READ_ID_BYTES;
958 info->ndcb0 |= NDCB0_CMD_TYPE(3)
959 | NDCB0_ADDR_CYC(1)
960 | command;
961 info->ndcb1 = (column & 0xFF);
962
963 info->data_size = 8;
964 break;
965 case NAND_CMD_STATUS:
966 info->buf_count = 1;
967 info->ndcb0 |= NDCB0_CMD_TYPE(4)
968 | NDCB0_ADDR_CYC(1)
969 | command;
970
971 info->data_size = 8;
972 break;
973
974 case NAND_CMD_ERASE1:
975 info->ndcb0 |= NDCB0_CMD_TYPE(2)
976 | NDCB0_AUTO_RS
977 | NDCB0_ADDR_CYC(3)
978 | NDCB0_DBC
979 | (NAND_CMD_ERASE2 << 8)
980 | NAND_CMD_ERASE1;
981 info->ndcb1 = page_addr;
982 info->ndcb2 = 0;
983
984 break;
985 case NAND_CMD_RESET:
986 info->ndcb0 |= NDCB0_CMD_TYPE(5)
987 | command;
988
989 break;
990
991 case NAND_CMD_ERASE2:
992 exec_cmd = 0;
993 break;
994
995 default:
996 exec_cmd = 0;
997 dev_err(&info->pdev->dev, "non-supported command %x\n",
998 command);
999 break;
1000 }
1001
1002 return exec_cmd;
1003 }
1004
1005 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
1006 int column, int page_addr)
1007 {
1008 struct pxa3xx_nand_host *host = mtd->priv;
1009 struct pxa3xx_nand_info *info = host->info_data;
1010 int exec_cmd;
1011
1012 /*
1013 * If this is an x16 device, then convert the input
1014 * "byte" address into a "word" address appropriate
1015 * for indexing a word-oriented device
1016 */
1017 if (info->reg_ndcr & NDCR_DWIDTH_M)
1018 column /= 2;
1019
1020 /*
1021 * Different NAND chips may be hooked up to different
1022 * chip selects, so check whether the chip select has
1023 * changed; if so, reset the timing settings.
1024 */
1025 if (info->cs != host->cs) {
1026 info->cs = host->cs;
1027 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1028 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1029 }
1030
1031 prepare_start_command(info, command);
1032
1033 info->state = STATE_PREPARED;
1034 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1035
1036 if (exec_cmd) {
1037 init_completion(&info->cmd_complete);
1038 init_completion(&info->dev_ready);
1039 info->need_wait = 1;
1040 pxa3xx_nand_start(info);
1041
1042 if (!wait_for_completion_timeout(&info->cmd_complete,
1043 CHIP_DELAY_TIMEOUT)) {
1044 dev_err(&info->pdev->dev, "Wait timed out!\n");
1045 /* Stop State Machine for next command cycle */
1046 pxa3xx_nand_stop(info);
1047 }
1048 }
1049 info->state = STATE_IDLE;
1050 }
1051
1052 static void nand_cmdfunc_extended(struct mtd_info *mtd,
1053 const unsigned command,
1054 int column, int page_addr)
1055 {
1056 struct pxa3xx_nand_host *host = mtd->priv;
1057 struct pxa3xx_nand_info *info = host->info_data;
1058 int exec_cmd, ext_cmd_type;
1059
1060 /*
1061 * If this is an x16 device, then convert the input
1062 * "byte" address into a "word" address appropriate
1063 * for indexing a word-oriented device
1064 */
1065 if (info->reg_ndcr & NDCR_DWIDTH_M)
1066 column /= 2;
1067
1068 /*
1069 * Different NAND chips may be hooked up to different
1070 * chip selects, so check whether the chip select has
1071 * changed; if so, reset the timing settings.
1072 */
1073 if (info->cs != host->cs) {
1074 info->cs = host->cs;
1075 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1076 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1077 }
1078
1079 /* Select the extended command for the first command */
1080 switch (command) {
1081 case NAND_CMD_READ0:
1082 case NAND_CMD_READOOB:
1083 ext_cmd_type = EXT_CMD_TYPE_MONO;
1084 break;
1085 case NAND_CMD_SEQIN:
1086 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1087 break;
1088 case NAND_CMD_PAGEPROG:
1089 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1090 break;
1091 default:
1092 ext_cmd_type = 0;
1093 break;
1094 }
1095
1096 prepare_start_command(info, command);
1097
1098 /*
1099 * Prepare the "is ready" completion before starting a command
1100 * transaction sequence. If the command is not executed, the
1101 * completion is completed immediately; see below.
1102 *
1103 * We can do this once, before the loop, because the command
1104 * variable is invariant and thus so is exec_cmd.
1105 */
1106 info->need_wait = 1;
1107 init_completion(&info->dev_ready);
1108 do {
1109 info->state = STATE_PREPARED;
1110 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1111 column, page_addr);
1112 if (!exec_cmd) {
1113 info->need_wait = 0;
1114 complete(&info->dev_ready);
1115 break;
1116 }
1117
1118 init_completion(&info->cmd_complete);
1119 pxa3xx_nand_start(info);
1120
1121 if (!wait_for_completion_timeout(&info->cmd_complete,
1122 CHIP_DELAY_TIMEOUT)) {
1123 dev_err(&info->pdev->dev, "Wait timed out!\n");
1124 /* Stop State Machine for next command cycle */
1125 pxa3xx_nand_stop(info);
1126 break;
1127 }
1128
1129 /* Check if the sequence is complete */
1130 if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
1131 break;
1132
1133 /*
1134 * After a split program command sequence has issued
1135 * the command dispatch, the command sequence is complete.
1136 */
1137 if (info->data_size == 0 &&
1138 command == NAND_CMD_PAGEPROG &&
1139 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1140 break;
1141
1142 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1143 /* Last read: issue a 'last naked read' */
1144 if (info->data_size == info->chunk_size)
1145 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1146 else
1147 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1148
1149 /*
1150 * If a split program command has no more data to transfer,
1151 * the command dispatch must be issued to complete it.
1152 */
1153 } else if (command == NAND_CMD_PAGEPROG &&
1154 info->data_size == 0) {
1155 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1156 }
1157 } while (1);
1158
1159 info->state = STATE_IDLE;
1160 }
1161
1162 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1163 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1164 int page)
1165 {
1166 chip->write_buf(mtd, buf, mtd->writesize);
1167 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1168
1169 return 0;
1170 }
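The page parameter is accepted here purely to match the ecc->write_page() prototype this patch changes; the controller already latched the page address when cmdfunc issued SEQIN. Roughly, the generic nand_base caller drives the sequence like this (paraphrased sketch, not code from this file):

/* Paraphrased sketch of the generic caller: */
chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
status = chip->ecc.write_page(mtd, chip, buf, oob_required, page);
chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);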
1171
1172 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1173 struct nand_chip *chip, uint8_t *buf, int oob_required,
1174 int page)
1175 {
1176 struct pxa3xx_nand_host *host = mtd->priv;
1177 struct pxa3xx_nand_info *info = host->info_data;
1178
1179 chip->read_buf(mtd, buf, mtd->writesize);
1180 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1181
1182 if (info->retcode == ERR_CORERR && info->use_ecc) {
1183 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1184
1185 } else if (info->retcode == ERR_UNCORERR) {
1186 /*
1187 * For a blank page (all 0xff), the HW calculates its ECC as
1188 * 0, which is different from the ECC information within the
1189 * OOB; ignore such uncorrectable errors.
1190 */
1191 if (is_buf_blank(buf, mtd->writesize))
1192 info->retcode = ERR_NONE;
1193 else
1194 mtd->ecc_stats.failed++;
1195 }
1196
1197 return info->max_bitflips;
1198 }
1199
1200 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1201 {
1202 struct pxa3xx_nand_host *host = mtd->priv;
1203 struct pxa3xx_nand_info *info = host->info_data;
1204 char retval = 0xFF;
1205
1206 if (info->buf_start < info->buf_count)
1207 /* Has a new command just been sent? */
1208 retval = info->data_buff[info->buf_start++];
1209
1210 return retval;
1211 }
1212
1213 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1214 {
1215 struct pxa3xx_nand_host *host = mtd->priv;
1216 struct pxa3xx_nand_info *info = host->info_data;
1217 u16 retval = 0xFFFF;
1218
1219 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1220 retval = *((u16 *)(info->data_buff+info->buf_start));
1221 info->buf_start += 2;
1222 }
1223 return retval;
1224 }
1225
1226 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1227 {
1228 struct pxa3xx_nand_host *host = mtd->priv;
1229 struct pxa3xx_nand_info *info = host->info_data;
1230 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1231
1232 memcpy(buf, info->data_buff + info->buf_start, real_len);
1233 info->buf_start += real_len;
1234 }
1235
1236 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1237 const uint8_t *buf, int len)
1238 {
1239 struct pxa3xx_nand_host *host = mtd->priv;
1240 struct pxa3xx_nand_info *info = host->info_data;
1241 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1242
1243 memcpy(info->data_buff + info->buf_start, buf, real_len);
1244 info->buf_start += real_len;
1245 }
1246
1247 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
1248 {
1249 return;
1250 }
1251
1252 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1253 {
1254 struct pxa3xx_nand_host *host = mtd->priv;
1255 struct pxa3xx_nand_info *info = host->info_data;
1256
1257 if (info->need_wait) {
1258 info->need_wait = 0;
1259 if (!wait_for_completion_timeout(&info->dev_ready,
1260 CHIP_DELAY_TIMEOUT)) {
1261 dev_err(&info->pdev->dev, "Ready timed out!\n");
1262 return NAND_STATUS_FAIL;
1263 }
1264 }
1265
1266 /* the command issuing path has already waited for command completion */
1267 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1268 if (info->retcode == ERR_NONE)
1269 return 0;
1270 else
1271 return NAND_STATUS_FAIL;
1272 }
1273
1274 return NAND_STATUS_READY;
1275 }
1276
1277 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
1278 const struct pxa3xx_nand_flash *f)
1279 {
1280 struct platform_device *pdev = info->pdev;
1281 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1282 struct pxa3xx_nand_host *host = info->host[info->cs];
1283 uint32_t ndcr = 0x0; /* enable all interrupts */
1284
1285 if (f->page_size != 2048 && f->page_size != 512) {
1286 dev_err(&pdev->dev, "Only 512 and 2048 byte page sizes are supported\n");
1287 return -EINVAL;
1288 }
1289
1290 if (f->flash_width != 16 && f->flash_width != 8) {
1291 dev_err(&pdev->dev, "Only 8-bit and 16-bit bus widths are supported\n");
1292 return -EINVAL;
1293 }
1294
1295 /* calculate addressing information */
1296 host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
1297
1298 if (f->num_blocks * f->page_per_block > 65536)
1299 host->row_addr_cycles = 3;
1300 else
1301 host->row_addr_cycles = 2;
1302
1303 ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1304 ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1305 ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
1306 ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
1307 ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
1308 ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
1309
1310 ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1311 ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1312
1313 info->reg_ndcr = ndcr;
1314
1315 pxa3xx_nand_set_timing(host, f->timing);
1316 return 0;
1317 }
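Worked example with the "512MiB 8-bit" entry from the table above (0xdc2c, 4096 blocks of 64 pages): the 2048-byte page needs 2 column cycles, and 4096 * 64 = 262144 pages > 65536 forces 3 row cycles, i.e. 5 address cycles in total, which is what prepare_set_command() later feeds into NDCB0_ADDR_CYC():

/*
 * col_addr_cycles = 2        (page_size == 2048)
 * row_addr_cycles = 3        (4096 * 64 = 262144 > 65536)
 * NDCB0_ADDR_CYC(2 + 3) ->   5 address cycles in NDCB0
 */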
1318
1319 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1320 {
1321 uint32_t ndcr = nand_readl(info, NDCR);
1322
1323 /* Set an initial chunk size */
1324 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1325 info->reg_ndcr = ndcr &
1326 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1327 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1328 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1329 return 0;
1330 }
1331
1332 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1333 {
1334 struct platform_device *pdev = info->pdev;
1335 struct dma_slave_config config;
1336 dma_cap_mask_t mask;
1337 struct pxad_param param;
1338 int ret;
1339
1340 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1341 if (info->data_buff == NULL)
1342 return -ENOMEM;
1343 if (use_dma == 0)
1344 return 0;
1345
1346 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1347 if (ret)
1348 return ret;
1349
1350 sg_init_one(&info->sg, info->data_buff, info->buf_size);
1351 dma_cap_zero(mask);
1352 dma_cap_set(DMA_SLAVE, mask);
1353 param.prio = PXAD_PRIO_LOWEST;
1354 param.drcmr = info->drcmr_dat;
1355 info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
1356 &param, &pdev->dev,
1357 "data");
1358 if (!info->dma_chan) {
1359 dev_err(&pdev->dev, "unable to request data dma channel\n");
1360 return -ENODEV;
1361 }
1362
1363 memset(&config, 0, sizeof(config));
1364 config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1365 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1366 config.src_addr = info->mmio_phys + NDDB;
1367 config.dst_addr = info->mmio_phys + NDDB;
1368 config.src_maxburst = 32;
1369 config.dst_maxburst = 32;
1370 ret = dmaengine_slave_config(info->dma_chan, &config);
1371 if (ret < 0) {
1372 dev_err(&info->pdev->dev,
1373 "dma channel configuration failed: %d\n",
1374 ret);
1375 return ret;
1376 }
1377
1378 /*
1379 * Now that DMA buffers are allocated we turn on
1380 * DMA proper for I/O operations.
1381 */
1382 info->use_dma = 1;
1383 return 0;
1384 }
1385
1386 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1387 {
1388 if (info->use_dma) {
1389 dmaengine_terminate_all(info->dma_chan);
1390 dma_release_channel(info->dma_chan);
1391 }
1392 kfree(info->data_buff);
1393 }
1394
1395 static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
1396 {
1397 struct mtd_info *mtd;
1398 struct nand_chip *chip;
1399 int ret;
1400
1401 mtd = info->host[info->cs]->mtd;
1402 chip = mtd->priv;
1403
1404 /* try the common default timing first */
1405 ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
1406 if (ret)
1407 return ret;
1408
1409 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1410 ret = chip->waitfunc(mtd, chip);
1411 if (ret & NAND_STATUS_FAIL)
1412 return -ENODEV;
1413
1414 return 0;
1415 }
1416
1417 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1418 struct nand_ecc_ctrl *ecc,
1419 int strength, int ecc_stepsize, int page_size)
1420 {
1421 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1422 info->chunk_size = 2048;
1423 info->spare_size = 40;
1424 info->ecc_size = 24;
1425 ecc->mode = NAND_ECC_HW;
1426 ecc->size = 512;
1427 ecc->strength = 1;
1428
1429 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1430 info->chunk_size = 512;
1431 info->spare_size = 8;
1432 info->ecc_size = 8;
1433 ecc->mode = NAND_ECC_HW;
1434 ecc->size = 512;
1435 ecc->strength = 1;
1436
1437 /*
1438 * Required ECC: 4-bit correction per 512 bytes
1439 * Select: 16-bit correction per 2048 bytes
1440 */
1441 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1442 info->ecc_bch = 1;
1443 info->chunk_size = 2048;
1444 info->spare_size = 32;
1445 info->ecc_size = 32;
1446 ecc->mode = NAND_ECC_HW;
1447 ecc->size = info->chunk_size;
1448 ecc->layout = &ecc_layout_2KB_bch4bit;
1449 ecc->strength = 16;
1450
1451 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1452 info->ecc_bch = 1;
1453 info->chunk_size = 2048;
1454 info->spare_size = 32;
1455 info->ecc_size = 32;
1456 ecc->mode = NAND_ECC_HW;
1457 ecc->size = info->chunk_size;
1458 ecc->layout = &ecc_layout_4KB_bch4bit;
1459 ecc->strength = 16;
1460
1461 /*
1462 * Required ECC: 8-bit correction per 512 bytes
1463 * Select: 16-bit correction per 1024 bytes
1464 */
1465 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1466 info->ecc_bch = 1;
1467 info->chunk_size = 1024;
1468 info->spare_size = 0;
1469 info->ecc_size = 32;
1470 ecc->mode = NAND_ECC_HW;
1471 ecc->size = info->chunk_size;
1472 ecc->layout = &ecc_layout_4KB_bch8bit;
1473 ecc->strength = 16;
1474 } else {
1475 dev_err(&info->pdev->dev,
1476 "ECC strength %d at page size %d is not supported\n",
1477 strength, page_size);
1478 return -ENODEV;
1479 }
1480
1481 dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
1482 ecc->strength, ecc->size);
1483 return 0;
1484 }
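The over-provisioned "Select" modes really do satisfy their "Required" lines because strength scales with step size: 16 bits per 2048 bytes equals 4 bits per 512 bytes, and 16 bits per 1024 bytes equals 8 bits per 512 bytes (1 bit per 64 bytes in both the required and selected case). In cross-multiplied form:

/*
 * 16 / 2048 >= 4 / 512   because  16 * 512 = 8192 >= 4 * 2048 = 8192
 * 16 / 1024 >= 8 / 512   because  16 * 512 = 8192 >= 8 * 1024 = 8192
 */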
1485
1486 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1487 {
1488 struct pxa3xx_nand_host *host = mtd->priv;
1489 struct pxa3xx_nand_info *info = host->info_data;
1490 struct platform_device *pdev = info->pdev;
1491 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1492 struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
1493 const struct pxa3xx_nand_flash *f = NULL;
1494 struct nand_chip *chip = mtd->priv;
1495 uint32_t id = -1;
1496 uint64_t chipsize;
1497 int i, ret, num;
1498 uint16_t ecc_strength, ecc_step;
1499
1500 if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
1501 goto KEEP_CONFIG;
1502
1503 /* Set a default chunk size */
1504 info->chunk_size = 512;
1505
1506 ret = pxa3xx_nand_sensing(info);
1507 if (ret) {
1508 dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
1509 info->cs);
1510
1511 return ret;
1512 }
1513
1514 chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
1515 id = *((uint16_t *)(info->data_buff));
1516 if (id != 0)
1517 dev_info(&info->pdev->dev, "Detected a flash id %x\n", id);
1518 else {
1519 dev_warn(&info->pdev->dev,
1520 "Read out ID 0, potential timing set wrong!!\n");
1521
1522 return -EINVAL;
1523 }
1524
1525 num = ARRAY_SIZE(builtin_flash_types) - 1;
1526 for (i = 0; i < num; i++) {
1527 f = &builtin_flash_types[i + 1];
1528
1529 /* find the chip in default list */
1530 if (f->chip_id == id)
1531 break;
1532 }
1533
1534 if (i >= (ARRAY_SIZE(builtin_flash_types) - 1)) {
1535 dev_err(&info->pdev->dev, "ERROR: flash type not defined!\n");
1536
1537 return -EINVAL;
1538 }
1539
1540 ret = pxa3xx_nand_config_flash(info, f);
1541 if (ret) {
1542 dev_err(&info->pdev->dev, "ERROR: flash configuration failed\n");
1543 return ret;
1544 }
1545
1546 memset(pxa3xx_flash_ids, 0, sizeof(pxa3xx_flash_ids));
1547
1548 pxa3xx_flash_ids[0].name = f->name;
1549 pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
1550 pxa3xx_flash_ids[0].pagesize = f->page_size;
1551 chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
1552 pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
1553 pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
1554 if (f->flash_width == 16)
1555 pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
1556 pxa3xx_flash_ids[1].name = NULL;
1557 def = pxa3xx_flash_ids;
1558 KEEP_CONFIG:
1559 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1560 if (info->reg_ndcr & NDCR_DWIDTH_M)
1561 chip->options |= NAND_BUSWIDTH_16;
1562
1563 /* Device detection must be done with ECC disabled */
1564 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1565 nand_writel(info, NDECCCTRL, 0x0);
1566
1567 if (nand_scan_ident(mtd, 1, def))
1568 return -ENODEV;
1569
1570 if (pdata->flash_bbt) {
1571 /*
1572 * We'll use a bad block table stored in-flash and don't
1573 * allow writing the bad block marker to the flash.
1574 */
1575 chip->bbt_options |= NAND_BBT_USE_FLASH |
1576 NAND_BBT_NO_OOB_BBM;
1577 chip->bbt_td = &bbt_main_descr;
1578 chip->bbt_md = &bbt_mirror_descr;
1579 }
1580
1581 /*
1582 * If the page size is bigger than the FIFO size, let's check
1583 * we are given the right variant and then switch to the extended
1584 * (aka split) command handling.
1585 */
1586 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1587 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1588 chip->cmdfunc = nand_cmdfunc_extended;
1589 } else {
1590 dev_err(&info->pdev->dev,
1591 "unsupported page size on this variant\n");
1592 return -ENODEV;
1593 }
1594 }
1595
1596 if (pdata->ecc_strength && pdata->ecc_step_size) {
1597 ecc_strength = pdata->ecc_strength;
1598 ecc_step = pdata->ecc_step_size;
1599 } else {
1600 ecc_strength = chip->ecc_strength_ds;
1601 ecc_step = chip->ecc_step_ds;
1602 }
1603
1604 /* Set default ECC strength requirements on non-ONFI devices */
1605 if (ecc_strength < 1 && ecc_step < 1) {
1606 ecc_strength = 1;
1607 ecc_step = 512;
1608 }
1609
1610 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1611 ecc_step, mtd->writesize);
1612 if (ret)
1613 return ret;
1614
1615 /* calculate addressing information */
1616 if (mtd->writesize >= 2048)
1617 host->col_addr_cycles = 2;
1618 else
1619 host->col_addr_cycles = 1;
1620
1621 /* release the initial buffer */
1622 kfree(info->data_buff);
1623
1624 /* allocate the real data + oob buffer */
1625 info->buf_size = mtd->writesize + mtd->oobsize;
1626 ret = pxa3xx_nand_init_buff(info);
1627 if (ret)
1628 return ret;
1629 info->oob_buff = info->data_buff + mtd->writesize;
1630
1631 if ((mtd->size >> chip->page_shift) > 65536)
1632 host->row_addr_cycles = 3;
1633 else
1634 host->row_addr_cycles = 2;
1635 return nand_scan_tail(mtd);
1636 }
1637
1638 static int alloc_nand_resource(struct platform_device *pdev)
1639 {
1640 struct pxa3xx_nand_platform_data *pdata;
1641 struct pxa3xx_nand_info *info;
1642 struct pxa3xx_nand_host *host;
1643 struct nand_chip *chip = NULL;
1644 struct mtd_info *mtd;
1645 struct resource *r;
1646 int ret, irq, cs;
1647
1648 pdata = dev_get_platdata(&pdev->dev);
1649 if (pdata->num_cs <= 0)
1650 return -ENODEV;
1651 info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1652 sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1653 if (!info)
1654 return -ENOMEM;
1655
1656 info->pdev = pdev;
1657 info->variant = pxa3xx_nand_get_variant(pdev);
1658 for (cs = 0; cs < pdata->num_cs; cs++) {
1659 mtd = (void *)&info[1] + (sizeof(*mtd) + sizeof(*host)) * cs;
1660 chip = (struct nand_chip *)(&mtd[1]);
1661 host = (struct pxa3xx_nand_host *)chip;
1662 info->host[cs] = host;
1663 host->mtd = mtd;
1664 host->cs = cs;
1665 host->info_data = info;
1666 mtd->priv = host;
1667 mtd->dev.parent = &pdev->dev;
1668
1669 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1670 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1671 chip->controller = &info->controller;
1672 chip->waitfunc = pxa3xx_nand_waitfunc;
1673 chip->select_chip = pxa3xx_nand_select_chip;
1674 chip->read_word = pxa3xx_nand_read_word;
1675 chip->read_byte = pxa3xx_nand_read_byte;
1676 chip->read_buf = pxa3xx_nand_read_buf;
1677 chip->write_buf = pxa3xx_nand_write_buf;
1678 chip->options |= NAND_NO_SUBPAGE_WRITE;
1679 chip->cmdfunc = nand_cmdfunc;
1680 }
1681
1682 spin_lock_init(&chip->controller->lock);
1683 init_waitqueue_head(&chip->controller->wq);
1684 info->clk = devm_clk_get(&pdev->dev, NULL);
1685 if (IS_ERR(info->clk)) {
1686 dev_err(&pdev->dev, "failed to get nand clock\n");
1687 return PTR_ERR(info->clk);
1688 }
1689 ret = clk_prepare_enable(info->clk);
1690 if (ret < 0)
1691 return ret;
1692
1693 if (use_dma) {
1694 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1695 if (r == NULL) {
1696 dev_err(&pdev->dev,
1697 "no resource defined for data DMA\n");
1698 ret = -ENXIO;
1699 goto fail_disable_clk;
1700 }
1701 info->drcmr_dat = r->start;
1702
1703 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1704 if (r == NULL) {
1705 dev_err(&pdev->dev,
1706 "no resource defined for cmd DMA\n");
1707 ret = -ENXIO;
1708 goto fail_disable_clk;
1709 }
1710 info->drcmr_cmd = r->start;
1711 }
1712
1713 irq = platform_get_irq(pdev, 0);
1714 if (irq < 0) {
1715 dev_err(&pdev->dev, "no IRQ resource defined\n");
1716 ret = -ENXIO;
1717 goto fail_disable_clk;
1718 }
1719
1720 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1721 info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1722 if (IS_ERR(info->mmio_base)) {
1723 ret = PTR_ERR(info->mmio_base);
1724 goto fail_disable_clk;
1725 }
1726 info->mmio_phys = r->start;
1727
1728 /* Allocate a buffer to allow flash detection */
1729 info->buf_size = INIT_BUFFER_SIZE;
1730 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1731 if (info->data_buff == NULL) {
1732 ret = -ENOMEM;
1733 goto fail_disable_clk;
1734 }
1735
1736 /* initialize all interrupts to be disabled */
1737 disable_int(info, NDSR_MASK);
1738
1739 ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1740 pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1741 pdev->name, info);
1742 if (ret < 0) {
1743 dev_err(&pdev->dev, "failed to request IRQ\n");
1744 goto fail_free_buf;
1745 }
1746
1747 platform_set_drvdata(pdev, info);
1748
1749 return 0;
1750
1751 fail_free_buf:
1752 free_irq(irq, info);
1753 kfree(info->data_buff);
1754 fail_disable_clk:
1755 clk_disable_unprepare(info->clk);
1756 return ret;
1757 }
1758
1759 static int pxa3xx_nand_remove(struct platform_device *pdev)
1760 {
1761 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1762 struct pxa3xx_nand_platform_data *pdata;
1763 int irq, cs;
1764
1765 if (!info)
1766 return 0;
1767
1768 pdata = dev_get_platdata(&pdev->dev);
1769
1770 irq = platform_get_irq(pdev, 0);
1771 if (irq >= 0)
1772 free_irq(irq, info);
1773 pxa3xx_nand_free_buff(info);
1774
1775 /*
1776 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
1777 * In order to prevent a lockup of the system bus, the DFI bus
1778 * arbitration is granted to SMC upon driver removal. This is done by
1779 * setting the x_ARB_CNTL bit, which also prevents the NAND to have
1780 * access to the bus anymore.
1781 */
1782 nand_writel(info, NDCR,
1783 (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
1784 NFCV1_NDCR_ARB_CNTL);
1785 clk_disable_unprepare(info->clk);
1786
1787 for (cs = 0; cs < pdata->num_cs; cs++)
1788 nand_release(info->host[cs]->mtd);
1789 return 0;
1790 }
1791
1792 static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1793 {
1794 struct pxa3xx_nand_platform_data *pdata;
1795 struct device_node *np = pdev->dev.of_node;
1796 const struct of_device_id *of_id =
1797 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1798
1799 if (!of_id)
1800 return 0;
1801
1802 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1803 if (!pdata)
1804 return -ENOMEM;
1805
1806 if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1807 pdata->enable_arbiter = 1;
1808 if (of_get_property(np, "marvell,nand-keep-config", NULL))
1809 pdata->keep_config = 1;
1810 of_property_read_u32(np, "num-cs", &pdata->num_cs);
1811 pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1812
1813 pdata->ecc_strength = of_get_nand_ecc_strength(np);
1814 if (pdata->ecc_strength < 0)
1815 pdata->ecc_strength = 0;
1816
1817 pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
1818 if (pdata->ecc_step_size < 0)
1819 pdata->ecc_step_size = 0;
1820
1821 pdev->dev.platform_data = pdata;
1822
1823 return 0;
1824 }
1825
1826 static int pxa3xx_nand_probe(struct platform_device *pdev)
1827 {
1828 struct pxa3xx_nand_platform_data *pdata;
1829 struct mtd_part_parser_data ppdata = {};
1830 struct pxa3xx_nand_info *info;
1831 int ret, cs, probe_success, dma_available;
1832
1833 dma_available = IS_ENABLED(CONFIG_ARM) &&
1834 (IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
1835 if (use_dma && !dma_available) {
1836 use_dma = 0;
1837 dev_warn(&pdev->dev,
1838 "This platform can't do DMA on this device\n");
1839 }
1840
1841 ret = pxa3xx_nand_probe_dt(pdev);
1842 if (ret)
1843 return ret;
1844
1845 pdata = dev_get_platdata(&pdev->dev);
1846 if (!pdata) {
1847 dev_err(&pdev->dev, "no platform data defined\n");
1848 return -ENODEV;
1849 }
1850
1851 ret = alloc_nand_resource(pdev);
1852 if (ret) {
1853 dev_err(&pdev->dev, "alloc nand resource failed\n");
1854 return ret;
1855 }
1856
1857 info = platform_get_drvdata(pdev);
1858 probe_success = 0;
1859 for (cs = 0; cs < pdata->num_cs; cs++) {
1860 struct mtd_info *mtd = info->host[cs]->mtd;
1861
1862 /*
1863 * The mtd name matches the one used in 'mtdparts' kernel
1864 * parameter. This name cannot be changed, or the user's
1865 * mtd partition configuration would break.
1866 */
1867 mtd->name = "pxa3xx_nand-0";
1868 info->cs = cs;
1869 ret = pxa3xx_nand_scan(mtd);
1870 if (ret) {
1871 dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1872 cs);
1873 continue;
1874 }
1875
1876 ppdata.of_node = pdev->dev.of_node;
1877 ret = mtd_device_parse_register(mtd, NULL,
1878 &ppdata, pdata->parts[cs],
1879 pdata->nr_parts[cs]);
1880 if (!ret)
1881 probe_success = 1;
1882 }
1883
1884 if (!probe_success) {
1885 pxa3xx_nand_remove(pdev);
1886 return -ENODEV;
1887 }
1888
1889 return 0;
1890 }
1891
1892 #ifdef CONFIG_PM
1893 static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1894 {
1895 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1896 struct pxa3xx_nand_platform_data *pdata;
1897 struct mtd_info *mtd;
1898 int cs;
1899
1900 pdata = dev_get_platdata(&pdev->dev);
1901 if (info->state) {
1902 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1903 return -EAGAIN;
1904 }
1905
1906 for (cs = 0; cs < pdata->num_cs; cs++) {
1907 mtd = info->host[cs]->mtd;
1908 mtd_suspend(mtd);
1909 }
1910
1911 return 0;
1912 }
1913
1914 static int pxa3xx_nand_resume(struct platform_device *pdev)
1915 {
1916 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1917 struct pxa3xx_nand_platform_data *pdata;
1918 struct mtd_info *mtd;
1919 int cs;
1920
1921 pdata = dev_get_platdata(&pdev->dev);
1922 /* We don't want to handle any interrupt before the mtd resume routines run */
1923 disable_int(info, NDCR_INT_MASK);
1924
1925 /*
1926 * Directly set the chip select to an invalid value
1927 * so that the driver resets the timing according to
1928 * the current chip select at the beginning of cmdfunc.
1929 */
1930 info->cs = 0xff;
1931
1932 /*
1933 * As the spec says, NDSR is updated to 0x1800 when
1934 * nand_clk is disabled and re-enabled.
1935 * To prevent this from damaging the driver's state machine,
1936 * clear all status bits before resuming.
1937 */
1938 nand_writel(info, NDSR, NDSR_MASK);
1939 for (cs = 0; cs < pdata->num_cs; cs++) {
1940 mtd = info->host[cs]->mtd;
1941 mtd_resume(mtd);
1942 }
1943
1944 return 0;
1945 }
1946 #else
1947 #define pxa3xx_nand_suspend NULL
1948 #define pxa3xx_nand_resume NULL
1949 #endif
1950
1951 static struct platform_driver pxa3xx_nand_driver = {
1952 .driver = {
1953 .name = "pxa3xx-nand",
1954 .of_match_table = pxa3xx_nand_dt_ids,
1955 },
1956 .probe = pxa3xx_nand_probe,
1957 .remove = pxa3xx_nand_remove,
1958 .suspend = pxa3xx_nand_suspend,
1959 .resume = pxa3xx_nand_resume,
1960 };
1961
1962 module_platform_driver(pxa3xx_nand_driver);
1963
1964 MODULE_LICENSE("GPL");
1965 MODULE_DESCRIPTION("PXA3xx NAND controller driver");