mtd: nand: sunxi: remove direct mtd->priv accesses
[deliverable/linux.git] / drivers / mtd / nand / pxa3xx_nand.c
1 /*
2 * drivers/mtd/nand/pxa3xx_nand.c
3 *
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
12 */
13
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/dmaengine.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/dma/pxa-dma.h>
21 #include <linux/delay.h>
22 #include <linux/clk.h>
23 #include <linux/mtd/mtd.h>
24 #include <linux/mtd/nand.h>
25 #include <linux/mtd/partitions.h>
26 #include <linux/io.h>
27 #include <linux/iopoll.h>
28 #include <linux/irq.h>
29 #include <linux/slab.h>
30 #include <linux/of.h>
31 #include <linux/of_device.h>
32 #include <linux/of_mtd.h>
33 #include <linux/platform_data/mtd-nand-pxa3xx.h>
34
35 #define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
36 #define NAND_STOP_DELAY msecs_to_jiffies(40)
37 #define PAGE_CHUNK_SIZE (2048)
38
39 /*
40 * Define a buffer size for the initial command that detects the flash device:
41 * STATUS, READID and PARAM.
42 * ONFI param page is 256 bytes, and there are three redundant copies
43 * to be read. JEDEC param page is 512 bytes, and there are also three
44 * redundant copies to be read.
45 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
46 */
47 #define INIT_BUFFER_SIZE 2048
48
49 /* registers and bit definitions */
50 #define NDCR (0x00) /* Control register */
51 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
52 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
53 #define NDSR (0x14) /* Status Register */
54 #define NDPCR (0x18) /* Page Count Register */
55 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
56 #define NDBDR1 (0x20) /* Bad Block Register 1 */
57 #define NDECCCTRL (0x28) /* ECC control */
58 #define NDDB (0x40) /* Data Buffer */
59 #define NDCB0 (0x48) /* Command Buffer0 */
60 #define NDCB1 (0x4C) /* Command Buffer1 */
61 #define NDCB2 (0x50) /* Command Buffer2 */
62
63 #define NDCR_SPARE_EN (0x1 << 31)
64 #define NDCR_ECC_EN (0x1 << 30)
65 #define NDCR_DMA_EN (0x1 << 29)
66 #define NDCR_ND_RUN (0x1 << 28)
67 #define NDCR_DWIDTH_C (0x1 << 27)
68 #define NDCR_DWIDTH_M (0x1 << 26)
69 #define NDCR_PAGE_SZ (0x1 << 24)
70 #define NDCR_NCSX (0x1 << 23)
71 #define NDCR_ND_MODE (0x3 << 21)
72 #define NDCR_NAND_MODE (0x0)
73 #define NDCR_CLR_PG_CNT (0x1 << 20)
74 #define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
75 #define NFCV2_NDCR_STOP_ON_UNCOR (0x1 << 19)
76 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
77 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
78
79 #define NDCR_RA_START (0x1 << 15)
80 #define NDCR_PG_PER_BLK (0x1 << 14)
81 #define NDCR_ND_ARB_EN (0x1 << 12)
82 #define NDCR_INT_MASK (0xFFF)
83
84 #define NDSR_MASK (0xfff)
85 #define NDSR_ERR_CNT_OFF (16)
86 #define NDSR_ERR_CNT_MASK (0x1f)
87 #define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
88 #define NDSR_RDY (0x1 << 12)
89 #define NDSR_FLASH_RDY (0x1 << 11)
90 #define NDSR_CS0_PAGED (0x1 << 10)
91 #define NDSR_CS1_PAGED (0x1 << 9)
92 #define NDSR_CS0_CMDD (0x1 << 8)
93 #define NDSR_CS1_CMDD (0x1 << 7)
94 #define NDSR_CS0_BBD (0x1 << 6)
95 #define NDSR_CS1_BBD (0x1 << 5)
96 #define NDSR_UNCORERR (0x1 << 4)
97 #define NDSR_CORERR (0x1 << 3)
98 #define NDSR_WRDREQ (0x1 << 2)
99 #define NDSR_RDDREQ (0x1 << 1)
100 #define NDSR_WRCMDREQ (0x1)
101
102 #define NDCB0_LEN_OVRD (0x1 << 28)
103 #define NDCB0_ST_ROW_EN (0x1 << 26)
104 #define NDCB0_AUTO_RS (0x1 << 25)
105 #define NDCB0_CSEL (0x1 << 24)
106 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
107 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
108 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
109 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
110 #define NDCB0_NC (0x1 << 20)
111 #define NDCB0_DBC (0x1 << 19)
112 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
113 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
114 #define NDCB0_CMD2_MASK (0xff << 8)
115 #define NDCB0_CMD1_MASK (0xff)
116 #define NDCB0_ADDR_CYC_SHIFT (16)
117
118 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
119 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
120 #define EXT_CMD_TYPE_READ 4 /* Read */
121 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
122 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
123 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
124 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
125
126 /*
127 * This should be large enough to read 'ONFI' and 'JEDEC'.
128 * Let's use 7 bytes, which is the maximum ID count supported
129 * by the controller (see NDCR_RD_ID_CNT_MASK).
130 */
131 #define READ_ID_BYTES 7
132
133 /* macros for registers read/write */
134 #define nand_writel(info, off, val) \
135 do { \
136 dev_vdbg(&info->pdev->dev, \
137 "%s():%d nand_writel(0x%x, 0x%04x)\n", \
138 __func__, __LINE__, (val), (off)); \
139 writel_relaxed((val), (info)->mmio_base + (off)); \
140 } while (0)
141
142 #define nand_readl(info, off) \
143 ({ \
144 unsigned int _v; \
145 _v = readl_relaxed((info)->mmio_base + (off)); \
146 dev_vdbg(&info->pdev->dev, \
147 "%s():%d nand_readl(0x%04x) = 0x%x\n", \
148 __func__, __LINE__, (off), _v); \
149 _v; \
150 })
151
152 /* error code and state */
/*
 * Driver-internal completion codes; stored in info->retcode by the IRQ
 * paths and inspected after a command finishes.
 */
153 enum {
154 ERR_NONE = 0,
155 ERR_DMABUSERR = -1,
156 ERR_SENDCMD = -2,
157 ERR_UNCORERR = -3,
158 ERR_BBERR = -4,
159 ERR_CORERR = -5,
160 };
161
/*
 * Command-execution state machine, kept in info->state and advanced by
 * pxa3xx_nand_irq(), its threaded half, and the DMA completion callback.
 */
162 enum {
163 STATE_IDLE = 0,
164 STATE_PREPARED,
165 STATE_CMD_HANDLE,
166 STATE_DMA_READING,
167 STATE_DMA_WRITING,
168 STATE_DMA_DONE,
169 STATE_PIO_READING,
170 STATE_PIO_WRITING,
171 STATE_CMD_DONE,
172 STATE_READY,
173 };
174
/* Controller generations handled by this driver (selected via DT match). */
175 enum pxa3xx_nand_variant {
176 PXA3XX_NAND_VARIANT_PXA,
177 PXA3XX_NAND_VARIANT_ARMADA370,
178 };
179
/* Per-chip-select state: one instance for each NAND chip on the controller. */
180 struct pxa3xx_nand_host {
181 struct nand_chip chip;
182 void *info_data;	/* back-pointer to the owning pxa3xx_nand_info */
183
184 /* hardware ECC usage flag and chip-select index of this chip */
185 int use_ecc;
186 int cs;
187
188 /* calculated from pxa3xx_nand_flash data */
189 unsigned int col_addr_cycles;
190 unsigned int row_addr_cycles;
191 };
192
193 struct pxa3xx_nand_info {
194 struct nand_hw_control controller;
195 struct platform_device *pdev;
196
197 struct clk *clk;
198 void __iomem *mmio_base;
199 unsigned long mmio_phys;
200 struct completion cmd_complete, dev_ready;
201
202 unsigned int buf_start;
203 unsigned int buf_count;
204 unsigned int buf_size;
205 unsigned int data_buff_pos;
206 unsigned int oob_buff_pos;
207
208 /* DMA information */
209 struct scatterlist sg;
210 enum dma_data_direction dma_dir;
211 struct dma_chan *dma_chan;
212 dma_cookie_t dma_cookie;
213 int drcmr_dat;
214 int drcmr_cmd;
215
216 unsigned char *data_buff;
217 unsigned char *oob_buff;
218 dma_addr_t data_buff_phys;
219 int data_dma_ch;
220
221 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
222 unsigned int state;
223
224 /*
225 * This driver supports NFCv1 (as found in PXA SoC)
226 * and NFCv2 (as found in Armada 370/XP SoC).
227 */
228 enum pxa3xx_nand_variant variant;
229
230 int cs;
231 int use_ecc; /* use HW ECC ? */
232 int ecc_bch; /* using BCH ECC? */
233 int use_dma; /* use DMA ? */
234 int use_spare; /* use spare ? */
235 int need_wait;
236
237 /* Amount of real data per full chunk */
238 unsigned int chunk_size;
239
240 /* Amount of spare data per full chunk */
241 unsigned int spare_size;
242
243 /* Number of full chunks (i.e chunk_size + spare_size) */
244 unsigned int nfullchunks;
245
246 /*
247 * Total number of chunks. If equal to nfullchunks, then there
248 * are only full chunks. Otherwise, there is one last chunk of
249 * size (last_chunk_size + last_spare_size)
250 */
251 unsigned int ntotalchunks;
252
253 /* Amount of real data in the last chunk */
254 unsigned int last_chunk_size;
255
256 /* Amount of spare data in the last chunk */
257 unsigned int last_spare_size;
258
259 unsigned int ecc_size;
260 unsigned int ecc_err_cnt;
261 unsigned int max_bitflips;
262 int retcode;
263
264 /*
265 * Variables only valid during command
266 * execution. step_chunk_size and step_spare_size is the
267 * amount of real data and spare data in the current
268 * chunk. cur_chunk is the current chunk being
269 * read/programmed.
270 */
271 unsigned int step_chunk_size;
272 unsigned int step_spare_size;
273 unsigned int cur_chunk;
274
275 /* cached register value */
276 uint32_t reg_ndcr;
277 uint32_t ndtr0cs0;
278 uint32_t ndtr1cs0;
279
280 /* generated NDCBx register values */
281 uint32_t ndcb0;
282 uint32_t ndcb1;
283 uint32_t ndcb2;
284 uint32_t ndcb3;
285 };
286
287 static bool use_dma = 1;
288 module_param(use_dma, bool, 0444);
289 MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
290
291 struct pxa3xx_nand_timing {
292 unsigned int tCH; /* Enable signal hold time */
293 unsigned int tCS; /* Enable signal setup time */
294 unsigned int tWH; /* ND_nWE high duration */
295 unsigned int tWP; /* ND_nWE pulse time */
296 unsigned int tRH; /* ND_nRE high duration */
297 unsigned int tRP; /* ND_nRE pulse width */
298 unsigned int tR; /* ND_nWE high to ND_nRE low for read */
299 unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */
300 unsigned int tAR; /* ND_ALE low to ND_nRE low delay */
301 };
302
303 struct pxa3xx_nand_flash {
304 uint32_t chip_id;
305 unsigned int flash_width; /* Width of Flash memory (DWIDTH_M) */
306 unsigned int dfc_width; /* Width of flash controller(DWIDTH_C) */
307 struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
308 };
309
/* tCH, tCS, tWH, tWP, tRH, tRP, tR, tWHR, tAR - all values in ns. */
310 static struct pxa3xx_nand_timing timing[] = {
311 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
312 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
313 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
314 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
315 };
316
/*
 * Fallback chip-ID -> bus-width/timing table, used by
 * pxa3xx_nand_init_timings_compat() for chips that do not advertise
 * ONFI asynchronous timing modes.
 */
317 static struct pxa3xx_nand_flash builtin_flash_types[] = {
318 { 0x46ec, 16, 16, &timing[1] },
319 { 0xdaec, 8, 8, &timing[1] },
320 { 0xd7ec, 8, 8, &timing[1] },
321 { 0xa12c, 8, 8, &timing[2] },
322 { 0xb12c, 16, 16, &timing[2] },
323 { 0xdc2c, 8, 8, &timing[2] },
324 { 0xcc2c, 16, 16, &timing[2] },
325 { 0xba20, 16, 16, &timing[3] },
326 };
327
328 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
329 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
330
331 static struct nand_bbt_descr bbt_main_descr = {
332 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
333 | NAND_BBT_2BIT | NAND_BBT_VERSION,
334 .offs = 8,
335 .len = 6,
336 .veroffs = 14,
337 .maxblocks = 8, /* Last 8 blocks in each chip */
338 .pattern = bbt_pattern
339 };
340
341 static struct nand_bbt_descr bbt_mirror_descr = {
342 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
343 | NAND_BBT_2BIT | NAND_BBT_VERSION,
344 .offs = 8,
345 .len = 6,
346 .veroffs = 14,
347 .maxblocks = 8, /* Last 8 blocks in each chip */
348 .pattern = bbt_mirror_pattern
349 };
350
351 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
352 .eccbytes = 32,
353 .eccpos = {
354 32, 33, 34, 35, 36, 37, 38, 39,
355 40, 41, 42, 43, 44, 45, 46, 47,
356 48, 49, 50, 51, 52, 53, 54, 55,
357 56, 57, 58, 59, 60, 61, 62, 63},
358 .oobfree = { {2, 30} }
359 };
360
361 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
362 .eccbytes = 64,
363 .eccpos = {
364 32, 33, 34, 35, 36, 37, 38, 39,
365 40, 41, 42, 43, 44, 45, 46, 47,
366 48, 49, 50, 51, 52, 53, 54, 55,
367 56, 57, 58, 59, 60, 61, 62, 63,
368 96, 97, 98, 99, 100, 101, 102, 103,
369 104, 105, 106, 107, 108, 109, 110, 111,
370 112, 113, 114, 115, 116, 117, 118, 119,
371 120, 121, 122, 123, 124, 125, 126, 127},
372 /* Bootrom looks in bytes 0 & 5 for bad blocks */
373 .oobfree = { {6, 26}, { 64, 32} }
374 };
375
376 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
377 .eccbytes = 128,
378 .eccpos = {
379 32, 33, 34, 35, 36, 37, 38, 39,
380 40, 41, 42, 43, 44, 45, 46, 47,
381 48, 49, 50, 51, 52, 53, 54, 55,
382 56, 57, 58, 59, 60, 61, 62, 63},
383 .oobfree = { }
384 };
385
386 #define NDTR0_tCH(c) (min((c), 7) << 19)
387 #define NDTR0_tCS(c) (min((c), 7) << 16)
388 #define NDTR0_tWH(c) (min((c), 7) << 11)
389 #define NDTR0_tWP(c) (min((c), 7) << 8)
390 #define NDTR0_tRH(c) (min((c), 7) << 3)
391 #define NDTR0_tRP(c) (min((c), 7) << 0)
392
393 #define NDTR1_tR(c) (min((c), 65535) << 16)
394 #define NDTR1_tWHR(c) (min((c), 15) << 4)
395 #define NDTR1_tAR(c) (min((c), 15) << 0)
396
/*
 * Convert nano-seconds to NAND flash controller clock cycles.
 *
 * Round the result *up*: plain integer division truncates, which could
 * program a timing field one cycle shorter than the chip's datasheet
 * minimum and violate its timing requirements.
 */
#define ns2cycle(ns, clk) (int)(((ns) * ((clk) / 1000000) + 999) / 1000)
399
400 static const struct of_device_id pxa3xx_nand_dt_ids[] = {
401 {
402 .compatible = "marvell,pxa3xx-nand",
403 .data = (void *)PXA3XX_NAND_VARIANT_PXA,
404 },
405 {
406 .compatible = "marvell,armada370-nand",
407 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
408 },
409 {}
410 };
411 MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
412
413 static enum pxa3xx_nand_variant
414 pxa3xx_nand_get_variant(struct platform_device *pdev)
415 {
416 const struct of_device_id *of_id =
417 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
418 if (!of_id)
419 return PXA3XX_NAND_VARIANT_PXA;
420 return (enum pxa3xx_nand_variant)of_id->data;
421 }
422
/*
 * Program NDTR0CS0/NDTR1CS0 from a legacy pxa3xx_nand_timing entry
 * (values in ns, converted to controller clock cycles) and cache the
 * register values so they can be replayed on chip-select switch.
 */
423 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
424 const struct pxa3xx_nand_timing *t)
425 {
426 struct pxa3xx_nand_info *info = host->info_data;
427 unsigned long nand_clk = clk_get_rate(info->clk);
428 uint32_t ndtr0, ndtr1;
429
430 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
431 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
432 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
433 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
434 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
435 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
436
437 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
438 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
439 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
440
/* Cache for replay in nand_cmdfunc() when the chip select changes. */
441 info->ndtr0cs0 = ndtr0;
442 info->ndtr1cs0 = ndtr1;
443 nand_writel(info, NDTR0CS0, ndtr0);
444 nand_writel(info, NDTR1CS0, ndtr1);
445 }
446
/*
 * Program NDTR0CS0/NDTR1CS0 from generic SDR timings (fields are in ps,
 * hence the DIV_ROUND_UP(..., 1000) conversions to ns).
 */
447 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
448 const struct nand_sdr_timings *t)
449 {
450 struct pxa3xx_nand_info *info = host->info_data;
451 struct nand_chip *chip = &host->chip;
452 unsigned long nand_clk = clk_get_rate(info->clk);
453 uint32_t ndtr0, ndtr1;
454
455 u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
456 u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
457 u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
458 u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
459 u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
460 u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
/* NOTE(review): assumes chip->chip_delay is in microseconds - tR below is ns */
461 u32 tR = chip->chip_delay * 1000;
462 u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
463 u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
464
465 /* fallback to a default value if tR = 0 */
466 if (!tR)
467 tR = 20000;
468
469 ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
470 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
471 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
472 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
473 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
474 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
475
476 ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
477 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
478 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
479
/* Cache for replay in nand_cmdfunc() when the chip select changes. */
480 info->ndtr0cs0 = ndtr0;
481 info->ndtr1cs0 = ndtr1;
482 nand_writel(info, NDTR0CS0, ndtr0);
483 nand_writel(info, NDTR1CS0, ndtr1);
484 }
485
486 static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
487 unsigned int *flash_width,
488 unsigned int *dfc_width)
489 {
490 struct nand_chip *chip = &host->chip;
491 struct pxa3xx_nand_info *info = host->info_data;
492 const struct pxa3xx_nand_flash *f = NULL;
493 struct mtd_info *mtd = nand_to_mtd(&host->chip);
494 int i, id, ntypes;
495
496 ntypes = ARRAY_SIZE(builtin_flash_types);
497
498 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
499
500 id = chip->read_byte(mtd);
501 id |= chip->read_byte(mtd) << 0x8;
502
503 for (i = 0; i < ntypes; i++) {
504 f = &builtin_flash_types[i];
505
506 if (f->chip_id == id)
507 break;
508 }
509
510 if (i == ntypes) {
511 dev_err(&info->pdev->dev, "Error: timings not found\n");
512 return -EINVAL;
513 }
514
515 pxa3xx_nand_set_timing(host, f->timing);
516
517 *flash_width = f->flash_width;
518 *dfc_width = f->dfc_width;
519
520 return 0;
521 }
522
/*
 * ONFI timing setup: @mode is a bitmask of supported asynchronous timing
 * modes; pick the highest one (or mode 0 if the mask is empty) and
 * program the derived SDR timings.
 */
static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host *host,
					 int mode)
{
	const struct nand_sdr_timings *timings;
	int best;

	/* fls() yields the highest set bit; clamp to mode 0 on empty mask */
	best = fls(mode) - 1;
	if (best < 0)
		best = 0;

	timings = onfi_async_timing_mode_to_sdr_timings(best);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	return 0;
}
540
/*
 * Configure timings and bus widths for the attached chip: prefer ONFI
 * timing modes, fall back to the builtin chip-ID table otherwise.
 * Returns 0 on success or a negative errno.
 */
541 static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
542 {
543 struct nand_chip *chip = &host->chip;
544 struct pxa3xx_nand_info *info = host->info_data;
545 unsigned int flash_width = 0, dfc_width = 0;
546 int mode, err;
547
548 mode = onfi_get_async_timing_mode(chip);
549 if (mode == ONFI_TIMING_MODE_UNKNOWN) {
550 err = pxa3xx_nand_init_timings_compat(host, &flash_width,
551 &dfc_width);
552 if (err)
553 return err;
554
/* The compat table also tells us the bus widths; ONFI chips are 8-bit here. */
555 if (flash_width == 16) {
556 info->reg_ndcr |= NDCR_DWIDTH_M;
557 chip->options |= NAND_BUSWIDTH_16;
558 }
559
560 info->reg_ndcr |= (dfc_width == 16) ? NDCR_DWIDTH_C : 0;
561 } else {
562 err = pxa3xx_nand_init_timings_onfi(host, mode);
563 if (err)
564 return err;
565 }
566
567 return 0;
568 }
569
570 /*
571 * NOTE: it is a must to set ND_RUN firstly, then write
572 * command buffer, otherwise, it does not work.
573 * We enable all the interrupt at the same time, and
574 * let pxa3xx_nand_irq to handle all logic.
575 */
576 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
577 {
578 uint32_t ndcr;
579
580 ndcr = info->reg_ndcr;
581
/* Per-command ECC selection; NDECCCTRL bit 0 switches Hamming/BCH engines. */
582 if (info->use_ecc) {
583 ndcr |= NDCR_ECC_EN;
584 if (info->ecc_bch)
585 nand_writel(info, NDECCCTRL, 0x1);
586 } else {
587 ndcr &= ~NDCR_ECC_EN;
588 if (info->ecc_bch)
589 nand_writel(info, NDECCCTRL, 0x0);
590 }
591
592 if (info->use_dma)
593 ndcr |= NDCR_DMA_EN;
594 else
595 ndcr &= ~NDCR_DMA_EN;
596
597 if (info->use_spare)
598 ndcr |= NDCR_SPARE_EN;
599 else
600 ndcr &= ~NDCR_SPARE_EN;
601
602 ndcr |= NDCR_ND_RUN;
603
604 /* clear status bits and run */
605 nand_writel(info, NDSR, NDSR_MASK);
606 nand_writel(info, NDCR, 0);
607 nand_writel(info, NDCR, ndcr);
608 }
609
/*
 * Stop the controller: busy-wait (bounded) for ND_RUN to self-clear,
 * force it clear on timeout, kill any in-flight DMA, and ack all status.
 */
610 static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
611 {
612 uint32_t ndcr;
/* NOTE(review): timeout counts loop iterations of ~1us, seeded with a jiffies value */
613 int timeout = NAND_STOP_DELAY;
614
615 /* wait RUN bit in NDCR become 0 */
616 ndcr = nand_readl(info, NDCR);
617 while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
618 ndcr = nand_readl(info, NDCR);
619 udelay(1);
620 }
621
622 if (timeout <= 0) {
623 ndcr &= ~NDCR_ND_RUN;
624 nand_writel(info, NDCR, ndcr);
625 }
626 if (info->dma_chan)
627 dmaengine_terminate_all(info->dma_chan);
628
629 /* clear status bits */
630 nand_writel(info, NDSR, NDSR_MASK);
631 }
632
/*
 * Unmask the interrupts in @int_mask. The NDCR interrupt bits are *mask*
 * bits: a set bit disables that interrupt, so enabling means clearing.
 */
633 static void __maybe_unused
634 enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
635 {
636 uint32_t ndcr;
637
638 ndcr = nand_readl(info, NDCR);
639 nand_writel(info, NDCR, ndcr & ~int_mask);
640 }
641
/*
 * Mask the interrupts in @int_mask (NDCR interrupt bits are mask bits,
 * so disabling means setting them). Counterpart of enable_int().
 */
642 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
643 {
644 uint32_t ndcr;
645
646 ndcr = nand_readl(info, NDCR);
647 nand_writel(info, NDCR, ndcr | int_mask);
648 }
649
/*
 * Read @len 32-bit words from the controller data FIFO (NDDB) into @data.
 * With BCH enabled the FIFO must be drained in 32-byte bursts with a
 * RDDREQ poll in between, per the datasheet errata described below.
 */
650 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
651 {
652 if (info->ecc_bch) {
653 u32 val;
654 int ret;
655
656 /*
657 * According to the datasheet, when reading from NDDB
658 * with BCH enabled, after each 32 bytes reads, we
659 * have to make sure that the NDSR.RDDREQ bit is set.
660 *
661 * Drain the FIFO 8 32 bits reads at a time, and skip
662 * the polling on the last read.
663 */
664 while (len > 8) {
665 ioread32_rep(info->mmio_base + NDDB, data, 8);
666
667 ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
668 val & NDSR_RDDREQ, 1000, 5000);
669 if (ret) {
670 dev_err(&info->pdev->dev,
671 "Timeout on RDDREQ while draining the FIFO\n");
672 return;
673 }
674
/* 8 words consumed = 32 bytes of destination buffer */
675 data += 32;
676 len -= 8;
677 }
678 }
679
680 ioread32_rep(info->mmio_base + NDDB, data, len);
681 }
682
/*
 * PIO transfer of the current chunk: push (write) or drain (read) the
 * data and spare areas through the NDDB FIFO, then advance the buffer
 * cursors by the chunk sizes. Sizes are bytes, FIFO access is in words.
 */
683 static void handle_data_pio(struct pxa3xx_nand_info *info)
684 {
685 switch (info->state) {
686 case STATE_PIO_WRITING:
687 if (info->step_chunk_size)
688 writesl(info->mmio_base + NDDB,
689 info->data_buff + info->data_buff_pos,
690 DIV_ROUND_UP(info->step_chunk_size, 4));
691
692 if (info->step_spare_size)
693 writesl(info->mmio_base + NDDB,
694 info->oob_buff + info->oob_buff_pos,
695 DIV_ROUND_UP(info->step_spare_size, 4));
696 break;
697 case STATE_PIO_READING:
698 if (info->step_chunk_size)
699 drain_fifo(info,
700 info->data_buff + info->data_buff_pos,
701 DIV_ROUND_UP(info->step_chunk_size, 4));
702
703 if (info->step_spare_size)
704 drain_fifo(info,
705 info->oob_buff + info->oob_buff_pos,
706 DIV_ROUND_UP(info->step_spare_size, 4));
707 break;
708 default:
709 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
710 info->state);
711 BUG();
712 }
713
714 /* Update buffer pointers for multi-page read/write */
715 info->data_buff_pos += info->step_chunk_size;
716 info->oob_buff_pos += info->step_spare_size;
717 }
718
/*
 * DMA completion callback: record success/failure, unmap the scatterlist,
 * ack the data-request status bits and re-enable controller interrupts
 * (which start_data_dma()'s caller had masked).
 */
719 static void pxa3xx_nand_data_dma_irq(void *data)
720 {
721 struct pxa3xx_nand_info *info = data;
722 struct dma_tx_state state;
723 enum dma_status status;
724
725 status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
726 if (likely(status == DMA_COMPLETE)) {
727 info->state = STATE_DMA_DONE;
728 } else {
729 dev_err(&info->pdev->dev, "DMA error on data channel\n");
730 info->retcode = ERR_DMABUSERR;
731 }
732 dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
733
734 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
735 enable_int(info, NDCR_INT_MASK);
736 }
737
/*
 * Kick off a slave-DMA transfer for the current chunk. Direction is
 * derived from info->state; the single scatterlist entry covers the data
 * chunk plus (optionally) the spare and ECC areas. Completion is signalled
 * via pxa3xx_nand_data_dma_irq().
 */
738 static void start_data_dma(struct pxa3xx_nand_info *info)
739 {
740 enum dma_transfer_direction direction;
741 struct dma_async_tx_descriptor *tx;
742
743 switch (info->state) {
744 case STATE_DMA_WRITING:
745 info->dma_dir = DMA_TO_DEVICE;
746 direction = DMA_MEM_TO_DEV;
747 break;
748 case STATE_DMA_READING:
749 info->dma_dir = DMA_FROM_DEVICE;
750 direction = DMA_DEV_TO_MEM;
751 break;
752 default:
753 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
754 info->state);
755 BUG();
756 }
757 info->sg.length = info->chunk_size;
758 if (info->use_spare)
759 info->sg.length += info->spare_size + info->ecc_size;
760 dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
761
762 tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
763 DMA_PREP_INTERRUPT);
764 if (!tx) {
/* NOTE(review): on prep failure the sg stays mapped and no completion fires */
765 dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
766 return;
767 }
768 tx->callback = pxa3xx_nand_data_dma_irq;
769 tx->callback_param = info;
770 info->dma_cookie = dmaengine_submit(tx);
771 dma_async_issue_pending(info->dma_chan);
772 dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
773 __func__, direction, info->dma_cookie, info->sg.length);
774 }
775
/*
 * Threaded half of the IRQ handler: perform the (potentially slow) PIO
 * transfer out of interrupt context, then ack the data-request bits.
 */
776 static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
777 {
778 struct pxa3xx_nand_info *info = data;
779
780 handle_data_pio(info);
781
782 info->state = STATE_CMD_DONE;
783 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
784
785 return IRQ_HANDLED;
786 }
787
/*
 * Main interrupt handler. Decodes NDSR: records ECC results, dispatches
 * data transfers (DMA directly, PIO via IRQ_WAKE_THREAD), loads the
 * command buffer when the controller asks for it (WRCMDREQ), and
 * completes cmd_complete/dev_ready for waiters.
 */
788 static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
789 {
790 struct pxa3xx_nand_info *info = devid;
791 unsigned int status, is_completed = 0, is_ready = 0;
792 unsigned int ready, cmd_done;
793 irqreturn_t ret = IRQ_HANDLED;
794
/* CS0 and CS1 report ready/command-done through different NDSR bits. */
795 if (info->cs == 0) {
796 ready = NDSR_FLASH_RDY;
797 cmd_done = NDSR_CS0_CMDD;
798 } else {
799 ready = NDSR_RDY;
800 cmd_done = NDSR_CS1_CMDD;
801 }
802
803 status = nand_readl(info, NDSR);
804
805 if (status & NDSR_UNCORERR)
806 info->retcode = ERR_UNCORERR;
807 if (status & NDSR_CORERR) {
808 info->retcode = ERR_CORERR;
/* Only NFCv2 with BCH reports an actual corrected-bit count in NDSR. */
809 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
810 info->ecc_bch)
811 info->ecc_err_cnt = NDSR_ERR_CNT(status);
812 else
813 info->ecc_err_cnt = 1;
814
815 /*
816 * Each chunk composing a page is corrected independently,
817 * and we need to store maximum number of corrected bitflips
818 * to return it to the MTD layer in ecc.read_page().
819 */
820 info->max_bitflips = max_t(unsigned int,
821 info->max_bitflips,
822 info->ecc_err_cnt);
823 }
824 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
825 /* whether use dma to transfer data */
826 if (info->use_dma) {
827 disable_int(info, NDCR_INT_MASK);
828 info->state = (status & NDSR_RDDREQ) ?
829 STATE_DMA_READING : STATE_DMA_WRITING;
830 start_data_dma(info);
831 goto NORMAL_IRQ_EXIT;
832 } else {
833 info->state = (status & NDSR_RDDREQ) ?
834 STATE_PIO_READING : STATE_PIO_WRITING;
835 ret = IRQ_WAKE_THREAD;
836 goto NORMAL_IRQ_EXIT;
837 }
838 }
839 if (status & cmd_done) {
840 info->state = STATE_CMD_DONE;
841 is_completed = 1;
842 }
843 if (status & ready) {
844 info->state = STATE_READY;
845 is_ready = 1;
846 }
847
848 /*
849 * Clear all status bit before issuing the next command, which
850 * can and will alter the status bits and will deserve a new
851 * interrupt on its own. This lets the controller exit the IRQ
852 */
853 nand_writel(info, NDSR, status);
854
855 if (status & NDSR_WRCMDREQ) {
856 status &= ~NDSR_WRCMDREQ;
857 info->state = STATE_CMD_HANDLE;
858
859 /*
860 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
861 * must be loaded by writing directly either 12 or 16
862 * bytes directly to NDCB0, four bytes at a time.
863 *
864 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
865 * but each NDCBx register can be read.
866 */
867 nand_writel(info, NDCB0, info->ndcb0);
868 nand_writel(info, NDCB0, info->ndcb1);
869 nand_writel(info, NDCB0, info->ndcb2);
870
871 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
872 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
873 nand_writel(info, NDCB0, info->ndcb3);
874 }
875
876 if (is_completed)
877 complete(&info->cmd_complete);
878 if (is_ready)
879 complete(&info->dev_ready);
880 NORMAL_IRQ_EXIT:
881 return ret;
882 }
883
/* Return 1 if the first @len bytes of @buf are all 0xff (erased), else 0. */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (buf[i] != 0xff)
			return 0;
	}

	return 1;
}
891
892 static void set_command_address(struct pxa3xx_nand_info *info,
893 unsigned int page_size, uint16_t column, int page_addr)
894 {
895 /* small page addr setting */
896 if (page_size < PAGE_CHUNK_SIZE) {
897 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
898 | (column & 0xFF);
899
900 info->ndcb2 = 0;
901 } else {
902 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
903 | (column & 0xFFFF);
904
905 if (page_addr & 0xFF0000)
906 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
907 else
908 info->ndcb2 = 0;
909 }
910 }
911
/*
 * Reset all per-command bookkeeping in @info and apply per-command
 * defaults (ECC on for reads/program, spare off for PARAM). For commands
 * that will stream page data, pre-fill the bounce buffer with 0xFF.
 */
912 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
913 {
914 struct pxa3xx_nand_host *host = info->host[info->cs];
915 struct mtd_info *mtd = nand_to_mtd(&host->chip);
916
917 /* reset data and oob column point to handle data */
918 info->buf_start = 0;
919 info->buf_count = 0;
920 info->data_buff_pos = 0;
921 info->oob_buff_pos = 0;
922 info->step_chunk_size = 0;
923 info->step_spare_size = 0;
924 info->cur_chunk = 0;
925 info->use_ecc = 0;
926 info->use_spare = 1;
927 info->retcode = ERR_NONE;
928 info->ecc_err_cnt = 0;
929 info->ndcb3 = 0;
930 info->need_wait = 0;
931
932 switch (command) {
933 case NAND_CMD_READ0:
934 case NAND_CMD_PAGEPROG:
935 info->use_ecc = 1;
936 break;
937 case NAND_CMD_PARAM:
938 info->use_spare = 0;
939 break;
940 default:
941 info->ndcb1 = 0;
942 info->ndcb2 = 0;
943 break;
944 }
945
946 /*
947 * If we are about to issue a read command, or about to set
948 * the write address, then clean the data buffer.
949 */
950 if (command == NAND_CMD_READ0 ||
951 command == NAND_CMD_READOOB ||
952 command == NAND_CMD_SEQIN) {
953
954 info->buf_count = mtd->writesize + mtd->oobsize;
955 memset(info->data_buff, 0xFF, info->buf_count);
956 }
957
958 }
959
/*
 * Build the NDCB0-NDCB3 command-buffer values for @command (with
 * @ext_cmd_type selecting the naked/last/dispatch variant on chunked,
 * large-page operations). Returns 1 when the command must actually be
 * issued to the controller, 0 when nothing needs to run (e.g. SEQIN on
 * small pages, blank PAGEPROG, ERASE2, unsupported commands).
 */
960 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
961 int ext_cmd_type, uint16_t column, int page_addr)
962 {
963 int addr_cycle, exec_cmd;
964 struct pxa3xx_nand_host *host;
965 struct mtd_info *mtd;
966
967 host = info->host[info->cs];
968 mtd = nand_to_mtd(&host->chip);
969 addr_cycle = 0;
970 exec_cmd = 1;
971
972 if (info->cs != 0)
973 info->ndcb0 = NDCB0_CSEL;
974 else
975 info->ndcb0 = 0;
976
/* SEQIN only stores the address; the actual transfer happens at PAGEPROG. */
977 if (command == NAND_CMD_SEQIN)
978 exec_cmd = 0;
979
980 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
981 + host->col_addr_cycles);
982
983 switch (command) {
984 case NAND_CMD_READOOB:
985 case NAND_CMD_READ0:
986 info->buf_start = column;
987 info->ndcb0 |= NDCB0_CMD_TYPE(0)
988 | addr_cycle
989 | NAND_CMD_READ0;
990
/* OOB reads are plain page reads with the cursor moved past the data area. */
991 if (command == NAND_CMD_READOOB)
992 info->buf_start += mtd->writesize;
993
994 if (info->cur_chunk < info->nfullchunks) {
995 info->step_chunk_size = info->chunk_size;
996 info->step_spare_size = info->spare_size;
997 } else {
998 info->step_chunk_size = info->last_chunk_size;
999 info->step_spare_size = info->last_spare_size;
1000 }
1001
1002 /*
1003 * Multiple page read needs an 'extended command type' field,
1004 * which is either naked-read or last-read according to the
1005 * state.
1006 */
1007 if (mtd->writesize == PAGE_CHUNK_SIZE) {
1008 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
1009 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
1010 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
1011 | NDCB0_LEN_OVRD
1012 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
1013 info->ndcb3 = info->step_chunk_size +
1014 info->step_spare_size;
1015 }
1016
1017 set_command_address(info, mtd->writesize, column, page_addr);
1018 break;
1019
1020 case NAND_CMD_SEQIN:
1021
1022 info->buf_start = column;
1023 set_command_address(info, mtd->writesize, 0, page_addr);
1024
1025 /*
1026 * Multiple page programming needs to execute the initial
1027 * SEQIN command that sets the page address.
1028 */
1029 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1030 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
1031 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
1032 | addr_cycle
1033 | command;
1034 exec_cmd = 1;
1035 }
1036 break;
1037
1038 case NAND_CMD_PAGEPROG:
/* Skip programming entirely when the whole page+oob is still erased. */
1039 if (is_buf_blank(info->data_buff,
1040 (mtd->writesize + mtd->oobsize))) {
1041 exec_cmd = 0;
1042 break;
1043 }
1044
1045 if (info->cur_chunk < info->nfullchunks) {
1046 info->step_chunk_size = info->chunk_size;
1047 info->step_spare_size = info->spare_size;
1048 } else {
1049 info->step_chunk_size = info->last_chunk_size;
1050 info->step_spare_size = info->last_spare_size;
1051 }
1052
1053 /* Second command setting for large pages */
1054 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1055 /*
1056 * Multiple page write uses the 'extended command'
1057 * field. This can be used to issue a command dispatch
1058 * or a naked-write depending on the current stage.
1059 */
1060 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
1061 | NDCB0_LEN_OVRD
1062 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
1063 info->ndcb3 = info->step_chunk_size +
1064 info->step_spare_size;
1065
1066 /*
1067 * This is the command dispatch that completes a chunked
1068 * page program operation.
1069 */
1070 if (info->cur_chunk == info->ntotalchunks) {
1071 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
1072 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
1073 | command;
1074 info->ndcb1 = 0;
1075 info->ndcb2 = 0;
1076 info->ndcb3 = 0;
1077 }
1078 } else {
1079 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
1080 | NDCB0_AUTO_RS
1081 | NDCB0_ST_ROW_EN
1082 | NDCB0_DBC
1083 | (NAND_CMD_PAGEPROG << 8)
1084 | NAND_CMD_SEQIN
1085 | addr_cycle;
1086 }
1087 break;
1088
1089 case NAND_CMD_PARAM:
1090 info->buf_count = INIT_BUFFER_SIZE;
1091 info->ndcb0 |= NDCB0_CMD_TYPE(0)
1092 | NDCB0_ADDR_CYC(1)
1093 | NDCB0_LEN_OVRD
1094 | command;
1095 info->ndcb1 = (column & 0xFF);
1096 info->ndcb3 = INIT_BUFFER_SIZE;
1097 info->step_chunk_size = INIT_BUFFER_SIZE;
1098 break;
1099
1100 case NAND_CMD_READID:
1101 info->buf_count = READ_ID_BYTES;
1102 info->ndcb0 |= NDCB0_CMD_TYPE(3)
1103 | NDCB0_ADDR_CYC(1)
1104 | command;
1105 info->ndcb1 = (column & 0xFF);
1106
/* The controller transfers ID/status data in fixed 8-byte FIFO units. */
1107 info->step_chunk_size = 8;
1108 break;
1109 case NAND_CMD_STATUS:
1110 info->buf_count = 1;
1111 info->ndcb0 |= NDCB0_CMD_TYPE(4)
1112 | NDCB0_ADDR_CYC(1)
1113 | command;
1114
1115 info->step_chunk_size = 8;
1116 break;
1117
1118 case NAND_CMD_ERASE1:
/* Controller auto-issues the ERASE2 confirm via the double-byte command. */
1119 info->ndcb0 |= NDCB0_CMD_TYPE(2)
1120 | NDCB0_AUTO_RS
1121 | NDCB0_ADDR_CYC(3)
1122 | NDCB0_DBC
1123 | (NAND_CMD_ERASE2 << 8)
1124 | NAND_CMD_ERASE1;
1125 info->ndcb1 = page_addr;
1126 info->ndcb2 = 0;
1127
1128 break;
1129 case NAND_CMD_RESET:
1130 info->ndcb0 |= NDCB0_CMD_TYPE(5)
1131 | command;
1132
1133 break;
1134
1135 case NAND_CMD_ERASE2:
/* Already folded into ERASE1 above - nothing to send. */
1136 exec_cmd = 0;
1137 break;
1138
1139 default:
1140 exec_cmd = 0;
1141 dev_err(&info->pdev->dev, "non-supported command %x\n",
1142 command);
1143 break;
1144 }
1145
1146 return exec_cmd;
1147 }
1148
/*
 * nand_cmdfunc - issue a NAND command as a single controller transaction.
 *
 * Default ->cmdfunc hook, used when the whole page fits in the controller
 * FIFO (writesize <= PAGE_CHUNK_SIZE). It builds the NDCBx command block
 * via prepare_set_command() and, when the command actually needs to hit
 * the device, starts the controller and waits (with timeout) for the
 * command-complete interrupt. On timeout the state machine is stopped so
 * the next command starts from a clean state.
 */
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * If this is a x16 device, convert the input "byte" address
	 * into a "word" address appropriate for indexing a
	 * word-oriented device.
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be a different NAND chip hooked to each chip
	 * select, so check whether the chip select has changed and,
	 * if so, reprogram the timing registers.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		init_completion(&info->cmd_complete);
		init_completion(&info->dev_ready);
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
		}
	}
	info->state = STATE_IDLE;
}
1196
/*
 * nand_cmdfunc_extended - ->cmdfunc for pages larger than the FIFO.
 *
 * Used on the Armada 370 variant when writesize > PAGE_CHUNK_SIZE. Large
 * pages are transferred as a sequence of "naked" chunk transactions using
 * the extended command field; this loop drives prepare_set_command() once
 * per chunk, advancing info->cur_chunk and adjusting ext_cmd_type until
 * the whole page (plus, for program, the final command dispatch) is done.
 */
static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * If this is a x16 device, convert the input "byte" address
	 * into a "word" address appropriate for indexing a
	 * word-oriented device.
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be a different NAND chip hooked to each chip
	 * select, so check whether the chip select has changed and,
	 * if so, reprogram the timing registers.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	init_completion(&info->dev_ready);
	do {
		info->state = STATE_PREPARED;

		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			info->need_wait = 0;
			complete(&info->dev_ready);
			break;
		}

		init_completion(&info->cmd_complete);
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
			break;
		}

		/* Only a few commands need several steps */
		if (command != NAND_CMD_PAGEPROG &&
		    command != NAND_CMD_READ0    &&
		    command != NAND_CMD_READOOB)
			break;

		info->cur_chunk++;

		/* Check if the sequence is complete */
		if (info->cur_chunk == info->ntotalchunks && command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a split program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->cur_chunk == (info->ntotalchunks + 1) &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->cur_chunk == info->ntotalchunks - 1)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a split program command has no more data to transfer,
		 * the command dispatch must be issued to complete.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->cur_chunk == info->ntotalchunks) {
				ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}
1316
/*
 * pxa3xx_nand_write_page_hwecc - stage a full page for a HW-ECC write.
 *
 * Only copies the page data and OOB area into the driver buffer; the
 * actual device program (and hardware ECC generation) happens later on
 * the NAND_CMD_PAGEPROG path. No device access occurs here, hence the
 * unconditional success return.
 */
static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required,
		int page)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}
1326
/*
 * pxa3xx_nand_read_page_hwecc - read a page with hardware ECC applied.
 *
 * Copies data and OOB out of the driver buffer (already filled by the
 * preceding READ0 command) and folds the controller's ECC result into
 * the mtd ECC statistics. Returns the maximum number of bitflips seen,
 * as required by the ecc.read_page contract.
 */
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR) {
		/*
		 * For a blank page (all 0xff) the HW calculates its ECC as
		 * 0, which differs from the ECC information within the
		 * OOB; ignore such uncorrectable errors.
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}
1354
1355 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1356 {
1357 struct nand_chip *chip = mtd_to_nand(mtd);
1358 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1359 struct pxa3xx_nand_info *info = host->info_data;
1360 char retval = 0xFF;
1361
1362 if (info->buf_start < info->buf_count)
1363 /* Has just send a new command? */
1364 retval = info->data_buff[info->buf_start++];
1365
1366 return retval;
1367 }
1368
1369 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1370 {
1371 struct nand_chip *chip = mtd_to_nand(mtd);
1372 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1373 struct pxa3xx_nand_info *info = host->info_data;
1374 u16 retval = 0xFFFF;
1375
1376 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1377 retval = *((u16 *)(info->data_buff+info->buf_start));
1378 info->buf_start += 2;
1379 }
1380 return retval;
1381 }
1382
1383 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1384 {
1385 struct nand_chip *chip = mtd_to_nand(mtd);
1386 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1387 struct pxa3xx_nand_info *info = host->info_data;
1388 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1389
1390 memcpy(buf, info->data_buff + info->buf_start, real_len);
1391 info->buf_start += real_len;
1392 }
1393
1394 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1395 const uint8_t *buf, int len)
1396 {
1397 struct nand_chip *chip = mtd_to_nand(mtd);
1398 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1399 struct pxa3xx_nand_info *info = host->info_data;
1400 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1401
1402 memcpy(info->data_buff + info->buf_start, buf, real_len);
1403 info->buf_start += real_len;
1404 }
1405
/*
 * pxa3xx_nand_select_chip - ->select_chip hook, intentionally a no-op.
 *
 * Chip-select handling happens in the cmdfunc path, which reprograms the
 * timing registers whenever the active chip select changes.
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}
1410
/*
 * pxa3xx_nand_waitfunc - wait for the device to become ready.
 *
 * If a command was started (need_wait set by cmdfunc), block until the
 * device-ready completion fires or the timeout expires. For write/erase
 * states the controller-reported result code is translated into the
 * standard NAND status; otherwise the chip is simply reported ready.
 */
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		info->need_wait = 0;
		if (!wait_for_completion_timeout(&info->dev_ready,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Ready time out!!!\n");
			return NAND_STATUS_FAIL;
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}
1436
1437 static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1438 {
1439 struct pxa3xx_nand_host *host = info->host[info->cs];
1440 struct platform_device *pdev = info->pdev;
1441 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1442 const struct nand_sdr_timings *timings;
1443
1444 /* Configure default flash values */
1445 info->chunk_size = PAGE_CHUNK_SIZE;
1446 info->reg_ndcr = 0x0; /* enable all interrupts */
1447 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1448 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1449 info->reg_ndcr |= NDCR_SPARE_EN;
1450
1451 /* use the common timing to make a try */
1452 timings = onfi_async_timing_mode_to_sdr_timings(0);
1453 if (IS_ERR(timings))
1454 return PTR_ERR(timings);
1455
1456 pxa3xx_nand_set_sdr_timing(host, timings);
1457 return 0;
1458 }
1459
1460 static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1461 {
1462 struct pxa3xx_nand_host *host = info->host[info->cs];
1463 struct nand_chip *chip = &host->chip;
1464 struct mtd_info *mtd = nand_to_mtd(chip);
1465
1466 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1467 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1468 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1469 }
1470
/*
 * pxa3xx_nand_detect_config - inherit the bootloader's controller setup.
 *
 * Used with the "keep config" option: reads NDCR and the timing registers
 * as left by the firmware and caches them, masking out the interrupt and
 * arbitration bits so the driver controls those itself.
 */
static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	uint32_t ndcr = nand_readl(info, NDCR);

	/* Set an initial chunk size */
	info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
	info->reg_ndcr = ndcr &
		~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
}
1485
/*
 * pxa3xx_nand_init_buff - allocate the data buffer and set up DMA.
 *
 * Allocates the kernel buffer used to stage page data. When DMA is
 * requested (module parameter), additionally acquires and configures a
 * slave DMA channel for the NDDB data FIFO; only then is info->use_dma
 * set, so a failure anywhere in the DMA setup leaves the driver usable
 * in PIO mode on a retry. On error the data buffer is not freed here:
 * the caller's error path goes through pxa3xx_nand_free_buff().
 */
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	struct dma_slave_config	config;
	dma_cap_mask_t mask;
	struct pxad_param param;
	int ret;

	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL)
		return -ENOMEM;
	if (use_dma == 0)
		return 0;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	sg_init_one(&info->sg, info->data_buff, info->buf_size);
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	param.prio = PXAD_PRIO_LOWEST;
	param.drcmr = info->drcmr_dat;
	info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
							  &param, &pdev->dev,
							  "data");
	if (!info->dma_chan) {
		dev_err(&pdev->dev, "unable to request data dma channel\n");
		return -ENODEV;
	}

	/* One config serves both directions: the FIFO address is NDDB. */
	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.src_addr = info->mmio_phys + NDDB;
	config.dst_addr = info->mmio_phys + NDDB;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;
	ret = dmaengine_slave_config(info->dma_chan, &config);
	if (ret < 0) {
		dev_err(&info->pdev->dev,
			"dma channel configuration failed: %d\n",
			ret);
		return ret;
	}

	/*
	 * Now that DMA buffers are allocated we turn on
	 * DMA proper for I/O operations.
	 */
	info->use_dma = 1;
	return 0;
}
1539
/*
 * pxa3xx_nand_free_buff - release resources taken by init_buff.
 *
 * Quiesces and releases the DMA channel when DMA was enabled, then
 * frees the staging buffer (kfree(NULL) is safe if allocation failed).
 */
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
	if (info->use_dma) {
		dmaengine_terminate_all(info->dma_chan);
		dma_release_channel(info->dma_chan);
	}
	kfree(info->data_buff);
}
1548
/*
 * pxa_ecc_init - select a supported controller ECC configuration.
 *
 * Maps the requested (strength, step size, page size) triple onto one of
 * the fixed layouts this controller supports, filling in the driver's
 * chunking parameters and the nand_ecc_ctrl. Note the controller may
 * provide *more* correction than requested (e.g. a 4-bit/512B request is
 * served by 16-bit/2048B BCH). Returns 0 on success, -ENODEV when the
 * combination is not supported.
 */
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		/* Hamming ECC, one 2 KiB chunk per page. */
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		/* Hamming ECC on small-page (512 B) devices. */
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		/* Same BCH4 configuration, two chunks for 4 KiB pages. */
		info->ecc_bch = 1;
		info->nfullchunks = 2;
		info->ntotalchunks = 2;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		/* Four full 1 KiB chunks plus a spare-only trailing chunk. */
		info->ecc_bch = 1;
		info->nfullchunks = 4;
		info->ntotalchunks = 5;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 0;
		info->last_spare_size = 64;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;
	} else {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
		 ecc->strength, ecc->size);
	return 0;
}
1629
/*
 * pxa3xx_nand_scan - detect the flash and finish chip/controller setup.
 *
 * Drives the full identification sequence for one chip select: configure
 * (or inherit) the controller, run nand_scan_ident(), pick the command
 * path and ECC scheme, size the real data buffer and complete the scan
 * with nand_scan_tail(). Returns 0 on success or a negative errno.
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;
	uint16_t ecc_strength, ecc_step;

	/* Either trust the bootloader's setup or program safe defaults. */
	if (pdata->keep_config) {
		pxa3xx_nand_detect_config(info);
	} else {
		ret = pxa3xx_nand_config_ident(info);
		if (ret)
			return ret;
	}

	if (info->reg_ndcr & NDCR_DWIDTH_M)
		chip->options |= NAND_BUSWIDTH_16;

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, NULL))
		return -ENODEV;

	if (!pdata->keep_config) {
		ret = pxa3xx_nand_init(host);
		if (ret) {
			dev_err(&info->pdev->dev, "Failed to init nand: %d\n",
				ret);
			return ret;
		}
	}

	if (pdata->flash_bbt) {
		/*
		 * We'll use a bad block table stored in-flash and don't
		 * allow writing the bad block marker to the flash.
		 */
		chip->bbt_options |= NAND_BBT_USE_FLASH |
				     NAND_BBT_NO_OOB_BBM;
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
	}

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka split) command handling.
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	/* Platform-provided ECC parameters win over the chip's own. */
	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	/* Three row-address cycles are needed beyond 64K pages. */
	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	if (!pdata->keep_config)
		pxa3xx_nand_config_tail(info);

	return nand_scan_tail(mtd);
}
1738
1739 static int alloc_nand_resource(struct platform_device *pdev)
1740 {
1741 struct device_node *np = pdev->dev.of_node;
1742 struct pxa3xx_nand_platform_data *pdata;
1743 struct pxa3xx_nand_info *info;
1744 struct pxa3xx_nand_host *host;
1745 struct nand_chip *chip = NULL;
1746 struct mtd_info *mtd;
1747 struct resource *r;
1748 int ret, irq, cs;
1749
1750 pdata = dev_get_platdata(&pdev->dev);
1751 if (pdata->num_cs <= 0)
1752 return -ENODEV;
1753 info = devm_kzalloc(&pdev->dev,
1754 sizeof(*info) + sizeof(*host) * pdata->num_cs,
1755 GFP_KERNEL);
1756 if (!info)
1757 return -ENOMEM;
1758
1759 info->pdev = pdev;
1760 info->variant = pxa3xx_nand_get_variant(pdev);
1761 for (cs = 0; cs < pdata->num_cs; cs++) {
1762 host = (void *)&info[1] + sizeof(*host) * cs;
1763 chip = &host->chip;
1764 nand_set_controller_data(chip, host);
1765 mtd = nand_to_mtd(chip);
1766 info->host[cs] = host;
1767 host->cs = cs;
1768 host->info_data = info;
1769 mtd->dev.parent = &pdev->dev;
1770 /* FIXME: all chips use the same device tree partitions */
1771 nand_set_flash_node(chip, np);
1772
1773 nand_set_controller_data(chip, host);
1774 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1775 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1776 chip->controller = &info->controller;
1777 chip->waitfunc = pxa3xx_nand_waitfunc;
1778 chip->select_chip = pxa3xx_nand_select_chip;
1779 chip->read_word = pxa3xx_nand_read_word;
1780 chip->read_byte = pxa3xx_nand_read_byte;
1781 chip->read_buf = pxa3xx_nand_read_buf;
1782 chip->write_buf = pxa3xx_nand_write_buf;
1783 chip->options |= NAND_NO_SUBPAGE_WRITE;
1784 chip->cmdfunc = nand_cmdfunc;
1785 }
1786
1787 spin_lock_init(&chip->controller->lock);
1788 init_waitqueue_head(&chip->controller->wq);
1789 info->clk = devm_clk_get(&pdev->dev, NULL);
1790 if (IS_ERR(info->clk)) {
1791 dev_err(&pdev->dev, "failed to get nand clock\n");
1792 return PTR_ERR(info->clk);
1793 }
1794 ret = clk_prepare_enable(info->clk);
1795 if (ret < 0)
1796 return ret;
1797
1798 if (use_dma) {
1799 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1800 if (r == NULL) {
1801 dev_err(&pdev->dev,
1802 "no resource defined for data DMA\n");
1803 ret = -ENXIO;
1804 goto fail_disable_clk;
1805 }
1806 info->drcmr_dat = r->start;
1807
1808 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1809 if (r == NULL) {
1810 dev_err(&pdev->dev,
1811 "no resource defined for cmd DMA\n");
1812 ret = -ENXIO;
1813 goto fail_disable_clk;
1814 }
1815 info->drcmr_cmd = r->start;
1816 }
1817
1818 irq = platform_get_irq(pdev, 0);
1819 if (irq < 0) {
1820 dev_err(&pdev->dev, "no IRQ resource defined\n");
1821 ret = -ENXIO;
1822 goto fail_disable_clk;
1823 }
1824
1825 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1826 info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1827 if (IS_ERR(info->mmio_base)) {
1828 ret = PTR_ERR(info->mmio_base);
1829 goto fail_disable_clk;
1830 }
1831 info->mmio_phys = r->start;
1832
1833 /* Allocate a buffer to allow flash detection */
1834 info->buf_size = INIT_BUFFER_SIZE;
1835 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1836 if (info->data_buff == NULL) {
1837 ret = -ENOMEM;
1838 goto fail_disable_clk;
1839 }
1840
1841 /* initialize all interrupts to be disabled */
1842 disable_int(info, NDSR_MASK);
1843
1844 ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1845 pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1846 pdev->name, info);
1847 if (ret < 0) {
1848 dev_err(&pdev->dev, "failed to request IRQ\n");
1849 goto fail_free_buf;
1850 }
1851
1852 platform_set_drvdata(pdev, info);
1853
1854 return 0;
1855
1856 fail_free_buf:
1857 free_irq(irq, info);
1858 kfree(info->data_buff);
1859 fail_disable_clk:
1860 clk_disable_unprepare(info->clk);
1861 return ret;
1862 }
1863
/*
 * pxa3xx_nand_remove - tear down the controller on driver removal.
 *
 * Releases the IRQ and buffers, hands DFI bus arbitration back to the
 * SMC (see comment below), stops the clock and unregisters every mtd
 * device that was created per chip select.
 */
static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	int irq, cs;

	if (!info)
		return 0;

	pdata = dev_get_platdata(&pdev->dev);

	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, info);
	pxa3xx_nand_free_buff(info);

	/*
	 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
	 * In order to prevent a lockup of the system bus, the DFI bus
	 * arbitration is granted to SMC upon driver removal. This is done by
	 * setting the x_ARB_CNTL bit, which also prevents the NAND to have
	 * access to the bus anymore.
	 */
	nand_writel(info, NDCR,
		    (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
		    NFCV1_NDCR_ARB_CNTL);
	clk_disable_unprepare(info->clk);

	for (cs = 0; cs < pdata->num_cs; cs++)
		nand_release(nand_to_mtd(&info->host[cs]->chip));
	return 0;
}
1896
1897 static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1898 {
1899 struct pxa3xx_nand_platform_data *pdata;
1900 struct device_node *np = pdev->dev.of_node;
1901 const struct of_device_id *of_id =
1902 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1903
1904 if (!of_id)
1905 return 0;
1906
1907 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1908 if (!pdata)
1909 return -ENOMEM;
1910
1911 if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1912 pdata->enable_arbiter = 1;
1913 if (of_get_property(np, "marvell,nand-keep-config", NULL))
1914 pdata->keep_config = 1;
1915 of_property_read_u32(np, "num-cs", &pdata->num_cs);
1916 pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1917
1918 pdata->ecc_strength = of_get_nand_ecc_strength(np);
1919 if (pdata->ecc_strength < 0)
1920 pdata->ecc_strength = 0;
1921
1922 pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
1923 if (pdata->ecc_step_size < 0)
1924 pdata->ecc_step_size = 0;
1925
1926 pdev->dev.platform_data = pdata;
1927
1928 return 0;
1929 }
1930
/*
 * pxa3xx_nand_probe - platform driver probe entry point.
 *
 * Resolves platform data (DT or board file), allocates controller
 * resources, then scans and registers an mtd device per chip select.
 * The probe succeeds as long as at least one chip select yields a
 * working device; if none does, everything is torn down again.
 */
static int pxa3xx_nand_probe(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_info *info;
	int ret, cs, probe_success, dma_available;

	/* Slave DMA is only wired up on PXA/MMP ARM platforms. */
	dma_available = IS_ENABLED(CONFIG_ARM) &&
		(IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
	if (use_dma && !dma_available) {
		use_dma = 0;
		dev_warn(&pdev->dev,
			 "This platform can't do DMA on this device\n");
	}

	ret = pxa3xx_nand_probe_dt(pdev);
	if (ret)
		return ret;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -ENODEV;
	}

	ret = alloc_nand_resource(pdev);
	if (ret) {
		dev_err(&pdev->dev, "alloc nand resource failed\n");
		return ret;
	}

	info = platform_get_drvdata(pdev);
	probe_success = 0;
	for (cs = 0; cs < pdata->num_cs; cs++) {
		struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);

		/*
		 * The mtd name matches the one used in 'mtdparts' kernel
		 * parameter. This name cannot be changed or otherwise
		 * user's mtd partitions configuration would get broken.
		 */
		mtd->name = "pxa3xx_nand-0";
		info->cs = cs;
		ret = pxa3xx_nand_scan(mtd);
		if (ret) {
			dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
				 cs);
			continue;
		}

		ret = mtd_device_register(mtd, pdata->parts[cs],
					  pdata->nr_parts[cs]);
		if (!ret)
			probe_success = 1;
	}

	if (!probe_success) {
		pxa3xx_nand_remove(pdev);
		return -ENODEV;
	}

	return 0;
}
1993
1994 #ifdef CONFIG_PM
/*
 * pxa3xx_nand_suspend - system suspend hook.
 *
 * Refuses to suspend (-EAGAIN) while a command sequence is in flight
 * (any non-zero driver state), otherwise gates the controller clock.
 * Uses clk_disable() (not unprepare) to pair with clk_enable() in
 * resume.
 */
static int pxa3xx_nand_suspend(struct device *dev)
{
	struct pxa3xx_nand_info *info = dev_get_drvdata(dev);

	if (info->state) {
		dev_err(dev, "driver busy, state = %d\n", info->state);
		return -EAGAIN;
	}

	clk_disable(info->clk);
	return 0;
}
2007
/*
 * pxa3xx_nand_resume - system resume hook.
 *
 * Re-enables the controller clock, masks interrupts, forces a timing
 * reprogram on the next command and clears any stale status bits left
 * over from the clock gating.
 */
static int pxa3xx_nand_resume(struct device *dev)
{
	struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
	int ret;

	ret = clk_enable(info->clk);
	if (ret < 0)
		return ret;

	/* We don't want to handle interrupt without calling mtd routine */
	disable_int(info, NDCR_INT_MASK);

	/*
	 * Directly set the chip select to an invalid value so that the
	 * driver resets the timing registers for the current chip select
	 * at the beginning of the next cmdfunc call.
	 */
	info->cs = 0xff;

	/*
	 * As the spec says, the NDSR would be updated to 0x1800 when
	 * doing the nand_clk disable/enable.
	 * To prevent it damaging state machine of the driver, clear
	 * all status before resume.
	 */
	nand_writel(info, NDSR, NDSR_MASK);

	return 0;
}
2037 #else
2038 #define pxa3xx_nand_suspend NULL
2039 #define pxa3xx_nand_resume NULL
2040 #endif
2041
/* System sleep hooks (NULL when CONFIG_PM is not set, see above). */
static const struct dev_pm_ops pxa3xx_nand_pm_ops = {
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};

/* Platform driver glue: matched by name or by the DT compatible table. */
static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
		.of_match_table = pxa3xx_nand_dt_ids,
		.pm	= &pxa3xx_nand_pm_ops,
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
};

module_platform_driver(pxa3xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");
This page took 0.074912 seconds and 6 git commands to generate.