/*
 * SuperH FLCTL nand controller
 *
 * Copyright (c) 2008 Renesas Solutions Corp.
 * Copyright (c) 2008 Atom Create Engineering Co., Ltd.
 *
 * Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/sh_flctl.h>

static int flctl_4secc_ooblayout_sp_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = 0;
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int flctl_4secc_ooblayout_sp_free(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;

	oobregion->offset = 12;
	oobregion->length = 4;

	return 0;
}

static const struct mtd_ooblayout_ops flctl_4secc_oob_smallpage_ops = {
	.ecc = flctl_4secc_ooblayout_sp_ecc,
	.free = flctl_4secc_ooblayout_sp_free,
};

static int flctl_4secc_ooblayout_lp_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = (section * 16) + 6;
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int flctl_4secc_ooblayout_lp_free(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = section * 16;
	oobregion->length = 6;

	if (!section) {
		oobregion->offset += 2;
		oobregion->length -= 2;
	}

	return 0;
}

static const struct mtd_ooblayout_ops flctl_4secc_oob_largepage_ops = {
	.ecc = flctl_4secc_ooblayout_lp_ecc,
	.free = flctl_4secc_ooblayout_lp_free,
};

static uint8_t scan_ff_pattern[] = { 0xff, 0xff };

static struct nand_bbt_descr flctl_4secc_smallpage = {
	.options = NAND_BBT_SCAN2NDPAGE,
	.offs = 11,
	.len = 1,
	.pattern = scan_ff_pattern,
};

static struct nand_bbt_descr flctl_4secc_largepage = {
	.options = NAND_BBT_SCAN2NDPAGE,
	.offs = 0,
	.len = 2,
	.pattern = scan_ff_pattern,
};

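/* Clear the controller FIFOs by pulsing the AC1CLR and AC0CLR bits in FLINTDMACR. */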
static void empty_fifo(struct sh_flctl *flctl)
{
	writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
}

static void start_translation(struct sh_flctl *flctl)
{
	writeb(TRSTRT, FLTRCR(flctl));
}

static void timeout_error(struct sh_flctl *flctl, const char *str)
{
	dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
}

static void wait_completion(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		if (readb(FLTRCR(flctl)) & TREND) {
			writeb(0x0, FLTRCR(flctl));
			return;
		}
		udelay(1);
	}

	timeout_error(flctl, __func__);
	writeb(0x0, FLTRCR(flctl));
}

static void flctl_dma_complete(void *param)
{
	struct sh_flctl *flctl = param;

	complete(&flctl->dma_complete);
}

static void flctl_release_dma(struct sh_flctl *flctl)
{
	if (flctl->chan_fifo0_rx) {
		dma_release_channel(flctl->chan_fifo0_rx);
		flctl->chan_fifo0_rx = NULL;
	}
	if (flctl->chan_fifo0_tx) {
		dma_release_channel(flctl->chan_fifo0_tx);
		flctl->chan_fifo0_tx = NULL;
	}
}

static void flctl_setup_dma(struct sh_flctl *flctl)
{
	dma_cap_mask_t mask;
	struct dma_slave_config cfg;
	struct platform_device *pdev = flctl->pdev;
	struct sh_flctl_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;

	if (!pdata)
		return;

	if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
		return;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
				(void *)(uintptr_t)pdata->slave_id_fifo0_tx);
	dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
		flctl->chan_fifo0_tx);

	if (!flctl->chan_fifo0_tx)
		return;

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = flctl->fifo;
	cfg.src_addr = 0;
	ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
	if (ret < 0)
		goto err;

	flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
				(void *)(uintptr_t)pdata->slave_id_fifo0_rx);
	dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
		flctl->chan_fifo0_rx);

	if (!flctl->chan_fifo0_rx)
		goto err;

	cfg.direction = DMA_DEV_TO_MEM;
	cfg.dst_addr = 0;
	cfg.src_addr = flctl->fifo;
	ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
	if (ret < 0)
		goto err;

	init_completion(&flctl->dma_complete);

	return;

err:
	flctl_release_dma(flctl);
}

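/*
 * Pack the column/page address into FLADR. The layout depends on the page
 * size: large-page devices use a 12-bit column plus two page-address bytes
 * (with a third byte written to FLADR2 when rw_ADRCNT is ADRCNT2_E), while
 * small-page devices use one column byte plus three page-address bytes.
 */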
static void set_addr(struct mtd_info *mtd, int column, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t addr = 0;

	if (column == -1) {
		addr = page_addr;	/* ERASE1 */
	} else if (page_addr != -1) {
		/* SEQIN, READ0, etc.. */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;
		if (flctl->page_size) {
			addr = column & 0x0FFF;
			addr |= (page_addr & 0xff) << 16;
			addr |= ((page_addr >> 8) & 0xff) << 24;
			/* bigger than 128MB */
			if (flctl->rw_ADRCNT == ADRCNT2_E) {
				uint32_t addr2;
				addr2 = (page_addr >> 16) & 0xff;
				writel(addr2, FLADR2(flctl));
			}
		} else {
			addr = column;
			addr |= (page_addr & 0xff) << 8;
			addr |= ((page_addr >> 8) & 0xff) << 16;
			addr |= ((page_addr >> 16) & 0xff) << 24;
		}
	}
	writel(addr, FLADR(flctl));
}

static void wait_rfifo_ready(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		uint32_t val;
		/* check FIFO */
		val = readl(FLDTCNTR(flctl)) >> 16;
		if (val & 0xFF)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

static void wait_wfifo_ready(struct sh_flctl *flctl)
{
	uint32_t len, timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		/* check FIFO */
		len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
		if (len >= 4)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

static enum flctl_ecc_res_t wait_recfifo_ready
		(struct sh_flctl *flctl, int sector_number)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;
	void __iomem *ecc_reg[4];
	int i;
	int state = FL_SUCCESS;
	uint32_t data, size;

	/*
	 * First this loop checks in FLDTCNTR if we are ready to read out the
	 * oob data. This is the case if either all went fine without errors or
	 * if the bottom part of the loop corrected the errors or marked them as
	 * uncorrectable and the controller is given time to push the data into
	 * the FIFO.
	 */
	while (timeout--) {
		/* check if all is ok and we can read out the OOB */
		size = readl(FLDTCNTR(flctl)) >> 24;
		if ((size & 0xFF) == 4)
			return state;

		/* check if a correction code has been calculated */
		if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
			/*
			 * either we wait for the fifo to be filled or a
			 * correction pattern is being generated
			 */
			udelay(1);
			continue;
		}

		/* check for an uncorrectable error */
		if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
			/* check if we face a non-empty page */
			for (i = 0; i < 512; i++) {
				if (flctl->done_buff[i] != 0xff) {
					state = FL_ERROR; /* can't correct */
					break;
				}
			}

			if (state == FL_SUCCESS)
				dev_dbg(&flctl->pdev->dev,
					"reading empty sector %d, ecc error ignored\n",
					sector_number);

			writel(0, FL4ECCCR(flctl));
			continue;
		}

		/* start error correction */
		ecc_reg[0] = FL4ECCRESULT0(flctl);
		ecc_reg[1] = FL4ECCRESULT1(flctl);
		ecc_reg[2] = FL4ECCRESULT2(flctl);
		ecc_reg[3] = FL4ECCRESULT3(flctl);

		for (i = 0; i < 3; i++) {
			uint8_t org;
			unsigned int index;

			data = readl(ecc_reg[i]);

			if (flctl->page_size)
				index = (512 * sector_number) +
					(data >> 16);
			else
				index = data >> 16;

			org = flctl->done_buff[index];
			flctl->done_buff[index] = org ^ (data & 0xFF);
		}
		state = FL_REPAIRABLE;
		writel(0, FL4ECCCR(flctl));
	}

	timeout_error(flctl, __func__);
	return FL_TIMEOUT;	/* timeout */
}

static void wait_wecfifo_ready(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;
	uint32_t len;

	while (timeout--) {
		/* check FLECFIFO */
		len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
		if (len >= 4)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

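/*
 * Transfer a buffer to or from FIFO0 via the dmaengine API. DREQ0EN is set
 * in FLINTDMACR only for the duration of the transfer. Returns > 0 on
 * success so that callers can fall back to PIO otherwise.
 */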
static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
					int len, enum dma_data_direction dir)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan;
	enum dma_transfer_direction tr_dir;
	dma_addr_t dma_addr;
	dma_cookie_t cookie = -EINVAL;
	uint32_t reg;
	int ret;

	if (dir == DMA_FROM_DEVICE) {
		chan = flctl->chan_fifo0_rx;
		tr_dir = DMA_DEV_TO_MEM;
	} else {
		chan = flctl->chan_fifo0_tx;
		tr_dir = DMA_MEM_TO_DEV;
	}

	dma_addr = dma_map_single(chan->device->dev, buf, len, dir);

	if (dma_addr)
		desc = dmaengine_prep_slave_single(chan, dma_addr, len,
			tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (desc) {
		reg = readl(FLINTDMACR(flctl));
		reg |= DREQ0EN;
		writel(reg, FLINTDMACR(flctl));

		desc->callback = flctl_dma_complete;
		desc->callback_param = flctl;
		cookie = dmaengine_submit(desc);

		dma_async_issue_pending(chan);
	} else {
		/* DMA failed, fall back to PIO */
		flctl_release_dma(flctl);
		dev_warn(&flctl->pdev->dev,
			 "DMA failed, falling back to PIO\n");
		ret = -EIO;
		goto out;
	}

	ret = wait_for_completion_timeout(&flctl->dma_complete,
					  msecs_to_jiffies(3000));

	if (ret <= 0) {
		dmaengine_terminate_all(chan);
		dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
	}

out:
	reg = readl(FLINTDMACR(flctl));
	reg &= ~DREQ0EN;
	writel(reg, FLINTDMACR(flctl));

	dma_unmap_single(chan->device->dev, dma_addr, len, dir);

	/* ret > 0 is success */
	return ret;
}

static void read_datareg(struct sh_flctl *flctl, int offset)
{
	unsigned long data;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	wait_completion(flctl);

	data = readl(FLDATAR(flctl));
	*buf = le32_to_cpu(data);
}

static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;

	/* initiate DMA transfer */
	if (flctl->chan_fifo0_rx && rlen >= 32 &&
	    flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_DEV_TO_MEM) > 0)
		goto convert;	/* DMA success */

	/* do polling transfer */
	for (i = 0; i < len_4align; i++) {
		wait_rfifo_ready(flctl);
		buf[i] = readl(FLDTFIFO(flctl));
	}

convert:
	for (i = 0; i < len_4align; i++)
		buf[i] = be32_to_cpu(buf[i]);
}

static enum flctl_ecc_res_t read_ecfiforeg
		(struct sh_flctl *flctl, uint8_t *buff, int sector)
{
	int i;
	enum flctl_ecc_res_t res;
	unsigned long *ecc_buf = (unsigned long *)buff;

	res = wait_recfifo_ready(flctl, sector);

	if (res != FL_ERROR) {
		for (i = 0; i < 4; i++) {
			ecc_buf[i] = readl(FLECFIFO(flctl));
			ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
		}
	}

	return res;
}

static void write_fiforeg(struct sh_flctl *flctl, int rlen,
						unsigned int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;
	for (i = 0; i < len_4align; i++) {
		wait_wfifo_ready(flctl);
		writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
	}
}

static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
						unsigned int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;

	for (i = 0; i < len_4align; i++)
		buf[i] = cpu_to_be32(buf[i]);

	/* initiate DMA transfer */
	if (flctl->chan_fifo0_tx && rlen >= 32 &&
	    flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_MEM_TO_DEV) > 0)
		return;	/* DMA success */

	/* do polling transfer */
	for (i = 0; i < len_4align; i++) {
		wait_wecfifo_ready(flctl);
		writel(buf[i], FLECFIFO(flctl));
	}
}

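/*
 * Program FLCMNCR/FLCMDCR/FLCMCDR for the given NAND command: select the
 * address cycle count, data source and 8/16-bit bus width, and set SNAND_E
 * for 2048-byte page devices.
 */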
static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
	uint32_t flcmdcr_val, addr_len_bytes = 0;

	/* Set SNAND bit if page size is 2048byte */
	if (flctl->page_size)
		flcmncr_val |= SNAND_E;
	else
		flcmncr_val &= ~SNAND_E;

	/* default FLCMDCR val */
	flcmdcr_val = DOCMD1_E | DOADR_E;

	/* Set for FLCMDCR */
	switch (cmd) {
	case NAND_CMD_ERASE1:
		addr_len_bytes = flctl->erase_ADRCNT;
		flcmdcr_val |= DOCMD2_E;
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
	case NAND_CMD_RNDOUT:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= CDSRC_E;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_SEQIN:
		/* This case is that cmd is READ0 or READ1 or READ00 */
		flcmdcr_val &= ~DOADR_E;	/* ONLY execute 1st cmd */
		break;
	case NAND_CMD_PAGEPROG:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_READID:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val |= CDSRC_E;
		addr_len_bytes = ADRCNT_1;
		break;
	case NAND_CMD_STATUS:
	case NAND_CMD_RESET:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val &= ~(DOADR_E | DOSR_E);
		break;
	default:
		break;
	}

	/* Set address bytes parameter */
	flcmdcr_val |= addr_len_bytes;

	/* Now actually write */
	writel(flcmncr_val, FLCMNCR(flctl));
	writel(flcmdcr_val, FLCMDCR(flctl));
	writel(flcmcdr_val, FLCMCDR(flctl));
}

static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	chip->read_buf(mtd, buf, mtd->writesize);
	if (oob_required)
		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}

static int flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				  const uint8_t *buf, int oob_required,
				  int page)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}

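/*
 * Read a full page in hardware-ECC sector access mode: 512 data bytes plus
 * 16 OOB bytes per sector, updating the ECC statistics for each sector.
 */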
static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int sector, page_sectors;
	enum flctl_ecc_res_t ecc_result;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
		 FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));

	empty_fifo(flctl);
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		read_fiforeg(flctl, 512, 512 * sector);

		ecc_result = read_ecfiforeg(flctl,
			&flctl->done_buff[mtd->writesize + 16 * sector],
			sector);

		switch (ecc_result) {
		case FL_REPAIRABLE:
			dev_info(&flctl->pdev->dev,
				"applied ecc on page 0x%x", page_addr);
			mtd->ecc_stats.corrected++;
			break;
		case FL_ERROR:
			dev_warn(&flctl->pdev->dev,
				"page 0x%x contains corrupted data\n",
				page_addr);
			mtd->ecc_stats.failed++;
			break;
		default:
			;
		}
	}

	wait_completion(flctl);

	writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
			FLCMNCR(flctl));
}

static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_sectors = flctl->page_size ? 4 : 1;
	int i;

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	empty_fifo(flctl);

	for (i = 0; i < page_sectors; i++) {
		set_addr(mtd, (512 + 16) * i + 512, page_addr);
		writel(16, FLDTCNTR(flctl));

		start_translation(flctl);
		read_fiforeg(flctl, 16, 16 * i);
		wait_completion(flctl);
	}
}

static void execmd_write_page_sector(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	empty_fifo(flctl);
	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		write_fiforeg(flctl, 512, 512 * sector);
		write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
	}

	wait_completion(flctl);
	writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
}

static void execmd_write_oob(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	for (sector = 0; sector < page_sectors; sector++) {
		empty_fifo(flctl);
		set_addr(mtd, sector * 528 + 512, page_addr);
		writel(16, FLDTCNTR(flctl));	/* set read size */

		start_translation(flctl);
		write_fiforeg(flctl, 16, 16 * sector);
		wait_completion(flctl);
	}
}

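/*
 * Main command dispatcher. Data is staged in flctl->done_buff and handed to
 * the generic NAND layer through the read_byte/read_buf/write_buf hooks.
 */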
static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
			int column, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t read_cmd = 0;

	pm_runtime_get_sync(&flctl->pdev->dev);

	flctl->read_bytes = 0;
	if (command != NAND_CMD_PAGEPROG)
		flctl->index = 0;

	switch (command) {
	case NAND_CMD_READ1:
	case NAND_CMD_READ0:
		if (flctl->hwecc) {
			/* read page with hwecc */
			execmd_read_page_sector(mtd, page_addr);
			break;
		}
		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, 0, page_addr);

		flctl->read_bytes = mtd->writesize + mtd->oobsize;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;
		flctl->index += column;
		goto read_normal_exit;

	case NAND_CMD_READOOB:
		if (flctl->hwecc) {
			/* read page with hwecc */
			execmd_read_oob(mtd, page_addr);
			break;
		}

		if (flctl->page_size) {
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| NAND_CMD_READ0);
			set_addr(mtd, mtd->writesize, page_addr);
		} else {
			set_cmd_regs(mtd, command, command);
			set_addr(mtd, 0, page_addr);
		}
		flctl->read_bytes = mtd->oobsize;
		goto read_normal_exit;

	case NAND_CMD_RNDOUT:
		if (flctl->hwecc)
			break;

		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, column, 0);

		flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
		goto read_normal_exit;

	case NAND_CMD_READID:
		set_cmd_regs(mtd, command, command);

		/* READID is always performed using an 8-bit bus */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column <<= 1;
		set_addr(mtd, column, 0);

		flctl->read_bytes = 8;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		empty_fifo(flctl);
		start_translation(flctl);
		read_fiforeg(flctl, flctl->read_bytes, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_ERASE1:
		flctl->erase1_page_addr = page_addr;
		break;

	case NAND_CMD_ERASE2:
		set_cmd_regs(mtd, NAND_CMD_ERASE1,
			(command << 8) | NAND_CMD_ERASE1);
		set_addr(mtd, -1, flctl->erase1_page_addr);
		start_translation(flctl);
		wait_completion(flctl);
		break;

	case NAND_CMD_SEQIN:
		if (!flctl->page_size) {
			/* output read command */
			if (column >= mtd->writesize) {
				column -= mtd->writesize;
				read_cmd = NAND_CMD_READOOB;
			} else if (column < 256) {
				read_cmd = NAND_CMD_READ0;
			} else {
				column -= 256;
				read_cmd = NAND_CMD_READ1;
			}
		}
		flctl->seqin_column = column;
		flctl->seqin_page_addr = page_addr;
		flctl->seqin_read_cmd = read_cmd;
		break;

	case NAND_CMD_PAGEPROG:
		empty_fifo(flctl);
		if (!flctl->page_size) {
			set_cmd_regs(mtd, NAND_CMD_SEQIN,
					flctl->seqin_read_cmd);
			set_addr(mtd, -1, -1);
			writel(0, FLDTCNTR(flctl));	/* set 0 size */
			start_translation(flctl);
			wait_completion(flctl);
		}
		if (flctl->hwecc) {
			/* write page with hwecc */
			if (flctl->seqin_column == mtd->writesize)
				execmd_write_oob(mtd);
			else if (!flctl->seqin_column)
				execmd_write_page_sector(mtd);
			else
				printk(KERN_ERR "Invalid address !?\n");
			break;
		}
		set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
		set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
		writel(flctl->index, FLDTCNTR(flctl));	/* set write size */
		start_translation(flctl);
		write_fiforeg(flctl, flctl->index, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_STATUS:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		flctl->read_bytes = 1;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		start_translation(flctl);
		read_datareg(flctl, 0); /* read and end */
		break;

	case NAND_CMD_RESET:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		writel(0, FLDTCNTR(flctl)); /* set 0 size */
		start_translation(flctl);
		wait_completion(flctl);
		break;

	default:
		break;
	}
	goto runtime_exit;

read_normal_exit:
	writel(flctl->read_bytes, FLDTCNTR(flctl));	/* set read size */
	empty_fifo(flctl);
	start_translation(flctl);
	read_fiforeg(flctl, flctl->read_bytes, 0);
	wait_completion(flctl);
runtime_exit:
	pm_runtime_put_sync(&flctl->pdev->dev);
	return;
}

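/*
 * Chip select: besides toggling CE0_ENABLE, this adds or removes a PM QoS
 * resume-latency request and asserts HOLDEN when the platform requires it.
 */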
static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int ret;

	switch (chipnr) {
	case -1:
		flctl->flcmncr_base &= ~CE0_ENABLE;

		pm_runtime_get_sync(&flctl->pdev->dev);
		writel(flctl->flcmncr_base, FLCMNCR(flctl));

		if (flctl->qos_request) {
			dev_pm_qos_remove_request(&flctl->pm_qos);
			flctl->qos_request = 0;
		}

		pm_runtime_put_sync(&flctl->pdev->dev);
		break;
	case 0:
		flctl->flcmncr_base |= CE0_ENABLE;

		if (!flctl->qos_request) {
			ret = dev_pm_qos_add_request(&flctl->pdev->dev,
							&flctl->pm_qos,
							DEV_PM_QOS_RESUME_LATENCY,
							100);
			if (ret < 0)
				dev_err(&flctl->pdev->dev,
					"PM QoS request failed: %d\n", ret);
			flctl->qos_request = 1;
		}

		if (flctl->holden) {
			pm_runtime_get_sync(&flctl->pdev->dev);
			writel(HOLDEN, FLHOLDCR(flctl));
			pm_runtime_put_sync(&flctl->pdev->dev);
		}
		break;
	default:
		BUG();
	}
}

static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);

	memcpy(&flctl->done_buff[flctl->index], buf, len);
	flctl->index += len;
}

static uint8_t flctl_read_byte(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint8_t data;

	data = flctl->done_buff[flctl->index];
	flctl->index++;
	return data;
}

static uint16_t flctl_read_word(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint16_t *buf = (uint16_t *)&flctl->done_buff[flctl->index];

	flctl->index += 2;
	return *buf;
}

static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);

	memcpy(buf, &flctl->done_buff[flctl->index], len);
	flctl->index += len;
}

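/*
 * Finish chip setup after nand_scan_ident(): derive the address cycle
 * counts from the detected chip size and select either the 4-symbol
 * hardware ECC or software Hamming ECC.
 */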
static int flctl_chip_init_tail(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	struct nand_chip *chip = &flctl->chip;

	if (mtd->writesize == 512) {
		flctl->page_size = 0;
		if (chip->chipsize > (32 << 20)) {
			/* bigger than 32MB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (chip->chipsize > (2 << 16)) {
			/* bigger than 128KB */
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_2;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	} else {
		flctl->page_size = 1;
		if (chip->chipsize > (128 << 20)) {
			/* bigger than 128MB */
			flctl->rw_ADRCNT = ADRCNT2_E;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (chip->chipsize > (8 << 16)) {
			/* bigger than 512KB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	}

	if (flctl->hwecc) {
		if (mtd->writesize == 512) {
			mtd_set_ooblayout(mtd, &flctl_4secc_oob_smallpage_ops);
			chip->badblock_pattern = &flctl_4secc_smallpage;
		} else {
			mtd_set_ooblayout(mtd, &flctl_4secc_oob_largepage_ops);
			chip->badblock_pattern = &flctl_4secc_largepage;
		}

		chip->ecc.size = 512;
		chip->ecc.bytes = 10;
		chip->ecc.strength = 4;
		chip->ecc.read_page = flctl_read_page_hwecc;
		chip->ecc.write_page = flctl_write_page_hwecc;
		chip->ecc.mode = NAND_ECC_HW;

		/* 4 symbols ECC enabled */
		flctl->flcmncr_base |= _4ECCEN;
	} else {
		chip->ecc.mode = NAND_ECC_SOFT;
		chip->ecc.algo = NAND_ECC_HAMMING;
	}

	return 0;
}

static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
{
	struct sh_flctl *flctl = dev_id;

	dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));

	return IRQ_HANDLED;
}

struct flctl_soc_config {
	unsigned long flcmncr_val;
	unsigned has_hwecc:1;
	unsigned use_holden:1;
};

static struct flctl_soc_config flctl_sh7372_config = {
	.flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
	.has_hwecc = 1,
	.use_holden = 1,
};

static const struct of_device_id of_flctl_match[] = {
	{ .compatible = "renesas,shmobile-flctl-sh7372",
	  .data = &flctl_sh7372_config },
	{},
};
MODULE_DEVICE_TABLE(of, of_flctl_match);

static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
{
	const struct of_device_id *match;
	struct flctl_soc_config *config;
	struct sh_flctl_platform_data *pdata;

	match = of_match_device(of_flctl_match, dev);
	if (match)
		config = (struct flctl_soc_config *)match->data;
	else {
		dev_err(dev, "%s: no OF configuration attached\n", __func__);
		return NULL;
	}

	pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
								GFP_KERNEL);
	if (!pdata)
		return NULL;

	/* set SoC specific options */
	pdata->flcmncr_val = config->flcmncr_val;
	pdata->has_hwecc = config->has_hwecc;
	pdata->use_holden = config->use_holden;

	return pdata;
}

static int flctl_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct sh_flctl *flctl;
	struct mtd_info *flctl_mtd;
	struct nand_chip *nand;
	struct sh_flctl_platform_data *pdata;
	int ret;
	int irq;

	flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
	if (!flctl)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	flctl->reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(flctl->reg))
		return PTR_ERR(flctl->reg);
	flctl->fifo = res->start + 0x24; /* FLDTFIFO */

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get flste irq data\n");
		return -ENXIO;
	}

	ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
			       "flste", flctl);
	if (ret) {
		dev_err(&pdev->dev, "request interrupt failed.\n");
		return ret;
	}

	if (pdev->dev.of_node)
		pdata = flctl_parse_dt(&pdev->dev);
	else
		pdata = dev_get_platdata(&pdev->dev);

	if (!pdata) {
		dev_err(&pdev->dev, "no setup data defined\n");
		return -EINVAL;
	}

	platform_set_drvdata(pdev, flctl);
	nand = &flctl->chip;
	flctl_mtd = nand_to_mtd(nand);
	nand_set_flash_node(nand, pdev->dev.of_node);
	flctl_mtd->dev.parent = &pdev->dev;
	flctl->pdev = pdev;
	flctl->hwecc = pdata->has_hwecc;
	flctl->holden = pdata->use_holden;
	flctl->flcmncr_base = pdata->flcmncr_val;
	flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;

	/* Set address of hardware control function */
	/* 20 us command delay time */
	nand->chip_delay = 20;

	nand->read_byte = flctl_read_byte;
	nand->read_word = flctl_read_word;
	nand->write_buf = flctl_write_buf;
	nand->read_buf = flctl_read_buf;
	nand->select_chip = flctl_select_chip;
	nand->cmdfunc = flctl_cmdfunc;

	if (pdata->flcmncr_val & SEL_16BIT)
		nand->options |= NAND_BUSWIDTH_16;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	flctl_setup_dma(flctl);

	ret = nand_scan_ident(flctl_mtd, 1, NULL);
	if (ret)
		goto err_chip;

	if (nand->options & NAND_BUSWIDTH_16) {
		/*
		 * NAND_BUSWIDTH_16 may have been set by nand_scan_ident().
		 * Add the SEL_16BIT flag in pdata->flcmncr_val and re-assign
		 * flctl->flcmncr_base to pdata->flcmncr_val.
		 */
		pdata->flcmncr_val |= SEL_16BIT;
		flctl->flcmncr_base = pdata->flcmncr_val;
	}

	ret = flctl_chip_init_tail(flctl_mtd);
	if (ret)
		goto err_chip;

	ret = nand_scan_tail(flctl_mtd);
	if (ret)
		goto err_chip;

	ret = mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);

	return 0;

err_chip:
	flctl_release_dma(flctl);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int flctl_remove(struct platform_device *pdev)
{
	struct sh_flctl *flctl = platform_get_drvdata(pdev);

	flctl_release_dma(flctl);
	nand_release(nand_to_mtd(&flctl->chip));
	pm_runtime_disable(&pdev->dev);

	return 0;
}

static struct platform_driver flctl_driver = {
	.remove = flctl_remove,
	.driver = {
		.name	= "sh_flctl",
		.of_match_table = of_match_ptr(of_flctl_match),
	},
};

module_platform_driver_probe(flctl_driver, flctl_probe);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("SuperH FLCTL driver");
MODULE_ALIAS("platform:sh_flctl");