mtd: onenand: use dev_get_platdata()
/*
 * linux/drivers/mtd/onenand/omap2.c
 *
 * OneNAND driver for OMAP2 / OMAP3
 *
 * Copyright © 2005-2006 Nokia Corporation
 *
 * Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
 * IRQ and DMA support written by Timo Teras
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>

#include <asm/mach/flash.h>
#include <linux/platform_data/mtd-onenand-omap2.h>
#include <asm/gpio.h>

#include <linux/omap-dma.h>

#define DRIVER_NAME "omap2-onenand"

#define ONENAND_BUFRAM_SIZE	(1024 * 5)

struct omap2_onenand {
	struct platform_device *pdev;
	int gpmc_cs;
	unsigned long phys_base;
	unsigned int mem_size;
	int gpio_irq;
	struct mtd_info mtd;
	struct onenand_chip onenand;
	struct completion irq_done;
	struct completion dma_done;
	int dma_channel;
	int freq;
	int (*setup)(void __iomem *base, int *freq_ptr);
	struct regulator *regulator;
	u8 flags;
};

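/*
 * Completion callbacks: the DMA callback runs when a system DMA
 * transfer to/from the BufferRAM finishes, the GPIO interrupt handler
 * runs when the OneNAND INT pin signals command completion.  Each one
 * just completes the corresponding completion that the wait paths
 * below sleep on.
 */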
static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
	struct omap2_onenand *c = data;

	complete(&c->dma_done);
}

static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
	struct omap2_onenand *c = dev_id;

	complete(&c->irq_done);

	return IRQ_HANDLED;
}

static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
	return readw(c->onenand.base + reg);
}

static inline void write_reg(struct omap2_onenand *c, unsigned short value,
			     int reg)
{
	writew(value, c->onenand.base + reg);
}

static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
	printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
	       msg, state, ctrl, intr);
}

static void wait_warn(char *msg, int state, unsigned int ctrl,
		      unsigned int intr)
{
	printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
	       "intr 0x%04x\n", msg, state, ctrl, intr);
}

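/*
 * Wait hook installed as onenand_chip->wait when DMA/IRQ mode is used.
 * Reset and erase prepare/verify states are busy-polled with udelay();
 * reads poll the interrupt register with a 20 ms timeout; all other
 * states enable the OneNAND interrupt output and sleep on irq_done
 * until the GPIO interrupt fires.  Afterwards the controller status is
 * checked and read ECC results are folded into mtd->ecc_stats: 2-bit
 * errors fail with -EBADMSG, 1-bit errors are counted as corrected.
 */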
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	unsigned int intr = 0;
	unsigned int ctrl, ctrl_mask;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
	    state == FL_VERIFYING_ERASE) {
		int i = 21;
		unsigned int intr_flags = ONENAND_INT_MASTER;

		switch (state) {
		case FL_RESETING:
			intr_flags |= ONENAND_INT_RESET;
			break;
		case FL_PREPARING_ERASE:
			intr_flags |= ONENAND_INT_ERASE;
			break;
		case FL_VERIFYING_ERASE:
			i = 101;
			break;
		}

		while (--i) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if ((intr & intr_flags) == intr_flags)
			return 0;
		/* Continue in wait for interrupt branch */
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			if (c->flags & ONENAND_IN_OMAP34XX)
				/* Add a delay to let GPIO settle */
				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		INIT_COMPLETION(c->irq_done);
		if (c->gpio_irq) {
			result = gpio_get_value(c->gpio_irq);
			if (result == -1) {
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				wait_err("gpio error", state, ctrl, intr);
				return -EIO;
			}
		} else
			result = 0;
		if (result == 0) {
			int retry_cnt = 0;
retry:
			result = wait_for_completion_timeout(&c->irq_done,
						msecs_to_jiffies(20));
			if (result == 0) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO &&
				    !this->ongoing) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
					"Device is write protected!!!\n");
		return -EIO;
	}

	ctrl_mask = 0xFE9F;
	if (this->ongoing)
		ctrl_mask &= ~0x8000;

	if (ctrl & ctrl_mask)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}

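/*
 * Offset of the currently selected BufferRAM bank inside the mapped
 * window: bank 1 lives one full page (DataRAM) or one OOB area
 * (SpareRAM) above bank 0.
 */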
static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
	struct onenand_chip *this = mtd->priv;

	if (ONENAND_CURRENT_BUFFERRAM(this)) {
		if (area == ONENAND_DATARAM)
			return this->writesize;
		if (area == ONENAND_SPARERAM)
			return mtd->oobsize;
	}

	return 0;
}

#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)

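/*
 * OMAP3 BufferRAM read: use a 32-bit system DMA transfer when the
 * buffer and BufferRAM offset are word aligned and at least 384 bytes
 * are requested; otherwise (or from interrupt/panic context) fall back
 * to memcpy().  vmalloc'ed buffers are translated to their backing
 * page, provided the transfer does not cross a page boundary.  DMA
 * completion is busy-waited with a 20 ms timeout.
 */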
static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	size_t xtra;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt() || oops_in_progress)
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	xtra = count & 3;
	if (xtra) {
		count -= xtra;
		memcpy(buf + count, this->base + bram_offset + count, xtra);
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		goto out_copy;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(buf, this->base + bram_offset, count);
	return 0;
}

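/*
 * OMAP3 BufferRAM write: mirror of the read path above with the DMA
 * direction reversed (memory to OneNAND BufferRAM).
 */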
static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt() || oops_in_progress)
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}

#else

static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	return -ENOSYS;
}

static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	return -ENOSYS;
}

#endif

#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)

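/*
 * OMAP2 BufferRAM read: the DMA path below is currently disabled by
 * the "if (1 || ...)" guard (see the PM note), so in practice the data
 * is always copied with memcpy() from the mapped BufferRAM.
 */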
static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used. Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy(buffer, (__force void *)(this->base + bram_offset),
		       count);
		return 0;
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count / 4, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	return 0;
}

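/*
 * OMAP2 BufferRAM write: as with the read path, DMA is kept disabled
 * and the data is copied with memcpy(); the dormant DMA variant would
 * use 16-bit transfers instead of the 32-bit ones used for reads.
 */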
static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used. Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy((__force void *)(this->base + bram_offset), buffer,
		       count);
		return 0;
	}

	dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
				 DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
				     count / 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	return 0;
}

#else

static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	return -ENOSYS;
}

static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	return -ENOSYS;
}

#endif

static struct platform_driver omap2_onenand_driver;

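/*
 * omap2_onenand_rephase() walks every device bound to this driver and
 * re-runs the board-supplied setup hook so the GPMC timings can be
 * re-tuned for the current OneNAND frequency.
 */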
static int __adjust_timing(struct device *dev, void *data)
{
	int ret = 0;
	struct omap2_onenand *c;

	c = dev_get_drvdata(dev);

	BUG_ON(c->setup == NULL);

	/* DMA is not in use so this is all that is needed */
	/* Revisit for OMAP3! */
	ret = c->setup(c->onenand.base, &c->freq);

	return ret;
}

int omap2_onenand_rephase(void)
{
	return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
				      NULL, __adjust_timing);
}

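/*
 * Shutdown hook, also called from omap2_onenand_remove(): prevent the
 * OMAP boot ROM from misidentifying the chip because of stale
 * BufferRAM contents after a soft reset.
 */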
static void omap2_onenand_shutdown(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	/* With certain content in the buffer RAM, the OMAP boot ROM code
	 * can recognize the flash chip incorrectly. Zero it out before
	 * soft reset.
	 */
	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}

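/*
 * Regulator hooks, installed in probe() only when the platform data
 * sets regulator_can_sleep: they give the OneNAND core a way to switch
 * the "vonenand" supply off while the chip is not being accessed.
 */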
static int omap2_onenand_enable(struct mtd_info *mtd)
{
	int ret;
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);

	ret = regulator_enable(c->regulator);
	if (ret != 0)
		dev_err(&c->pdev->dev, "can't enable regulator\n");

	return ret;
}

static int omap2_onenand_disable(struct mtd_info *mtd)
{
	int ret;
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);

	ret = regulator_disable(c->regulator);
	if (ret != 0)
		dev_err(&c->pdev->dev, "can't disable regulator\n");

	return ret;
}

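/*
 * Probe: map the GPMC chip-select window described by the platform
 * data, run the optional board setup hook, claim the INT GPIO and a
 * system DMA channel when configured (falling back to PIO otherwise),
 * then let onenand_scan() identify the chip and register the MTD with
 * partitions from platform data or the associated device tree node.
 */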
static int omap2_onenand_probe(struct platform_device *pdev)
{
	struct omap_onenand_platform_data *pdata;
	struct omap2_onenand *c;
	struct onenand_chip *this;
	int r;
	struct resource *res;
	struct mtd_part_parser_data ppdata = {};

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->flags = pdata->flags;
	c->gpmc_cs = pdata->cs;
	c->gpio_irq = pdata->gpio_irq;
	c->dma_channel = pdata->dma_channel;
	if (c->dma_channel < 0) {
		/* if -1, don't use DMA */
		c->gpio_irq = 0;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		r = -EINVAL;
		dev_err(&pdev->dev, "error getting memory resource\n");
		goto err_kfree;
	}

	c->phys_base = res->start;
	c->mem_size = resource_size(res);

	if (request_mem_region(c->phys_base, c->mem_size,
			       pdev->dev.driver->name) == NULL) {
		dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, size: 0x%x\n",
			c->phys_base, c->mem_size);
		r = -EBUSY;
		goto err_kfree;
	}
	c->onenand.base = ioremap(c->phys_base, c->mem_size);
	if (c->onenand.base == NULL) {
		r = -ENOMEM;
		goto err_release_mem_region;
	}

	if (pdata->onenand_setup != NULL) {
		r = pdata->onenand_setup(c->onenand.base, &c->freq);
		if (r < 0) {
			dev_err(&pdev->dev, "Onenand platform setup failed: "
				"%d\n", r);
			goto err_iounmap;
		}
		c->setup = pdata->onenand_setup;
	}

	if (c->gpio_irq) {
		if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
			dev_err(&pdev->dev, "Failed to request GPIO%d for "
				"OneNAND\n", c->gpio_irq);
			goto err_iounmap;
		}
		gpio_direction_input(c->gpio_irq);

		if ((r = request_irq(gpio_to_irq(c->gpio_irq),
				     omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
				     pdev->dev.driver->name, c)) < 0)
			goto err_release_gpio;
	}

	if (c->dma_channel >= 0) {
		r = omap_request_dma(0, pdev->dev.driver->name,
				     omap2_onenand_dma_cb, (void *) c,
				     &c->dma_channel);
		if (r == 0) {
			omap_set_dma_write_mode(c->dma_channel,
						OMAP_DMA_WRITE_NON_POSTED);
			omap_set_dma_src_data_pack(c->dma_channel, 1);
			omap_set_dma_src_burst_mode(c->dma_channel,
						    OMAP_DMA_DATA_BURST_8);
			omap_set_dma_dest_data_pack(c->dma_channel, 1);
			omap_set_dma_dest_burst_mode(c->dma_channel,
						     OMAP_DMA_DATA_BURST_8);
		} else {
			dev_info(&pdev->dev,
				 "failed to allocate DMA for OneNAND, "
				 "using PIO instead\n");
			c->dma_channel = -1;
		}
	}

	dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
		 "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base,
		 c->onenand.base, c->freq);

	c->pdev = pdev;
	c->mtd.name = dev_name(&pdev->dev);
	c->mtd.priv = &c->onenand;
	c->mtd.owner = THIS_MODULE;

	c->mtd.dev.parent = &pdev->dev;

	this = &c->onenand;
	if (c->dma_channel >= 0) {
		this->wait = omap2_onenand_wait;
		if (c->flags & ONENAND_IN_OMAP34XX) {
			this->read_bufferram = omap3_onenand_read_bufferram;
			this->write_bufferram = omap3_onenand_write_bufferram;
		} else {
			this->read_bufferram = omap2_onenand_read_bufferram;
			this->write_bufferram = omap2_onenand_write_bufferram;
		}
	}

	if (pdata->regulator_can_sleep) {
		c->regulator = regulator_get(&pdev->dev, "vonenand");
		if (IS_ERR(c->regulator)) {
			dev_err(&pdev->dev, "Failed to get regulator\n");
			r = PTR_ERR(c->regulator);
			goto err_release_dma;
		}
		c->onenand.enable = omap2_onenand_enable;
		c->onenand.disable = omap2_onenand_disable;
	}

	if (pdata->skip_initial_unlocking)
		this->options |= ONENAND_SKIP_INITIAL_UNLOCKING;

	if ((r = onenand_scan(&c->mtd, 1)) < 0)
		goto err_release_regulator;

	ppdata.of_node = pdata->of_node;
	r = mtd_device_parse_register(&c->mtd, NULL, &ppdata,
				      pdata ? pdata->parts : NULL,
				      pdata ? pdata->nr_parts : 0);
	if (r)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_regulator:
	regulator_put(c->regulator);
err_release_dma:
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	if (c->gpio_irq)
		free_irq(gpio_to_irq(c->gpio_irq), c);
err_release_gpio:
	if (c->gpio_irq)
		gpio_free(c->gpio_irq);
err_iounmap:
	iounmap(c->onenand.base);
err_release_mem_region:
	release_mem_region(c->phys_base, c->mem_size);
err_kfree:
	kfree(c);

	return r;
}

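/* Undo everything set up in omap2_onenand_probe(). */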
static int omap2_onenand_remove(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	onenand_release(&c->mtd);
	regulator_put(c->regulator);
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	omap2_onenand_shutdown(pdev);
	if (c->gpio_irq) {
		free_irq(gpio_to_irq(c->gpio_irq), c);
		gpio_free(c->gpio_irq);
	}
	iounmap(c->onenand.base);
	release_mem_region(c->phys_base, c->mem_size);
	kfree(c);

	return 0;
}

static struct platform_driver omap2_onenand_driver = {
	.probe		= omap2_onenand_probe,
	.remove		= omap2_onenand_remove,
	.shutdown	= omap2_onenand_shutdown,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(omap2_onenand_driver);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");