/*
 * Samsung S3C64XX/S5PC1XX OneNAND driver
 *
 * Copyright © 2008-2010 Samsung Electronics
 * Kyungmin Park <kyungmin.park@samsung.com>
 * Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Implementation:
 *	S3C64XX and S5PC100: emulate the pseudo BufferRAM
 *	S5PC110: use DMA
 */
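
/*
 * Data paths, as implemented below:
 *
 *  - S3C64XX/S5PC100: the controller only exposes the OneNAND BufferRAM
 *    through a command-mapped AHB window, so reads and writes are staged
 *    in the driver-allocated page_buf/oob_buf ("pseudo BufferRAM") and
 *    copied word by word via s3c_read_cmd()/s3c_write_cmd().
 *
 *  - S5PC110: the BufferRAM is memory-mapped, so full-page reads are moved
 *    with the on-chip DMA engine (s5pc110_dma_poll() or, when an IRQ is
 *    available, s5pc110_dma_irq()), falling back to memcpy() for unaligned
 *    or partial transfers.
 */
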
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>

#include <asm/mach/flash.h>
#include <plat/regs-onenand.h>

#include <linux/io.h>

enum soc_type {
	TYPE_S3C6400,
	TYPE_S3C6410,
	TYPE_S5PC100,
	TYPE_S5PC110,
};

#define ONENAND_ERASE_STATUS		0x00
#define ONENAND_MULTI_ERASE_SET		0x01
#define ONENAND_ERASE_START		0x03
#define ONENAND_UNLOCK_START		0x08
#define ONENAND_UNLOCK_END		0x09
#define ONENAND_LOCK_START		0x0A
#define ONENAND_LOCK_END		0x0B
#define ONENAND_LOCK_TIGHT_START	0x0C
#define ONENAND_LOCK_TIGHT_END		0x0D
#define ONENAND_UNLOCK_ALL		0x0E
#define ONENAND_OTP_ACCESS		0x12
#define ONENAND_SPARE_ACCESS_ONLY	0x13
#define ONENAND_MAIN_ACCESS_ONLY	0x14
#define ONENAND_ERASE_VERIFY		0x15
#define ONENAND_MAIN_SPARE_ACCESS	0x16
#define ONENAND_PIPELINE_READ		0x4000

#define MAP_00				(0x0)
#define MAP_01				(0x1)
#define MAP_10				(0x2)
#define MAP_11				(0x3)

#define S3C64XX_CMD_MAP_SHIFT		24
#define S5PC100_CMD_MAP_SHIFT		26

#define S3C6400_FBA_SHIFT		10
#define S3C6400_FPA_SHIFT		4
#define S3C6400_FSA_SHIFT		2

#define S3C6410_FBA_SHIFT		12
#define S3C6410_FPA_SHIFT		6
#define S3C6410_FSA_SHIFT		4

#define S5PC100_FBA_SHIFT		13
#define S5PC100_FPA_SHIFT		7
#define S5PC100_FSA_SHIFT		5

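/*
 * Example: with the S3C6410 shifts above, flash block address (FBA) 3,
 * flash page address (FPA) 5 and flash sector address (FSA) 0 pack into
 *
 *	(3 << 12) | (5 << 6) | (0 << 4) = 0x3140
 *
 * which is the mem_addr value fed to the CMD_MAP_xx macros below.
 */
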
/* S5PC110 specific definitions */
#define S5PC110_DMA_SRC_ADDR		0x400
#define S5PC110_DMA_SRC_CFG		0x404
#define S5PC110_DMA_DST_ADDR		0x408
#define S5PC110_DMA_DST_CFG		0x40C
#define S5PC110_DMA_TRANS_SIZE		0x414
#define S5PC110_DMA_TRANS_CMD		0x418
#define S5PC110_DMA_TRANS_STATUS	0x41C
#define S5PC110_DMA_TRANS_DIR		0x420
#define S5PC110_INTC_DMA_CLR		0x1004
#define S5PC110_INTC_ONENAND_CLR	0x1008
#define S5PC110_INTC_DMA_MASK		0x1024
#define S5PC110_INTC_ONENAND_MASK	0x1028
#define S5PC110_INTC_DMA_PEND		0x1044
#define S5PC110_INTC_ONENAND_PEND	0x1048
#define S5PC110_INTC_DMA_STATUS		0x1064
#define S5PC110_INTC_ONENAND_STATUS	0x1068

#define S5PC110_INTC_DMA_TD		(1 << 24)
#define S5PC110_INTC_DMA_TE		(1 << 16)

#define S5PC110_DMA_CFG_SINGLE		(0x0 << 16)
#define S5PC110_DMA_CFG_4BURST		(0x2 << 16)
#define S5PC110_DMA_CFG_8BURST		(0x3 << 16)
#define S5PC110_DMA_CFG_16BURST		(0x4 << 16)

#define S5PC110_DMA_CFG_INC		(0x0 << 8)
#define S5PC110_DMA_CFG_CNT		(0x1 << 8)

#define S5PC110_DMA_CFG_8BIT		(0x0 << 0)
#define S5PC110_DMA_CFG_16BIT		(0x1 << 0)
#define S5PC110_DMA_CFG_32BIT		(0x2 << 0)

#define S5PC110_DMA_SRC_CFG_READ	(S5PC110_DMA_CFG_16BURST | \
					S5PC110_DMA_CFG_INC | \
					S5PC110_DMA_CFG_16BIT)
#define S5PC110_DMA_DST_CFG_READ	(S5PC110_DMA_CFG_16BURST | \
					S5PC110_DMA_CFG_INC | \
					S5PC110_DMA_CFG_32BIT)
#define S5PC110_DMA_SRC_CFG_WRITE	(S5PC110_DMA_CFG_16BURST | \
					S5PC110_DMA_CFG_INC | \
					S5PC110_DMA_CFG_32BIT)
#define S5PC110_DMA_DST_CFG_WRITE	(S5PC110_DMA_CFG_16BURST | \
					S5PC110_DMA_CFG_INC | \
					S5PC110_DMA_CFG_16BIT)

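/*
 * For reference, the composite read configuration above works out to
 *
 *	S5PC110_DMA_SRC_CFG_READ = (0x4 << 16) | (0x0 << 8) | 0x1 = 0x00040001
 *	S5PC110_DMA_DST_CFG_READ = (0x4 << 16) | (0x0 << 8) | 0x2 = 0x00040002
 *
 * i.e. 16-beat bursts with incrementing addresses, reading the 16-bit
 * BufferRAM into a 32-bit wide destination; the write direction simply
 * swaps the two bus widths.
 */
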
#define S5PC110_DMA_TRANS_CMD_TDC	(0x1 << 18)
#define S5PC110_DMA_TRANS_CMD_TEC	(0x1 << 16)
#define S5PC110_DMA_TRANS_CMD_TR	(0x1 << 0)

#define S5PC110_DMA_TRANS_STATUS_TD	(0x1 << 18)
#define S5PC110_DMA_TRANS_STATUS_TB	(0x1 << 17)
#define S5PC110_DMA_TRANS_STATUS_TE	(0x1 << 16)

#define S5PC110_DMA_DIR_READ		0x0
#define S5PC110_DMA_DIR_WRITE		0x1

struct s3c_onenand {
	struct mtd_info		*mtd;
	struct platform_device	*pdev;
	enum soc_type		type;
	void __iomem		*base;
	struct resource		*base_res;
	void __iomem		*ahb_addr;
	struct resource		*ahb_res;
	int			bootram_command;
	void __iomem		*page_buf;
	void __iomem		*oob_buf;
	unsigned int		(*mem_addr)(int fba, int fpa, int fsa);
	unsigned int		(*cmd_map)(unsigned int type, unsigned int val);
	void __iomem		*dma_addr;
	struct resource		*dma_res;
	unsigned long		phys_base;
	struct completion	complete;
#ifdef CONFIG_MTD_PARTITIONS
	struct mtd_partition	*parts;
#endif
};

#define CMD_MAP_00(dev, addr)		(dev->cmd_map(MAP_00, ((addr) << 1)))
#define CMD_MAP_01(dev, mem_addr)	(dev->cmd_map(MAP_01, (mem_addr)))
#define CMD_MAP_10(dev, mem_addr)	(dev->cmd_map(MAP_10, (mem_addr)))
#define CMD_MAP_11(dev, addr)		(dev->cmd_map(MAP_11, ((addr) << 2)))

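/*
 * Example: continuing the S3C6410 case, CMD_MAP_01(onenand, 0x3140)
 * evaluates to
 *
 *	(MAP_01 << S3C64XX_CMD_MAP_SHIFT) | 0x3140 = 0x01003140
 *
 * and s3c_read_cmd()/s3c_write_cmd() use that value as a byte offset into
 * the AHB window at onenand->ahb_addr, so the controller can decode both
 * the access type (MAP_xx) and the target block/page/sector.
 */
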
static struct s3c_onenand *onenand;

#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL, };
#endif

static inline int s3c_read_reg(int offset)
{
	return readl(onenand->base + offset);
}

static inline void s3c_write_reg(int value, int offset)
{
	writel(value, onenand->base + offset);
}

static inline int s3c_read_cmd(unsigned int cmd)
{
	return readl(onenand->ahb_addr + cmd);
}

static inline void s3c_write_cmd(int value, unsigned int cmd)
{
	writel(value, onenand->ahb_addr + cmd);
}

#ifdef SAMSUNG_DEBUG
static void s3c_dump_reg(void)
{
	int i;

	for (i = 0; i < 0x400; i += 0x40) {
		printk(KERN_INFO "0x%08X: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			(unsigned int) onenand->base + i,
			s3c_read_reg(i), s3c_read_reg(i + 0x10),
			s3c_read_reg(i + 0x20), s3c_read_reg(i + 0x30));
	}
}
#endif

static unsigned int s3c64xx_cmd_map(unsigned type, unsigned val)
{
	return (type << S3C64XX_CMD_MAP_SHIFT) | val;
}

static unsigned int s5pc1xx_cmd_map(unsigned type, unsigned val)
{
	return (type << S5PC100_CMD_MAP_SHIFT) | val;
}

static unsigned int s3c6400_mem_addr(int fba, int fpa, int fsa)
{
	return (fba << S3C6400_FBA_SHIFT) | (fpa << S3C6400_FPA_SHIFT) |
		(fsa << S3C6400_FSA_SHIFT);
}

static unsigned int s3c6410_mem_addr(int fba, int fpa, int fsa)
{
	return (fba << S3C6410_FBA_SHIFT) | (fpa << S3C6410_FPA_SHIFT) |
		(fsa << S3C6410_FSA_SHIFT);
}

static unsigned int s5pc100_mem_addr(int fba, int fpa, int fsa)
{
	return (fba << S5PC100_FBA_SHIFT) | (fpa << S5PC100_FPA_SHIFT) |
		(fsa << S5PC100_FSA_SHIFT);
}

static void s3c_onenand_reset(void)
{
	unsigned long timeout = 0x10000;
	int stat;

	s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
	while (timeout--) {
		stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
		if (stat & RST_CMP)
			break;
	}
	stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
	s3c_write_reg(stat, INT_ERR_ACK_OFFSET);

	/* Clear interrupt */
	s3c_write_reg(0x0, INT_ERR_ACK_OFFSET);
	/* Clear the ECC status */
	s3c_write_reg(0x0, ECC_ERR_STAT_OFFSET);
}

static unsigned short s3c_onenand_readw(void __iomem *addr)
{
	struct onenand_chip *this = onenand->mtd->priv;
	struct device *dev = &onenand->pdev->dev;
	int reg = addr - this->base;
	int word_addr = reg >> 1;
	int value;

	/* Registers used at probe time */
	switch (reg) {
	case ONENAND_REG_MANUFACTURER_ID:
		return s3c_read_reg(MANUFACT_ID_OFFSET);
	case ONENAND_REG_DEVICE_ID:
		return s3c_read_reg(DEVICE_ID_OFFSET);
	case ONENAND_REG_VERSION_ID:
		return s3c_read_reg(FLASH_VER_ID_OFFSET);
	case ONENAND_REG_DATA_BUFFER_SIZE:
		return s3c_read_reg(DATA_BUF_SIZE_OFFSET);
	case ONENAND_REG_TECHNOLOGY:
		return s3c_read_reg(TECH_OFFSET);
	case ONENAND_REG_SYS_CFG1:
		return s3c_read_reg(MEM_CFG_OFFSET);

	/* Used when checking the unlock-all status */
	case ONENAND_REG_CTRL_STATUS:
		return 0;

	case ONENAND_REG_WP_STATUS:
		return ONENAND_WP_US;

	default:
		break;
	}

	/* BootRAM access control */
	if ((unsigned int) addr < ONENAND_DATARAM && onenand->bootram_command) {
		if (word_addr == 0)
			return s3c_read_reg(MANUFACT_ID_OFFSET);
		if (word_addr == 1)
			return s3c_read_reg(DEVICE_ID_OFFSET);
		if (word_addr == 2)
			return s3c_read_reg(FLASH_VER_ID_OFFSET);
	}

	value = s3c_read_cmd(CMD_MAP_11(onenand, word_addr)) & 0xffff;
	dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
		 word_addr, value);
	return value;
}

static void s3c_onenand_writew(unsigned short value, void __iomem *addr)
{
	struct onenand_chip *this = onenand->mtd->priv;
	struct device *dev = &onenand->pdev->dev;
	unsigned int reg = addr - this->base;
	unsigned int word_addr = reg >> 1;

	/* Registers used at probe time */
	switch (reg) {
	case ONENAND_REG_SYS_CFG1:
		s3c_write_reg(value, MEM_CFG_OFFSET);
		return;

	case ONENAND_REG_START_ADDRESS1:
	case ONENAND_REG_START_ADDRESS2:
		return;

	/* Lock/lock-tight/unlock/unlock-all */
	case ONENAND_REG_START_BLOCK_ADDRESS:
		return;

	default:
		break;
	}

	/* BootRAM access control */
	if ((unsigned int) addr < ONENAND_DATARAM) {
		if (value == ONENAND_CMD_READID) {
			onenand->bootram_command = 1;
			return;
		}
		if (value == ONENAND_CMD_RESET) {
			s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
			onenand->bootram_command = 0;
			return;
		}
	}

	dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
		 word_addr, value);

	s3c_write_cmd(value, CMD_MAP_11(onenand, word_addr));
}

static int s3c_onenand_wait(struct mtd_info *mtd, int state)
{
	struct device *dev = &onenand->pdev->dev;
	unsigned int flags = INT_ACT;
	unsigned int stat, ecc;
	unsigned long timeout;

	switch (state) {
	case FL_READING:
		flags |= BLK_RW_CMP | LOAD_CMP;
		break;
	case FL_WRITING:
		flags |= BLK_RW_CMP | PGM_CMP;
		break;
	case FL_ERASING:
		flags |= BLK_RW_CMP | ERS_CMP;
		break;
	case FL_LOCKING:
		flags |= BLK_RW_CMP;
		break;
	default:
		break;
	}

	/* A 20 msec timeout is enough */
	timeout = jiffies + msecs_to_jiffies(20);
	while (time_before(jiffies, timeout)) {
		stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
		if (stat & flags)
			break;

		if (state != FL_READING)
			cond_resched();
	}
	/* Re-read to get the correct interrupt status in the timeout case */
	stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
	s3c_write_reg(stat, INT_ERR_ACK_OFFSET);

	/*
	 * The spec says to check the controller status first.  However, to
	 * report correct information in the power-off recovery (POR) test
	 * case, the ECC status has to be read first.
	 */
	if (stat & LOAD_CMP) {
		ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);
		if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
			dev_info(dev, "%s: ECC error = 0x%04x\n", __func__,
				 ecc);
			mtd->ecc_stats.failed++;
			return -EBADMSG;
		}
	}

	if (stat & (LOCKED_BLK | ERS_FAIL | PGM_FAIL | LD_FAIL_ECC_ERR)) {
		dev_info(dev, "%s: controller error = 0x%04x\n", __func__,
			 stat);
		if (stat & LOCKED_BLK)
			dev_info(dev, "%s: the block is locked, error = 0x%04x\n",
				 __func__, stat);

		return -EIO;
	}

	return 0;
}

static int s3c_onenand_command(struct mtd_info *mtd, int cmd, loff_t addr,
			       size_t len)
{
	struct onenand_chip *this = mtd->priv;
	unsigned int *m, *s;
	int fba, fpa, fsa = 0;
	unsigned int mem_addr, cmd_map_01, cmd_map_10;
	int i, mcount, scount;
	int index;

	fba = (int) (addr >> this->erase_shift);
	fpa = (int) (addr >> this->page_shift);
	fpa &= this->page_mask;

	mem_addr = onenand->mem_addr(fba, fpa, fsa);
	cmd_map_01 = CMD_MAP_01(onenand, mem_addr);
	cmd_map_10 = CMD_MAP_10(onenand, mem_addr);

	switch (cmd) {
	case ONENAND_CMD_READ:
	case ONENAND_CMD_READOOB:
	case ONENAND_CMD_BUFFERRAM:
		ONENAND_SET_NEXT_BUFFERRAM(this);
	default:
		break;
	}

	index = ONENAND_CURRENT_BUFFERRAM(this);

	/*
	 * Emulate the two BufferRAMs and access them through 4-byte pointers
	 */
	m = (unsigned int *) onenand->page_buf;
	s = (unsigned int *) onenand->oob_buf;

	if (index) {
		m += (this->writesize >> 2);
		s += (mtd->oobsize >> 2);
	}
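
	/*
	 * Layout of the emulated BufferRAMs: page_buf holds two
	 * writesize-sized slots and oob_buf two oobsize-sized slots, so
	 * BufferRAM0 starts at offset 0 and BufferRAM1 at writesize
	 * (resp. oobsize).  The 4 KiB page_buf and 128 byte oob_buf
	 * allocated at probe time accommodate two 2 KiB main slots and
	 * two 64 byte spare slots.
	 */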

	mcount = mtd->writesize >> 2;
	scount = mtd->oobsize >> 2;

	switch (cmd) {
	case ONENAND_CMD_READ:
		/* Main */
		for (i = 0; i < mcount; i++)
			*m++ = s3c_read_cmd(cmd_map_01);
		return 0;

	case ONENAND_CMD_READOOB:
		s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);
		/* Main */
		for (i = 0; i < mcount; i++)
			*m++ = s3c_read_cmd(cmd_map_01);

		/* Spare */
		for (i = 0; i < scount; i++)
			*s++ = s3c_read_cmd(cmd_map_01);

		s3c_write_reg(0, TRANS_SPARE_OFFSET);
		return 0;

	case ONENAND_CMD_PROG:
		/* Main */
		for (i = 0; i < mcount; i++)
			s3c_write_cmd(*m++, cmd_map_01);
		return 0;

	case ONENAND_CMD_PROGOOB:
		s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);

		/* Main - dummy write */
		for (i = 0; i < mcount; i++)
			s3c_write_cmd(0xffffffff, cmd_map_01);

		/* Spare */
		for (i = 0; i < scount; i++)
			s3c_write_cmd(*s++, cmd_map_01);

		s3c_write_reg(0, TRANS_SPARE_OFFSET);
		return 0;

	case ONENAND_CMD_UNLOCK_ALL:
		s3c_write_cmd(ONENAND_UNLOCK_ALL, cmd_map_10);
		return 0;

	case ONENAND_CMD_ERASE:
		s3c_write_cmd(ONENAND_ERASE_START, cmd_map_10);
		return 0;

	default:
		break;
	}

	return 0;
}

static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area)
{
	struct onenand_chip *this = mtd->priv;
	int index = ONENAND_CURRENT_BUFFERRAM(this);
	unsigned char *p;

	if (area == ONENAND_DATARAM) {
		p = (unsigned char *) onenand->page_buf;
		if (index == 1)
			p += this->writesize;
	} else {
		p = (unsigned char *) onenand->oob_buf;
		if (index == 1)
			p += mtd->oobsize;
	}

	return p;
}

static int onenand_read_bufferram(struct mtd_info *mtd, int area,
				  unsigned char *buffer, int offset,
				  size_t count)
{
	unsigned char *p;

	p = s3c_get_bufferram(mtd, area);
	memcpy(buffer, p + offset, count);
	return 0;
}

static int onenand_write_bufferram(struct mtd_info *mtd, int area,
				   const unsigned char *buffer, int offset,
				   size_t count)
{
	unsigned char *p;

	p = s3c_get_bufferram(mtd, area);
	memcpy(p + offset, buffer, count);
	return 0;
}

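/*
 * DMA transfer hook for the S5PC110.  s5pc110_dma_poll() is installed as
 * the default at probe time and busy-waits on the transfer status; when
 * the platform provides an IRQ resource it is replaced by
 * s5pc110_dma_irq(), which programs the same registers but sleeps on a
 * completion signalled from s5pc110_onenand_irq().
 */
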
static int (*s5pc110_dma_ops)(void *dst, void *src, size_t count, int direction);

static int s5pc110_dma_poll(void *dst, void *src, size_t count, int direction)
{
	void __iomem *base = onenand->dma_addr;
	int status;
	unsigned long timeout;

	writel(src, base + S5PC110_DMA_SRC_ADDR);
	writel(dst, base + S5PC110_DMA_DST_ADDR);

	if (direction == S5PC110_DMA_DIR_READ) {
		writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG);
		writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG);
	} else {
		writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG);
		writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG);
	}

	writel(count, base + S5PC110_DMA_TRANS_SIZE);
	writel(direction, base + S5PC110_DMA_TRANS_DIR);

	writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);

	/*
	 * The spec gives no exact timeout value; in practice the transfer
	 * takes under 1 msec, so 20 msecs are more than enough.
	 */
	timeout = jiffies + msecs_to_jiffies(20);

	do {
		status = readl(base + S5PC110_DMA_TRANS_STATUS);
		if (status & S5PC110_DMA_TRANS_STATUS_TE) {
			writel(S5PC110_DMA_TRANS_CMD_TEC,
			       base + S5PC110_DMA_TRANS_CMD);
			return -EIO;
		}
	} while (!(status & S5PC110_DMA_TRANS_STATUS_TD) &&
		 time_before(jiffies, timeout));

	writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);

	return 0;
}

static irqreturn_t s5pc110_onenand_irq(int irq, void *data)
{
	void __iomem *base = onenand->dma_addr;
	int status, cmd = 0;

	status = readl(base + S5PC110_INTC_DMA_STATUS);

	if (likely(status & S5PC110_INTC_DMA_TD))
		cmd = S5PC110_DMA_TRANS_CMD_TDC;

	if (unlikely(status & S5PC110_INTC_DMA_TE))
		cmd = S5PC110_DMA_TRANS_CMD_TEC;

	writel(cmd, base + S5PC110_DMA_TRANS_CMD);
	writel(status, base + S5PC110_INTC_DMA_CLR);

	if (!onenand->complete.done)
		complete(&onenand->complete);

	return IRQ_HANDLED;
}

static int s5pc110_dma_irq(void *dst, void *src, size_t count, int direction)
{
	void __iomem *base = onenand->dma_addr;
	int status;

	status = readl(base + S5PC110_INTC_DMA_MASK);
	if (status) {
		status &= ~(S5PC110_INTC_DMA_TD | S5PC110_INTC_DMA_TE);
		writel(status, base + S5PC110_INTC_DMA_MASK);
	}

	writel(src, base + S5PC110_DMA_SRC_ADDR);
	writel(dst, base + S5PC110_DMA_DST_ADDR);

	if (direction == S5PC110_DMA_DIR_READ) {
		writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG);
		writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG);
	} else {
		writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG);
		writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG);
	}

	writel(count, base + S5PC110_DMA_TRANS_SIZE);
	writel(direction, base + S5PC110_DMA_TRANS_DIR);

	writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);

	wait_for_completion_timeout(&onenand->complete, msecs_to_jiffies(20));

	return 0;
}

static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
		unsigned char *buffer, int offset, size_t count)
{
	struct onenand_chip *this = mtd->priv;
	void __iomem *p;
	void *buf = (void *) buffer;
	dma_addr_t dma_src, dma_dst;
	int err, page_dma = 0;
	struct device *dev = &onenand->pdev->dev;

	p = this->base + area;
	if (ONENAND_CURRENT_BUFFERRAM(this)) {
		if (area == ONENAND_DATARAM)
			p += this->writesize;
		else
			p += mtd->oobsize;
	}

	/* DMA is only used for 4-byte-aligned, full-page reads */
	if (offset & 3 || (size_t) buf & 3 ||
	    !onenand->dma_addr || count != mtd->writesize)
		goto normal;

	/*
	 * Handle vmalloc addresses: only a single page can be mapped via
	 * vmalloc_to_page()/dma_map_page(), so fall back to the memcpy()
	 * path if the buffer crosses a page boundary.
	 */
	if (buf >= high_memory) {
		struct page *page;

		if (((size_t) buf & PAGE_MASK) !=
		    ((size_t) (buf + count - 1) & PAGE_MASK))
			goto normal;
		page = vmalloc_to_page(buf);
		if (!page)
			goto normal;

		page_dma = 1;
		/* DMA routine */
		dma_src = onenand->phys_base + (p - this->base);
		dma_dst = dma_map_page(dev, page, 0, count, DMA_FROM_DEVICE);
	} else {
		/* DMA routine */
		dma_src = onenand->phys_base + (p - this->base);
		dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
	}
	if (dma_mapping_error(dev, dma_dst)) {
		dev_err(dev, "Couldn't map a %zu byte buffer for DMA\n", count);
		goto normal;
	}
	err = s5pc110_dma_ops((void *) dma_dst, (void *) dma_src,
			      count, S5PC110_DMA_DIR_READ);

	if (page_dma)
		dma_unmap_page(dev, dma_dst, count, DMA_FROM_DEVICE);
	else
		dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);

	if (!err)
		return 0;

normal:
	if (count != mtd->writesize) {
		/* Copy the BufferRAM to memory to prevent unaligned access */
		memcpy(this->page_buf, p, mtd->writesize);
		p = this->page_buf + offset;
	}

	memcpy(buffer, p, count);

	return 0;
}

static int s5pc110_chip_probe(struct mtd_info *mtd)
{
	/* Nothing to do here; just report success */
	return 0;
}

static int s3c_onenand_bbt_wait(struct mtd_info *mtd, int state)
{
	unsigned int flags = INT_ACT | LOAD_CMP;
	unsigned int stat;
	unsigned long timeout;

	/* A 20 msec timeout is enough */
	timeout = jiffies + msecs_to_jiffies(20);
	while (time_before(jiffies, timeout)) {
		stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
		if (stat & flags)
			break;
	}
	/* Re-read to get the correct interrupt status in the timeout case */
	stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
	s3c_write_reg(stat, INT_ERR_ACK_OFFSET);

	if (stat & LD_FAIL_ECC_ERR) {
		s3c_onenand_reset();
		return ONENAND_BBT_READ_ERROR;
	}

	if (stat & LOAD_CMP) {
		int ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);

		if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
			s3c_onenand_reset();
			return ONENAND_BBT_READ_ERROR;
		}
	}

	return 0;
}

static void s3c_onenand_check_lock_status(struct mtd_info *mtd)
{
	struct onenand_chip *this = mtd->priv;
	struct device *dev = &onenand->pdev->dev;
	unsigned int block, end;
	int tmp;

	end = this->chipsize >> this->erase_shift;

	for (block = 0; block < end; block++) {
		unsigned int mem_addr = onenand->mem_addr(block, 0, 0);
		tmp = s3c_read_cmd(CMD_MAP_01(onenand, mem_addr));

		if (s3c_read_reg(INT_ERR_STAT_OFFSET) & LOCKED_BLK) {
			dev_err(dev, "block %d is write-protected!\n", block);
			s3c_write_reg(LOCKED_BLK, INT_ERR_ACK_OFFSET);
		}
	}
}

static void s3c_onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs,
				    size_t len, int cmd)
{
	struct onenand_chip *this = mtd->priv;
	int start, end, start_mem_addr, end_mem_addr;

	start = ofs >> this->erase_shift;
	start_mem_addr = onenand->mem_addr(start, 0, 0);
	end = start + (len >> this->erase_shift) - 1;
	end_mem_addr = onenand->mem_addr(end, 0, 0);

	if (cmd == ONENAND_CMD_LOCK) {
		s3c_write_cmd(ONENAND_LOCK_START, CMD_MAP_10(onenand,
							     start_mem_addr));
		s3c_write_cmd(ONENAND_LOCK_END, CMD_MAP_10(onenand,
							   end_mem_addr));
	} else {
		s3c_write_cmd(ONENAND_UNLOCK_START, CMD_MAP_10(onenand,
							       start_mem_addr));
		s3c_write_cmd(ONENAND_UNLOCK_END, CMD_MAP_10(onenand,
							     end_mem_addr));
	}

	this->wait(mtd, FL_LOCKING);
}

static void s3c_unlock_all(struct mtd_info *mtd)
{
	struct onenand_chip *this = mtd->priv;
	loff_t ofs = 0;
	size_t len = this->chipsize;

	if (this->options & ONENAND_HAS_UNLOCK_ALL) {
		/* Write unlock command */
		this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0);

		/* No need to check the return value */
		this->wait(mtd, FL_LOCKING);

		/* Workaround for unlock-all on DDP (dual-die package) chips */
		if (!ONENAND_IS_DDP(this)) {
			s3c_onenand_check_lock_status(mtd);
			return;
		}

		/* All blocks on the second die */
		ofs = this->chipsize >> 1;
		len = this->chipsize >> 1;
	}

	s3c_onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);

	s3c_onenand_check_lock_status(mtd);
}

static void s3c_onenand_setup(struct mtd_info *mtd)
{
	struct onenand_chip *this = mtd->priv;

	onenand->mtd = mtd;

	if (onenand->type == TYPE_S3C6400) {
		onenand->mem_addr = s3c6400_mem_addr;
		onenand->cmd_map = s3c64xx_cmd_map;
	} else if (onenand->type == TYPE_S3C6410) {
		onenand->mem_addr = s3c6410_mem_addr;
		onenand->cmd_map = s3c64xx_cmd_map;
	} else if (onenand->type == TYPE_S5PC100) {
		onenand->mem_addr = s5pc100_mem_addr;
		onenand->cmd_map = s5pc1xx_cmd_map;
	} else if (onenand->type == TYPE_S5PC110) {
		/* Use generic onenand functions */
		this->read_bufferram = s5pc110_read_bufferram;
		this->chip_probe = s5pc110_chip_probe;
		return;
	} else {
		BUG();
	}

	this->read_word = s3c_onenand_readw;
	this->write_word = s3c_onenand_writew;

	this->wait = s3c_onenand_wait;
	this->bbt_wait = s3c_onenand_bbt_wait;
	this->unlock_all = s3c_unlock_all;
	this->command = s3c_onenand_command;

	this->read_bufferram = onenand_read_bufferram;
	this->write_bufferram = onenand_write_bufferram;
}

static int s3c_onenand_probe(struct platform_device *pdev)
{
	struct onenand_platform_data *pdata;
	struct onenand_chip *this;
	struct mtd_info *mtd;
	struct resource *r;
	int size, err;

	pdata = pdev->dev.platform_data;
	/* No need to check pdata; the platform data is optional */

	size = sizeof(struct mtd_info) + sizeof(struct onenand_chip);
	mtd = kzalloc(size, GFP_KERNEL);
	if (!mtd) {
		dev_err(&pdev->dev, "failed to allocate memory\n");
		return -ENOMEM;
	}

	onenand = kzalloc(sizeof(struct s3c_onenand), GFP_KERNEL);
	if (!onenand) {
		err = -ENOMEM;
		goto onenand_fail;
	}

	this = (struct onenand_chip *) &mtd[1];
	mtd->priv = this;
	mtd->dev.parent = &pdev->dev;
	mtd->owner = THIS_MODULE;
	onenand->pdev = pdev;
	onenand->type = platform_get_device_id(pdev)->driver_data;

	s3c_onenand_setup(mtd);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "no memory resource defined\n");
		err = -ENOENT;
		goto resource_failed;
	}

	onenand->base_res = request_mem_region(r->start, resource_size(r),
					       pdev->name);
	if (!onenand->base_res) {
		dev_err(&pdev->dev, "failed to request memory resource\n");
		err = -EBUSY;
		goto resource_failed;
	}

	onenand->base = ioremap(r->start, resource_size(r));
	if (!onenand->base) {
		dev_err(&pdev->dev, "failed to map memory resource\n");
		err = -EFAULT;
		goto ioremap_failed;
	}
	/* Set onenand_chip also */
	this->base = onenand->base;

	/* Use runtime badblock check */
	this->options |= ONENAND_SKIP_UNLOCK_CHECK;

	if (onenand->type != TYPE_S5PC110) {
		r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!r) {
			dev_err(&pdev->dev, "no buffer memory resource defined\n");
			err = -ENOENT;
			goto ahb_resource_failed;
		}

		onenand->ahb_res = request_mem_region(r->start, resource_size(r),
						      pdev->name);
		if (!onenand->ahb_res) {
			dev_err(&pdev->dev, "failed to request buffer memory resource\n");
			err = -EBUSY;
			goto ahb_resource_failed;
		}

		onenand->ahb_addr = ioremap(r->start, resource_size(r));
		if (!onenand->ahb_addr) {
			dev_err(&pdev->dev, "failed to map buffer memory resource\n");
			err = -EINVAL;
			goto ahb_ioremap_failed;
		}

		/* Allocate 4 KiB of BufferRAM */
		onenand->page_buf = kzalloc(SZ_4K, GFP_KERNEL);
		if (!onenand->page_buf) {
			err = -ENOMEM;
			goto page_buf_fail;
		}

		/* Allocate 128 bytes of SpareRAM */
		onenand->oob_buf = kzalloc(128, GFP_KERNEL);
		if (!onenand->oob_buf) {
			err = -ENOMEM;
			goto oob_buf_fail;
		}

		/* S3C doesn't handle subpage write */
		mtd->subpage_sft = 0;
		this->subpagesize = mtd->writesize;

	} else { /* S5PC110 */
		r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!r) {
			dev_err(&pdev->dev, "no dma memory resource defined\n");
			err = -ENOENT;
			goto dma_resource_failed;
		}

		onenand->dma_res = request_mem_region(r->start, resource_size(r),
						      pdev->name);
		if (!onenand->dma_res) {
			dev_err(&pdev->dev, "failed to request dma memory resource\n");
			err = -EBUSY;
			goto dma_resource_failed;
		}

		onenand->dma_addr = ioremap(r->start, resource_size(r));
		if (!onenand->dma_addr) {
			dev_err(&pdev->dev, "failed to map dma memory resource\n");
			err = -EINVAL;
			goto dma_ioremap_failed;
		}

		onenand->phys_base = onenand->base_res->start;

		s5pc110_dma_ops = s5pc110_dma_poll;
		/* Interrupt support */
		r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
		if (r) {
			init_completion(&onenand->complete);
			s5pc110_dma_ops = s5pc110_dma_irq;
			err = request_irq(r->start, s5pc110_onenand_irq,
					  IRQF_SHARED, "onenand", &onenand);
			if (err) {
				dev_err(&pdev->dev, "failed to get irq\n");
				goto scan_failed;
			}
		}
	}

	if (onenand_scan(mtd, 1)) {
		err = -EFAULT;
		goto scan_failed;
	}

	if (onenand->type != TYPE_S5PC110) {
		/* S3C doesn't handle subpage write */
		mtd->subpage_sft = 0;
		this->subpagesize = mtd->writesize;
	}

	if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
		dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");

#ifdef CONFIG_MTD_PARTITIONS
	err = parse_mtd_partitions(mtd, part_probes, &onenand->parts, 0);
	if (err > 0)
		add_mtd_partitions(mtd, onenand->parts, err);
	else if (err <= 0 && pdata && pdata->parts)
		add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
	else
#endif
		err = add_mtd_device(mtd);

	platform_set_drvdata(pdev, mtd);

	return 0;

scan_failed:
	if (onenand->dma_addr)
		iounmap(onenand->dma_addr);
dma_ioremap_failed:
	if (onenand->dma_res)
		release_mem_region(onenand->dma_res->start,
				   resource_size(onenand->dma_res));
	kfree(onenand->oob_buf);
oob_buf_fail:
	kfree(onenand->page_buf);
page_buf_fail:
	if (onenand->ahb_addr)
		iounmap(onenand->ahb_addr);
ahb_ioremap_failed:
	if (onenand->ahb_res)
		release_mem_region(onenand->ahb_res->start,
				   resource_size(onenand->ahb_res));
dma_resource_failed:
ahb_resource_failed:
	iounmap(onenand->base);
ioremap_failed:
	if (onenand->base_res)
		release_mem_region(onenand->base_res->start,
				   resource_size(onenand->base_res));
resource_failed:
	kfree(onenand);
onenand_fail:
	kfree(mtd);
	return err;
}

static int __devexit s3c_onenand_remove(struct platform_device *pdev)
{
	struct mtd_info *mtd = platform_get_drvdata(pdev);

	onenand_release(mtd);
	if (onenand->ahb_addr)
		iounmap(onenand->ahb_addr);
	if (onenand->ahb_res)
		release_mem_region(onenand->ahb_res->start,
				   resource_size(onenand->ahb_res));
	if (onenand->dma_addr)
		iounmap(onenand->dma_addr);
	if (onenand->dma_res)
		release_mem_region(onenand->dma_res->start,
				   resource_size(onenand->dma_res));

	iounmap(onenand->base);
	release_mem_region(onenand->base_res->start,
			   resource_size(onenand->base_res));

	platform_set_drvdata(pdev, NULL);
	kfree(onenand->oob_buf);
	kfree(onenand->page_buf);
	kfree(onenand);
	kfree(mtd);
	return 0;
}

static int s3c_pm_ops_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mtd_info *mtd = platform_get_drvdata(pdev);
	struct onenand_chip *this = mtd->priv;

	this->wait(mtd, FL_PM_SUSPENDED);
	return 0;
}

static int s3c_pm_ops_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mtd_info *mtd = platform_get_drvdata(pdev);
	struct onenand_chip *this = mtd->priv;

	this->unlock_all(mtd);
	return 0;
}

static const struct dev_pm_ops s3c_pm_ops = {
	.suspend	= s3c_pm_ops_suspend,
	.resume		= s3c_pm_ops_resume,
};

static struct platform_device_id s3c_onenand_driver_ids[] = {
	{
		.name		= "s3c6400-onenand",
		.driver_data	= TYPE_S3C6400,
	}, {
		.name		= "s3c6410-onenand",
		.driver_data	= TYPE_S3C6410,
	}, {
		.name		= "s5pc100-onenand",
		.driver_data	= TYPE_S5PC100,
	}, {
		.name		= "s5pc110-onenand",
		.driver_data	= TYPE_S5PC110,
	}, { },
};
MODULE_DEVICE_TABLE(platform, s3c_onenand_driver_ids);

static struct platform_driver s3c_onenand_driver = {
	.driver		= {
		.name	= "samsung-onenand",
		.pm	= &s3c_pm_ops,
	},
	.id_table	= s3c_onenand_driver_ids,
	.probe		= s3c_onenand_probe,
	.remove		= __devexit_p(s3c_onenand_remove),
};

static int __init s3c_onenand_init(void)
{
	return platform_driver_register(&s3c_onenand_driver);
}

static void __exit s3c_onenand_exit(void)
{
	platform_driver_unregister(&s3c_onenand_driver);
}

module_init(s3c_onenand_init);
module_exit(s3c_onenand_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
MODULE_DESCRIPTION("Samsung OneNAND controller support");