/*
 * GPMC support functions
 *
 * Copyright (C) 2005-2006 Nokia Corporation
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
17 #include <linux/irq.h>
18 #include <linux/kernel.h>
19 #include <linux/init.h>
20 #include <linux/err.h>
21 #include <linux/clk.h>
22 #include <linux/ioport.h>
23 #include <linux/spinlock.h>
25 #include <linux/module.h>
26 #include <linux/interrupt.h>
28 #include <asm/mach-types.h>
29 #include <plat/gpmc.h>
32 #include <plat/gpmc.h>
33 #include <plat/sdrc.h>
34 #include <plat/omap_device.h>
39 #define DEVICE_NAME "omap-gpmc"
41 /* GPMC register offsets */
42 #define GPMC_REVISION 0x00
43 #define GPMC_SYSCONFIG 0x10
44 #define GPMC_SYSSTATUS 0x14
45 #define GPMC_IRQSTATUS 0x18
46 #define GPMC_IRQENABLE 0x1c
47 #define GPMC_TIMEOUT_CONTROL 0x40
48 #define GPMC_ERR_ADDRESS 0x44
49 #define GPMC_ERR_TYPE 0x48
50 #define GPMC_CONFIG 0x50
51 #define GPMC_STATUS 0x54
52 #define GPMC_PREFETCH_CONFIG1 0x1e0
53 #define GPMC_PREFETCH_CONFIG2 0x1e4
54 #define GPMC_PREFETCH_CONTROL 0x1ec
55 #define GPMC_PREFETCH_STATUS 0x1f0
56 #define GPMC_ECC_CONFIG 0x1f4
57 #define GPMC_ECC_CONTROL 0x1f8
58 #define GPMC_ECC_SIZE_CONFIG 0x1fc
59 #define GPMC_ECC1_RESULT 0x200
60 #define GPMC_ECC_BCH_RESULT_0 0x240 /* not available on OMAP2 */
62 /* GPMC ECC control settings */
63 #define GPMC_ECC_CTRL_ECCCLEAR 0x100
64 #define GPMC_ECC_CTRL_ECCDISABLE 0x000
65 #define GPMC_ECC_CTRL_ECCREG1 0x001
66 #define GPMC_ECC_CTRL_ECCREG2 0x002
67 #define GPMC_ECC_CTRL_ECCREG3 0x003
68 #define GPMC_ECC_CTRL_ECCREG4 0x004
69 #define GPMC_ECC_CTRL_ECCREG5 0x005
70 #define GPMC_ECC_CTRL_ECCREG6 0x006
71 #define GPMC_ECC_CTRL_ECCREG7 0x007
72 #define GPMC_ECC_CTRL_ECCREG8 0x008
73 #define GPMC_ECC_CTRL_ECCREG9 0x009
75 #define GPMC_CS0_OFFSET 0x60
76 #define GPMC_CS_SIZE 0x30
78 #define GPMC_MEM_START 0x00000000
79 #define GPMC_MEM_END 0x3FFFFFFF
80 #define BOOT_ROM_SPACE 0x100000 /* 1MB */
82 #define GPMC_CHUNK_SHIFT 24 /* 16 MB */
83 #define GPMC_SECTION_SHIFT 28 /* 128 MB */
85 #define CS_NUM_SHIFT 24
86 #define ENABLE_PREFETCH (0x1 << 7)
87 #define DMA_MPU_MODE 2
89 /* XXX: Only NAND irq has been considered,currently these are the only ones used
93 struct gpmc_client_irq
{
98 /* Structure to save gpmc cs context */
99 struct gpmc_cs_config
{
111 * Structure to save/restore gpmc context
112 * to support core off on OMAP3
114 struct omap3_gpmc_regs
{
119 u32 prefetch_config1
;
120 u32 prefetch_config2
;
121 u32 prefetch_control
;
122 struct gpmc_cs_config cs_context
[GPMC_CS_NUM
];
125 static struct gpmc_client_irq gpmc_client_irq
[GPMC_NR_IRQ
];
126 static struct irq_chip gpmc_irq_chip
;
127 static unsigned gpmc_irq_start
;
129 static struct resource gpmc_mem_root
;
130 static struct resource gpmc_cs_mem
[GPMC_CS_NUM
];
131 static DEFINE_SPINLOCK(gpmc_mem_lock
);
132 static unsigned int gpmc_cs_map
; /* flag for cs which are initialized */
133 static int gpmc_ecc_used
= -EINVAL
; /* cs using ecc engine */
135 static void __iomem
*gpmc_base
;
137 static struct clk
*gpmc_l3_clk
;
139 static irqreturn_t
gpmc_handle_irq(int irq
, void *dev
);
141 static void gpmc_write_reg(int idx
, u32 val
)
143 __raw_writel(val
, gpmc_base
+ idx
);
146 static u32
gpmc_read_reg(int idx
)
148 return __raw_readl(gpmc_base
+ idx
);
151 static void gpmc_cs_write_byte(int cs
, int idx
, u8 val
)
153 void __iomem
*reg_addr
;
155 reg_addr
= gpmc_base
+ GPMC_CS0_OFFSET
+ (cs
* GPMC_CS_SIZE
) + idx
;
156 __raw_writeb(val
, reg_addr
);
159 static u8
gpmc_cs_read_byte(int cs
, int idx
)
161 void __iomem
*reg_addr
;
163 reg_addr
= gpmc_base
+ GPMC_CS0_OFFSET
+ (cs
* GPMC_CS_SIZE
) + idx
;
164 return __raw_readb(reg_addr
);
167 void gpmc_cs_write_reg(int cs
, int idx
, u32 val
)
169 void __iomem
*reg_addr
;
171 reg_addr
= gpmc_base
+ GPMC_CS0_OFFSET
+ (cs
* GPMC_CS_SIZE
) + idx
;
172 __raw_writel(val
, reg_addr
);
175 u32
gpmc_cs_read_reg(int cs
, int idx
)
177 void __iomem
*reg_addr
;
179 reg_addr
= gpmc_base
+ GPMC_CS0_OFFSET
+ (cs
* GPMC_CS_SIZE
) + idx
;
180 return __raw_readl(reg_addr
);
183 /* TODO: Add support for gpmc_fck to clock framework and use it */
184 unsigned long gpmc_get_fclk_period(void)
186 unsigned long rate
= clk_get_rate(gpmc_l3_clk
);
189 printk(KERN_WARNING
"gpmc_l3_clk not enabled\n");
194 rate
= 1000000000 / rate
; /* In picoseconds */
/* Convert @time_ns to GPMC fclk ticks, rounding up. */
unsigned int gpmc_ns_to_ticks(unsigned int time_ns)
{
        unsigned long tick_ps;

        /* Calculate in picosecs to yield more exact results */
        tick_ps = gpmc_get_fclk_period();

        return (time_ns * 1000 + tick_ps - 1) / tick_ps;
}
/* Convert @time_ps to GPMC fclk ticks, rounding up. */
unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
{
        unsigned long tick_ps;

        /* Calculate in picosecs to yield more exact results */
        tick_ps = gpmc_get_fclk_period();

        return (time_ps + tick_ps - 1) / tick_ps;
}
/* Convert GPMC fclk @ticks to nanoseconds (truncating). */
unsigned int gpmc_ticks_to_ns(unsigned int ticks)
{
        return ticks * gpmc_get_fclk_period() / 1000;
}
/* Round @time_ns up to a whole number of fclk ticks, returned in ns. */
unsigned int gpmc_round_ns_to_ticks(unsigned int time_ns)
{
        unsigned long ticks = gpmc_ns_to_ticks(time_ns);

        return ticks * gpmc_get_fclk_period() / 1000;
}
232 static int set_gpmc_timing_reg(int cs
, int reg
, int st_bit
, int end_bit
,
233 int time
, const char *name
)
235 static int set_gpmc_timing_reg(int cs
, int reg
, int st_bit
, int end_bit
,
240 int ticks
, mask
, nr_bits
;
245 ticks
= gpmc_ns_to_ticks(time
);
246 nr_bits
= end_bit
- st_bit
+ 1;
247 if (ticks
>= 1 << nr_bits
) {
249 printk(KERN_INFO
"GPMC CS%d: %-10s* %3d ns, %3d ticks >= %d\n",
250 cs
, name
, time
, ticks
, 1 << nr_bits
);
255 mask
= (1 << nr_bits
) - 1;
256 l
= gpmc_cs_read_reg(cs
, reg
);
259 "GPMC CS%d: %-10s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
260 cs
, name
, ticks
, gpmc_get_fclk_period() * ticks
/ 1000,
261 (l
>> st_bit
) & mask
, time
);
263 l
&= ~(mask
<< st_bit
);
264 l
|= ticks
<< st_bit
;
265 gpmc_cs_write_reg(cs
, reg
, l
);
/* Program one timing field from struct gpmc_timings *t; bail out of the
 * enclosing function with -1 if the value does not fit. */
#ifdef DEBUG
#define GPMC_SET_ONE(reg, st, end, field) \
        if (set_gpmc_timing_reg(cs, (reg), (st), (end),         \
                        t->field, #field) < 0)                  \
                return -1
#else
#define GPMC_SET_ONE(reg, st, end, field) \
        if (set_gpmc_timing_reg(cs, (reg), (st), (end), t->field) < 0) \
                return -1
#endif
281 int gpmc_cs_calc_divider(int cs
, unsigned int sync_clk
)
286 l
= sync_clk
+ (gpmc_get_fclk_period() - 1);
287 div
= l
/ gpmc_get_fclk_period();
296 int gpmc_cs_set_timings(int cs
, const struct gpmc_timings
*t
)
301 div
= gpmc_cs_calc_divider(cs
, t
->sync_clk
);
305 GPMC_SET_ONE(GPMC_CS_CONFIG2
, 0, 3, cs_on
);
306 GPMC_SET_ONE(GPMC_CS_CONFIG2
, 8, 12, cs_rd_off
);
307 GPMC_SET_ONE(GPMC_CS_CONFIG2
, 16, 20, cs_wr_off
);
309 GPMC_SET_ONE(GPMC_CS_CONFIG3
, 0, 3, adv_on
);
310 GPMC_SET_ONE(GPMC_CS_CONFIG3
, 8, 12, adv_rd_off
);
311 GPMC_SET_ONE(GPMC_CS_CONFIG3
, 16, 20, adv_wr_off
);
313 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 0, 3, oe_on
);
314 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 8, 12, oe_off
);
315 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 16, 19, we_on
);
316 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 24, 28, we_off
);
318 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 0, 4, rd_cycle
);
319 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 8, 12, wr_cycle
);
320 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 16, 20, access
);
322 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 24, 27, page_burst_access
);
324 if (cpu_is_omap34xx()) {
325 GPMC_SET_ONE(GPMC_CS_CONFIG6
, 16, 19, wr_data_mux_bus
);
326 GPMC_SET_ONE(GPMC_CS_CONFIG6
, 24, 28, wr_access
);
329 /* caller is expected to have initialized CONFIG1 to cover
330 * at least sync vs async
332 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG1
);
333 if (l
& (GPMC_CONFIG1_READTYPE_SYNC
| GPMC_CONFIG1_WRITETYPE_SYNC
)) {
335 printk(KERN_INFO
"GPMC CS%d CLK period is %lu ns (div %d)\n",
336 cs
, (div
* gpmc_get_fclk_period()) / 1000, div
);
340 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG1
, l
);
346 static void gpmc_cs_enable_mem(int cs
, u32 base
, u32 size
)
351 mask
= (1 << GPMC_SECTION_SHIFT
) - size
;
352 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
354 l
= (base
>> GPMC_CHUNK_SHIFT
) & 0x3f;
356 l
|= ((mask
>> GPMC_CHUNK_SHIFT
) & 0x0f) << 8;
357 l
|= GPMC_CONFIG7_CSVALID
;
358 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG7
, l
);
361 static void gpmc_cs_disable_mem(int cs
)
365 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
366 l
&= ~GPMC_CONFIG7_CSVALID
;
367 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG7
, l
);
370 static void gpmc_cs_get_memconf(int cs
, u32
*base
, u32
*size
)
375 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
376 *base
= (l
& 0x3f) << GPMC_CHUNK_SHIFT
;
377 mask
= (l
>> 8) & 0x0f;
378 *size
= (1 << GPMC_SECTION_SHIFT
) - (mask
<< GPMC_CHUNK_SHIFT
);
381 static int gpmc_cs_mem_enabled(int cs
)
385 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
386 return l
& GPMC_CONFIG7_CSVALID
;
389 int gpmc_cs_set_reserved(int cs
, int reserved
)
391 if (cs
> GPMC_CS_NUM
)
394 gpmc_cs_map
&= ~(1 << cs
);
395 gpmc_cs_map
|= (reserved
? 1 : 0) << cs
;
400 int gpmc_cs_reserved(int cs
)
402 if (cs
> GPMC_CS_NUM
)
405 return gpmc_cs_map
& (1 << cs
);
408 static unsigned long gpmc_mem_align(unsigned long size
)
412 size
= (size
- 1) >> (GPMC_CHUNK_SHIFT
- 1);
413 order
= GPMC_CHUNK_SHIFT
- 1;
422 static int gpmc_cs_insert_mem(int cs
, unsigned long base
, unsigned long size
)
424 struct resource
*res
= &gpmc_cs_mem
[cs
];
427 size
= gpmc_mem_align(size
);
428 spin_lock(&gpmc_mem_lock
);
430 res
->end
= base
+ size
- 1;
431 r
= request_resource(&gpmc_mem_root
, res
);
432 spin_unlock(&gpmc_mem_lock
);
437 int gpmc_cs_request(int cs
, unsigned long size
, unsigned long *base
)
439 struct resource
*res
= &gpmc_cs_mem
[cs
];
442 if (cs
> GPMC_CS_NUM
)
445 size
= gpmc_mem_align(size
);
446 if (size
> (1 << GPMC_SECTION_SHIFT
))
449 spin_lock(&gpmc_mem_lock
);
450 if (gpmc_cs_reserved(cs
)) {
454 if (gpmc_cs_mem_enabled(cs
))
455 r
= adjust_resource(res
, res
->start
& ~(size
- 1), size
);
457 r
= allocate_resource(&gpmc_mem_root
, res
, size
, 0, ~0,
462 gpmc_cs_enable_mem(cs
, res
->start
, resource_size(res
));
464 gpmc_cs_set_reserved(cs
, 1);
466 spin_unlock(&gpmc_mem_lock
);
469 EXPORT_SYMBOL(gpmc_cs_request
);
471 void gpmc_cs_free(int cs
)
473 spin_lock(&gpmc_mem_lock
);
474 if (cs
>= GPMC_CS_NUM
|| cs
< 0 || !gpmc_cs_reserved(cs
)) {
475 printk(KERN_ERR
"Trying to free non-reserved GPMC CS%d\n", cs
);
477 spin_unlock(&gpmc_mem_lock
);
480 gpmc_cs_disable_mem(cs
);
481 release_resource(&gpmc_cs_mem
[cs
]);
482 gpmc_cs_set_reserved(cs
, 0);
483 spin_unlock(&gpmc_mem_lock
);
485 EXPORT_SYMBOL(gpmc_cs_free
);
488 * gpmc_read_status - read access request to get the different gpmc status
492 int gpmc_read_status(int cmd
)
494 int status
= -EINVAL
;
498 case GPMC_GET_IRQ_STATUS
:
499 status
= gpmc_read_reg(GPMC_IRQSTATUS
);
502 case GPMC_PREFETCH_FIFO_CNT
:
503 regval
= gpmc_read_reg(GPMC_PREFETCH_STATUS
);
504 status
= GPMC_PREFETCH_STATUS_FIFO_CNT(regval
);
507 case GPMC_PREFETCH_COUNT
:
508 regval
= gpmc_read_reg(GPMC_PREFETCH_STATUS
);
509 status
= GPMC_PREFETCH_STATUS_COUNT(regval
);
512 case GPMC_STATUS_BUFFER
:
513 regval
= gpmc_read_reg(GPMC_STATUS
);
514 /* 1 : buffer is available to write */
515 status
= regval
& GPMC_STATUS_BUFF_EMPTY
;
519 printk(KERN_ERR
"gpmc_read_status: Not supported\n");
523 EXPORT_SYMBOL(gpmc_read_status
);
526 * gpmc_cs_configure - write request to configure gpmc
527 * @cs: chip select number
529 * @wval: value to write
530 * @return status of the operation
532 int gpmc_cs_configure(int cs
, int cmd
, int wval
)
538 case GPMC_ENABLE_IRQ
:
539 gpmc_write_reg(GPMC_IRQENABLE
, wval
);
542 case GPMC_SET_IRQ_STATUS
:
543 gpmc_write_reg(GPMC_IRQSTATUS
, wval
);
547 regval
= gpmc_read_reg(GPMC_CONFIG
);
549 regval
&= ~GPMC_CONFIG_WRITEPROTECT
; /* WP is ON */
551 regval
|= GPMC_CONFIG_WRITEPROTECT
; /* WP is OFF */
552 gpmc_write_reg(GPMC_CONFIG
, regval
);
555 case GPMC_CONFIG_RDY_BSY
:
556 regval
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG1
);
558 regval
|= WR_RD_PIN_MONITORING
;
560 regval
&= ~WR_RD_PIN_MONITORING
;
561 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG1
, regval
);
564 case GPMC_CONFIG_DEV_SIZE
:
565 regval
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG1
);
567 /* clear 2 target bits */
568 regval
&= ~GPMC_CONFIG1_DEVICESIZE(3);
570 /* set the proper value */
571 regval
|= GPMC_CONFIG1_DEVICESIZE(wval
);
573 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG1
, regval
);
576 case GPMC_CONFIG_DEV_TYPE
:
577 regval
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG1
);
578 regval
|= GPMC_CONFIG1_DEVICETYPE(wval
);
579 if (wval
== GPMC_DEVICETYPE_NOR
)
580 regval
|= GPMC_CONFIG1_MUXADDDATA
;
581 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG1
, regval
);
585 printk(KERN_ERR
"gpmc_configure_cs: Not supported\n");
591 EXPORT_SYMBOL(gpmc_cs_configure
);
594 * gpmc_nand_read - nand specific read access request
595 * @cs: chip select number
598 int gpmc_nand_read(int cs
, int cmd
)
604 rval
= gpmc_cs_read_byte(cs
, GPMC_CS_NAND_DATA
);
608 printk(KERN_ERR
"gpmc_read_nand_ctrl: Not supported\n");
612 EXPORT_SYMBOL(gpmc_nand_read
);
615 * gpmc_nand_write - nand specific write request
616 * @cs: chip select number
618 * @wval: value to write
620 int gpmc_nand_write(int cs
, int cmd
, int wval
)
625 case GPMC_NAND_COMMAND
:
626 gpmc_cs_write_byte(cs
, GPMC_CS_NAND_COMMAND
, wval
);
629 case GPMC_NAND_ADDRESS
:
630 gpmc_cs_write_byte(cs
, GPMC_CS_NAND_ADDRESS
, wval
);
634 gpmc_cs_write_byte(cs
, GPMC_CS_NAND_DATA
, wval
);
637 printk(KERN_ERR
"gpmc_write_nand_ctrl: Not supported\n");
642 EXPORT_SYMBOL(gpmc_nand_write
);
647 * gpmc_prefetch_enable - configures and starts prefetch transfer
648 * @cs: cs (chip select) number
649 * @fifo_th: fifo threshold to be used for read/ write
650 * @dma_mode: dma mode enable (1) or disable (0)
651 * @u32_count: number of bytes to be transferred
652 * @is_write: prefetch read(0) or write post(1) mode
654 int gpmc_prefetch_enable(int cs
, int fifo_th
, int dma_mode
,
655 unsigned int u32_count
, int is_write
)
658 if (fifo_th
> PREFETCH_FIFOTHRESHOLD_MAX
) {
659 pr_err("gpmc: fifo threshold is not supported\n");
661 } else if (!(gpmc_read_reg(GPMC_PREFETCH_CONTROL
))) {
662 /* Set the amount of bytes to be prefetched */
663 gpmc_write_reg(GPMC_PREFETCH_CONFIG2
, u32_count
);
665 /* Set dma/mpu mode, the prefetch read / post write and
666 * enable the engine. Set which cs is has requested for.
668 gpmc_write_reg(GPMC_PREFETCH_CONFIG1
, ((cs
<< CS_NUM_SHIFT
) |
669 PREFETCH_FIFOTHRESHOLD(fifo_th
) |
671 (dma_mode
<< DMA_MPU_MODE
) |
674 /* Start the prefetch engine */
675 gpmc_write_reg(GPMC_PREFETCH_CONTROL
, 0x1);
682 EXPORT_SYMBOL(gpmc_prefetch_enable
);
685 * gpmc_prefetch_reset - disables and stops the prefetch engine
687 int gpmc_prefetch_reset(int cs
)
691 /* check if the same module/cs is trying to reset */
692 config1
= gpmc_read_reg(GPMC_PREFETCH_CONFIG1
);
693 if (((config1
>> CS_NUM_SHIFT
) & 0x7) != cs
)
696 /* Stop the PFPW engine */
697 gpmc_write_reg(GPMC_PREFETCH_CONTROL
, 0x0);
699 /* Reset/disable the PFPW engine */
700 gpmc_write_reg(GPMC_PREFETCH_CONFIG1
, 0x0);
704 EXPORT_SYMBOL(gpmc_prefetch_reset
);
706 void gpmc_update_nand_reg(struct gpmc_nand_regs
*reg
, int cs
)
708 reg
->gpmc_status
= gpmc_base
+ GPMC_STATUS
;
709 reg
->gpmc_nand_command
= gpmc_base
+ GPMC_CS0_OFFSET
+
710 GPMC_CS_NAND_COMMAND
+ GPMC_CS_SIZE
* cs
;
711 reg
->gpmc_nand_address
= gpmc_base
+ GPMC_CS0_OFFSET
+
712 GPMC_CS_NAND_ADDRESS
+ GPMC_CS_SIZE
* cs
;
713 reg
->gpmc_nand_data
= gpmc_base
+ GPMC_CS0_OFFSET
+
714 GPMC_CS_NAND_DATA
+ GPMC_CS_SIZE
* cs
;
715 reg
->gpmc_prefetch_config1
= gpmc_base
+ GPMC_PREFETCH_CONFIG1
;
716 reg
->gpmc_prefetch_config2
= gpmc_base
+ GPMC_PREFETCH_CONFIG2
;
717 reg
->gpmc_prefetch_control
= gpmc_base
+ GPMC_PREFETCH_CONTROL
;
718 reg
->gpmc_prefetch_status
= gpmc_base
+ GPMC_PREFETCH_STATUS
;
719 reg
->gpmc_ecc_config
= gpmc_base
+ GPMC_ECC_CONFIG
;
720 reg
->gpmc_ecc_control
= gpmc_base
+ GPMC_ECC_CONTROL
;
721 reg
->gpmc_ecc_size_config
= gpmc_base
+ GPMC_ECC_SIZE_CONFIG
;
722 reg
->gpmc_ecc1_result
= gpmc_base
+ GPMC_ECC1_RESULT
;
723 reg
->gpmc_bch_result0
= gpmc_base
+ GPMC_ECC_BCH_RESULT_0
;
726 int gpmc_get_client_irq(unsigned irq_config
)
730 if (hweight32(irq_config
) > 1)
733 for (i
= 0; i
< GPMC_NR_IRQ
; i
++)
734 if (gpmc_client_irq
[i
].bitmask
& irq_config
)
735 return gpmc_client_irq
[i
].irq
;
740 static int gpmc_irq_endis(unsigned irq
, bool endis
)
745 for (i
= 0; i
< GPMC_NR_IRQ
; i
++)
746 if (irq
== gpmc_client_irq
[i
].irq
) {
747 regval
= gpmc_read_reg(GPMC_IRQENABLE
);
749 regval
|= gpmc_client_irq
[i
].bitmask
;
751 regval
&= ~gpmc_client_irq
[i
].bitmask
;
752 gpmc_write_reg(GPMC_IRQENABLE
, regval
);
759 static void gpmc_irq_disable(struct irq_data
*p
)
761 gpmc_irq_endis(p
->irq
, false);
764 static void gpmc_irq_enable(struct irq_data
*p
)
766 gpmc_irq_endis(p
->irq
, true);
/* Do-nothing irq_chip hook (ack/mask/unmask/shutdown are no-ops here). */
static void gpmc_irq_noop(struct irq_data *data) { }
/* Do-nothing irq_chip .irq_startup hook; always reports success (0). */
static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
773 static int gpmc_setup_irq(int gpmc_irq
)
781 gpmc_irq_start
= irq_alloc_descs(-1, 0, GPMC_NR_IRQ
, 0);
782 if (IS_ERR_VALUE(gpmc_irq_start
)) {
783 pr_err("irq_alloc_descs failed\n");
784 return gpmc_irq_start
;
787 gpmc_irq_chip
.name
= "gpmc";
788 gpmc_irq_chip
.irq_startup
= gpmc_irq_noop_ret
;
789 gpmc_irq_chip
.irq_enable
= gpmc_irq_enable
;
790 gpmc_irq_chip
.irq_disable
= gpmc_irq_disable
;
791 gpmc_irq_chip
.irq_shutdown
= gpmc_irq_noop
;
792 gpmc_irq_chip
.irq_ack
= gpmc_irq_noop
;
793 gpmc_irq_chip
.irq_mask
= gpmc_irq_noop
;
794 gpmc_irq_chip
.irq_unmask
= gpmc_irq_noop
;
796 gpmc_client_irq
[0].bitmask
= GPMC_IRQ_FIFOEVENTENABLE
;
797 gpmc_client_irq
[1].bitmask
= GPMC_IRQ_COUNT_EVENT
;
799 for (i
= 0; i
< GPMC_NR_IRQ
; i
++) {
800 gpmc_client_irq
[i
].irq
= gpmc_irq_start
+ i
;
801 irq_set_chip_and_handler(gpmc_client_irq
[i
].irq
,
802 &gpmc_irq_chip
, handle_simple_irq
);
803 set_irq_flags(gpmc_client_irq
[i
].irq
,
804 IRQF_VALID
| IRQF_NOAUTOEN
);
807 /* Disable interrupts */
808 gpmc_write_reg(GPMC_IRQENABLE
, 0);
810 /* clear interrupts */
811 regval
= gpmc_read_reg(GPMC_IRQSTATUS
);
812 gpmc_write_reg(GPMC_IRQSTATUS
, regval
);
814 return request_irq(gpmc_irq
, gpmc_handle_irq
, 0, "gpmc", NULL
);
817 static void __init
gpmc_mem_init(void)
820 unsigned long boot_rom_space
= 0;
822 /* never allocate the first page, to facilitate bug detection;
823 * even if we didn't boot from ROM.
825 boot_rom_space
= BOOT_ROM_SPACE
;
826 /* In apollon the CS0 is mapped as 0x0000 0000 */
827 if (machine_is_omap_apollon())
829 gpmc_mem_root
.start
= GPMC_MEM_START
+ boot_rom_space
;
830 gpmc_mem_root
.end
= GPMC_MEM_END
;
832 /* Reserve all regions that has been set up by bootloader */
833 for (cs
= 0; cs
< GPMC_CS_NUM
; cs
++) {
836 if (!gpmc_cs_mem_enabled(cs
))
838 gpmc_cs_get_memconf(cs
, &base
, &size
);
839 if (gpmc_cs_insert_mem(cs
, base
, size
) < 0)
844 static int __init
gpmc_init(void)
851 if (cpu_is_omap24xx()) {
853 if (cpu_is_omap2420())
854 l
= OMAP2420_GPMC_BASE
;
856 l
= OMAP34XX_GPMC_BASE
;
857 gpmc_irq
= 20 + OMAP_INTC_START
;
858 } else if (cpu_is_omap34xx()) {
860 l
= OMAP34XX_GPMC_BASE
;
861 gpmc_irq
= 20 + OMAP_INTC_START
;
862 } else if (cpu_is_omap44xx() || soc_is_omap54xx()) {
863 /* Base address and irq number are same for OMAP4/5 */
865 l
= OMAP44XX_GPMC_BASE
;
866 gpmc_irq
= 20 + OMAP44XX_IRQ_GIC_START
;
872 gpmc_l3_clk
= clk_get(NULL
, ck
);
873 if (IS_ERR(gpmc_l3_clk
)) {
874 printk(KERN_ERR
"Could not get GPMC clock %s\n", ck
);
878 gpmc_base
= ioremap(l
, SZ_4K
);
880 clk_put(gpmc_l3_clk
);
881 printk(KERN_ERR
"Could not get GPMC register memory\n");
885 clk_prepare_enable(gpmc_l3_clk
);
887 l
= gpmc_read_reg(GPMC_REVISION
);
888 printk(KERN_INFO
"GPMC revision %d.%d\n", (l
>> 4) & 0x0f, l
& 0x0f);
889 /* Set smart idle mode and automatic L3 clock gating */
890 l
= gpmc_read_reg(GPMC_SYSCONFIG
);
892 l
|= (0x02 << 3) | (1 << 0);
893 gpmc_write_reg(GPMC_SYSCONFIG
, l
);
896 ret
= gpmc_setup_irq(gpmc_irq
);
898 pr_err("gpmc: irq-%d could not claim: err %d\n",
902 postcore_initcall(gpmc_init
);
904 static int __init
omap_gpmc_init(void)
906 struct omap_hwmod
*oh
;
907 struct platform_device
*pdev
;
908 char *oh_name
= "gpmc";
910 oh
= omap_hwmod_lookup(oh_name
);
912 pr_err("Could not look up %s\n", oh_name
);
916 pdev
= omap_device_build(DEVICE_NAME
, -1, oh
, NULL
, 0, NULL
, 0, 0);
917 WARN(IS_ERR(pdev
), "could not build omap_device for %s\n", oh_name
);
919 return IS_ERR(pdev
) ? PTR_ERR(pdev
) : 0;
921 postcore_initcall(omap_gpmc_init
);
923 static irqreturn_t
gpmc_handle_irq(int irq
, void *dev
)
928 regval
= gpmc_read_reg(GPMC_IRQSTATUS
);
933 for (i
= 0; i
< GPMC_NR_IRQ
; i
++)
934 if (regval
& gpmc_client_irq
[i
].bitmask
)
935 generic_handle_irq(gpmc_client_irq
[i
].irq
);
937 gpmc_write_reg(GPMC_IRQSTATUS
, regval
);
942 #ifdef CONFIG_ARCH_OMAP3
943 static struct omap3_gpmc_regs gpmc_context
;
945 void omap3_gpmc_save_context(void)
949 gpmc_context
.sysconfig
= gpmc_read_reg(GPMC_SYSCONFIG
);
950 gpmc_context
.irqenable
= gpmc_read_reg(GPMC_IRQENABLE
);
951 gpmc_context
.timeout_ctrl
= gpmc_read_reg(GPMC_TIMEOUT_CONTROL
);
952 gpmc_context
.config
= gpmc_read_reg(GPMC_CONFIG
);
953 gpmc_context
.prefetch_config1
= gpmc_read_reg(GPMC_PREFETCH_CONFIG1
);
954 gpmc_context
.prefetch_config2
= gpmc_read_reg(GPMC_PREFETCH_CONFIG2
);
955 gpmc_context
.prefetch_control
= gpmc_read_reg(GPMC_PREFETCH_CONTROL
);
956 for (i
= 0; i
< GPMC_CS_NUM
; i
++) {
957 gpmc_context
.cs_context
[i
].is_valid
= gpmc_cs_mem_enabled(i
);
958 if (gpmc_context
.cs_context
[i
].is_valid
) {
959 gpmc_context
.cs_context
[i
].config1
=
960 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG1
);
961 gpmc_context
.cs_context
[i
].config2
=
962 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG2
);
963 gpmc_context
.cs_context
[i
].config3
=
964 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG3
);
965 gpmc_context
.cs_context
[i
].config4
=
966 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG4
);
967 gpmc_context
.cs_context
[i
].config5
=
968 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG5
);
969 gpmc_context
.cs_context
[i
].config6
=
970 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG6
);
971 gpmc_context
.cs_context
[i
].config7
=
972 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG7
);
977 void omap3_gpmc_restore_context(void)
981 gpmc_write_reg(GPMC_SYSCONFIG
, gpmc_context
.sysconfig
);
982 gpmc_write_reg(GPMC_IRQENABLE
, gpmc_context
.irqenable
);
983 gpmc_write_reg(GPMC_TIMEOUT_CONTROL
, gpmc_context
.timeout_ctrl
);
984 gpmc_write_reg(GPMC_CONFIG
, gpmc_context
.config
);
985 gpmc_write_reg(GPMC_PREFETCH_CONFIG1
, gpmc_context
.prefetch_config1
);
986 gpmc_write_reg(GPMC_PREFETCH_CONFIG2
, gpmc_context
.prefetch_config2
);
987 gpmc_write_reg(GPMC_PREFETCH_CONTROL
, gpmc_context
.prefetch_control
);
988 for (i
= 0; i
< GPMC_CS_NUM
; i
++) {
989 if (gpmc_context
.cs_context
[i
].is_valid
) {
990 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG1
,
991 gpmc_context
.cs_context
[i
].config1
);
992 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG2
,
993 gpmc_context
.cs_context
[i
].config2
);
994 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG3
,
995 gpmc_context
.cs_context
[i
].config3
);
996 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG4
,
997 gpmc_context
.cs_context
[i
].config4
);
998 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG5
,
999 gpmc_context
.cs_context
[i
].config5
);
1000 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG6
,
1001 gpmc_context
.cs_context
[i
].config6
);
1002 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG7
,
1003 gpmc_context
.cs_context
[i
].config7
);
1007 #endif /* CONFIG_ARCH_OMAP3 */
1010 * gpmc_enable_hwecc - enable hardware ecc functionality
1011 * @cs: chip select number
1012 * @mode: read/write mode
1013 * @dev_width: device bus width(1 for x16, 0 for x8)
1014 * @ecc_size: bytes for which ECC will be generated
1016 int gpmc_enable_hwecc(int cs
, int mode
, int dev_width
, int ecc_size
)
1020 /* check if ecc module is in used */
1021 if (gpmc_ecc_used
!= -EINVAL
)
1026 /* clear ecc and enable bits */
1027 gpmc_write_reg(GPMC_ECC_CONTROL
,
1028 GPMC_ECC_CTRL_ECCCLEAR
|
1029 GPMC_ECC_CTRL_ECCREG1
);
1031 /* program ecc and result sizes */
1032 val
= ((((ecc_size
>> 1) - 1) << 22) | (0x0000000F));
1033 gpmc_write_reg(GPMC_ECC_SIZE_CONFIG
, val
);
1037 case GPMC_ECC_WRITE
:
1038 gpmc_write_reg(GPMC_ECC_CONTROL
,
1039 GPMC_ECC_CTRL_ECCCLEAR
|
1040 GPMC_ECC_CTRL_ECCREG1
);
1042 case GPMC_ECC_READSYN
:
1043 gpmc_write_reg(GPMC_ECC_CONTROL
,
1044 GPMC_ECC_CTRL_ECCCLEAR
|
1045 GPMC_ECC_CTRL_ECCDISABLE
);
1048 printk(KERN_INFO
"Error: Unrecognized Mode[%d]!\n", mode
);
1052 /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
1053 val
= (dev_width
<< 7) | (cs
<< 1) | (0x1);
1054 gpmc_write_reg(GPMC_ECC_CONFIG
, val
);
1057 EXPORT_SYMBOL_GPL(gpmc_enable_hwecc
);
1060 * gpmc_calculate_ecc - generate non-inverted ecc bytes
1061 * @cs: chip select number
1062 * @dat: data pointer over which ecc is computed
1063 * @ecc_code: ecc code buffer
1065 * Using non-inverted ECC is considered ugly since writing a blank
1066 * page (padding) will clear the ECC bytes. This is not a problem as long
1067 * no one is trying to write data on the seemingly unused page. Reading
1068 * an erased page will produce an ECC mismatch between generated and read
1069 * ECC bytes that has to be dealt with separately.
1071 int gpmc_calculate_ecc(int cs
, const u_char
*dat
, u_char
*ecc_code
)
1073 unsigned int val
= 0x0;
1075 if (gpmc_ecc_used
!= cs
)
1078 /* read ecc result */
1079 val
= gpmc_read_reg(GPMC_ECC1_RESULT
);
1080 *ecc_code
++ = val
; /* P128e, ..., P1e */
1081 *ecc_code
++ = val
>> 16; /* P128o, ..., P1o */
1082 /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
1083 *ecc_code
++ = ((val
>> 8) & 0x0f) | ((val
>> 20) & 0xf0);
1085 gpmc_ecc_used
= -EINVAL
;
1088 EXPORT_SYMBOL_GPL(gpmc_calculate_ecc
);
1090 #ifdef CONFIG_ARCH_OMAP3
1093 * gpmc_init_hwecc_bch - initialize hardware BCH ecc functionality
1094 * @cs: chip select number
1095 * @nsectors: how many 512-byte sectors to process
1096 * @nerrors: how many errors to correct per sector (4 or 8)
1098 * This function must be executed before any call to gpmc_enable_hwecc_bch.
1100 int gpmc_init_hwecc_bch(int cs
, int nsectors
, int nerrors
)
1102 /* check if ecc module is in use */
1103 if (gpmc_ecc_used
!= -EINVAL
)
1106 /* support only OMAP3 class */
1107 if (!cpu_is_omap34xx()) {
1108 printk(KERN_ERR
"BCH ecc is not supported on this CPU\n");
1113 * For now, assume 4-bit mode is only supported on OMAP3630 ES1.x, x>=1.
1114 * Other chips may be added if confirmed to work.
1116 if ((nerrors
== 4) &&
1117 (!cpu_is_omap3630() || (GET_OMAP_REVISION() == 0))) {
1118 printk(KERN_ERR
"BCH 4-bit mode is not supported on this CPU\n");
1124 printk(KERN_ERR
"BCH cannot process %d sectors (max is 8)\n",
1131 EXPORT_SYMBOL_GPL(gpmc_init_hwecc_bch
);
1134 * gpmc_enable_hwecc_bch - enable hardware BCH ecc functionality
1135 * @cs: chip select number
1136 * @mode: read/write mode
1137 * @dev_width: device bus width(1 for x16, 0 for x8)
1138 * @nsectors: how many 512-byte sectors to process
1139 * @nerrors: how many errors to correct per sector (4 or 8)
1141 int gpmc_enable_hwecc_bch(int cs
, int mode
, int dev_width
, int nsectors
,
1146 /* check if ecc module is in use */
1147 if (gpmc_ecc_used
!= -EINVAL
)
1152 /* clear ecc and enable bits */
1153 gpmc_write_reg(GPMC_ECC_CONTROL
, 0x1);
1156 * When using BCH, sector size is hardcoded to 512 bytes.
1157 * Here we are using wrapping mode 6 both for reading and writing, with:
1158 * size0 = 0 (no additional protected byte in spare area)
1159 * size1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
1161 gpmc_write_reg(GPMC_ECC_SIZE_CONFIG
, (32 << 22) | (0 << 12));
1163 /* BCH configuration */
1164 val
= ((1 << 16) | /* enable BCH */
1165 (((nerrors
== 8) ? 1 : 0) << 12) | /* 8 or 4 bits */
1166 (0x06 << 8) | /* wrap mode = 6 */
1167 (dev_width
<< 7) | /* bus width */
1168 (((nsectors
-1) & 0x7) << 4) | /* number of sectors */
1169 (cs
<< 1) | /* ECC CS */
1170 (0x1)); /* enable ECC */
1172 gpmc_write_reg(GPMC_ECC_CONFIG
, val
);
1173 gpmc_write_reg(GPMC_ECC_CONTROL
, 0x101);
1176 EXPORT_SYMBOL_GPL(gpmc_enable_hwecc_bch
);
1179 * gpmc_calculate_ecc_bch4 - Generate 7 ecc bytes per sector of 512 data bytes
1180 * @cs: chip select number
1181 * @dat: The pointer to data on which ecc is computed
1182 * @ecc: The ecc output buffer
1184 int gpmc_calculate_ecc_bch4(int cs
, const u_char
*dat
, u_char
*ecc
)
1187 unsigned long nsectors
, reg
, val1
, val2
;
1189 if (gpmc_ecc_used
!= cs
)
1192 nsectors
= ((gpmc_read_reg(GPMC_ECC_CONFIG
) >> 4) & 0x7) + 1;
1194 for (i
= 0; i
< nsectors
; i
++) {
1196 reg
= GPMC_ECC_BCH_RESULT_0
+ 16*i
;
1198 /* Read hw-computed remainder */
1199 val1
= gpmc_read_reg(reg
+ 0);
1200 val2
= gpmc_read_reg(reg
+ 4);
1203 * Add constant polynomial to remainder, in order to get an ecc
1204 * sequence of 0xFFs for a buffer filled with 0xFFs; and
1205 * left-justify the resulting polynomial.
1207 *ecc
++ = 0x28 ^ ((val2
>> 12) & 0xFF);
1208 *ecc
++ = 0x13 ^ ((val2
>> 4) & 0xFF);
1209 *ecc
++ = 0xcc ^ (((val2
& 0xF) << 4)|((val1
>> 28) & 0xF));
1210 *ecc
++ = 0x39 ^ ((val1
>> 20) & 0xFF);
1211 *ecc
++ = 0x96 ^ ((val1
>> 12) & 0xFF);
1212 *ecc
++ = 0xac ^ ((val1
>> 4) & 0xFF);
1213 *ecc
++ = 0x7f ^ ((val1
& 0xF) << 4);
1216 gpmc_ecc_used
= -EINVAL
;
1219 EXPORT_SYMBOL_GPL(gpmc_calculate_ecc_bch4
);
1222 * gpmc_calculate_ecc_bch8 - Generate 13 ecc bytes per block of 512 data bytes
1223 * @cs: chip select number
1224 * @dat: The pointer to data on which ecc is computed
1225 * @ecc: The ecc output buffer
1227 int gpmc_calculate_ecc_bch8(int cs
, const u_char
*dat
, u_char
*ecc
)
1230 unsigned long nsectors
, reg
, val1
, val2
, val3
, val4
;
1232 if (gpmc_ecc_used
!= cs
)
1235 nsectors
= ((gpmc_read_reg(GPMC_ECC_CONFIG
) >> 4) & 0x7) + 1;
1237 for (i
= 0; i
< nsectors
; i
++) {
1239 reg
= GPMC_ECC_BCH_RESULT_0
+ 16*i
;
1241 /* Read hw-computed remainder */
1242 val1
= gpmc_read_reg(reg
+ 0);
1243 val2
= gpmc_read_reg(reg
+ 4);
1244 val3
= gpmc_read_reg(reg
+ 8);
1245 val4
= gpmc_read_reg(reg
+ 12);
1248 * Add constant polynomial to remainder, in order to get an ecc
1249 * sequence of 0xFFs for a buffer filled with 0xFFs.
1251 *ecc
++ = 0xef ^ (val4
& 0xFF);
1252 *ecc
++ = 0x51 ^ ((val3
>> 24) & 0xFF);
1253 *ecc
++ = 0x2e ^ ((val3
>> 16) & 0xFF);
1254 *ecc
++ = 0x09 ^ ((val3
>> 8) & 0xFF);
1255 *ecc
++ = 0xed ^ (val3
& 0xFF);
1256 *ecc
++ = 0x93 ^ ((val2
>> 24) & 0xFF);
1257 *ecc
++ = 0x9a ^ ((val2
>> 16) & 0xFF);
1258 *ecc
++ = 0xc2 ^ ((val2
>> 8) & 0xFF);
1259 *ecc
++ = 0x97 ^ (val2
& 0xFF);
1260 *ecc
++ = 0x79 ^ ((val1
>> 24) & 0xFF);
1261 *ecc
++ = 0xe5 ^ ((val1
>> 16) & 0xFF);
1262 *ecc
++ = 0x24 ^ ((val1
>> 8) & 0xFF);
1263 *ecc
++ = 0xb5 ^ (val1
& 0xFF);
1266 gpmc_ecc_used
= -EINVAL
;
1269 EXPORT_SYMBOL_GPL(gpmc_calculate_ecc_bch8
);
1271 #endif /* CONFIG_ARCH_OMAP3 */