/*
 * GPMC support functions
 *
 * Copyright (C) 2005-2006 Nokia Corporation
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
17 #include <linux/irq.h>
18 #include <linux/kernel.h>
19 #include <linux/init.h>
20 #include <linux/err.h>
21 #include <linux/clk.h>
22 #include <linux/ioport.h>
23 #include <linux/spinlock.h>
25 #include <linux/module.h>
26 #include <linux/interrupt.h>
27 #include <linux/platform_device.h>
29 #include <linux/platform_data/mtd-nand-omap2.h>
31 #include <asm/mach-types.h>
37 #include "omap_device.h"
40 #define DEVICE_NAME "omap-gpmc"
42 /* GPMC register offsets */
43 #define GPMC_REVISION 0x00
44 #define GPMC_SYSCONFIG 0x10
45 #define GPMC_SYSSTATUS 0x14
46 #define GPMC_IRQSTATUS 0x18
47 #define GPMC_IRQENABLE 0x1c
48 #define GPMC_TIMEOUT_CONTROL 0x40
49 #define GPMC_ERR_ADDRESS 0x44
50 #define GPMC_ERR_TYPE 0x48
51 #define GPMC_CONFIG 0x50
52 #define GPMC_STATUS 0x54
53 #define GPMC_PREFETCH_CONFIG1 0x1e0
54 #define GPMC_PREFETCH_CONFIG2 0x1e4
55 #define GPMC_PREFETCH_CONTROL 0x1ec
56 #define GPMC_PREFETCH_STATUS 0x1f0
57 #define GPMC_ECC_CONFIG 0x1f4
58 #define GPMC_ECC_CONTROL 0x1f8
59 #define GPMC_ECC_SIZE_CONFIG 0x1fc
60 #define GPMC_ECC1_RESULT 0x200
61 #define GPMC_ECC_BCH_RESULT_0 0x240 /* not available on OMAP2 */
62 #define GPMC_ECC_BCH_RESULT_1 0x244 /* not available on OMAP2 */
63 #define GPMC_ECC_BCH_RESULT_2 0x248 /* not available on OMAP2 */
64 #define GPMC_ECC_BCH_RESULT_3 0x24c /* not available on OMAP2 */
66 /* GPMC ECC control settings */
67 #define GPMC_ECC_CTRL_ECCCLEAR 0x100
68 #define GPMC_ECC_CTRL_ECCDISABLE 0x000
69 #define GPMC_ECC_CTRL_ECCREG1 0x001
70 #define GPMC_ECC_CTRL_ECCREG2 0x002
71 #define GPMC_ECC_CTRL_ECCREG3 0x003
72 #define GPMC_ECC_CTRL_ECCREG4 0x004
73 #define GPMC_ECC_CTRL_ECCREG5 0x005
74 #define GPMC_ECC_CTRL_ECCREG6 0x006
75 #define GPMC_ECC_CTRL_ECCREG7 0x007
76 #define GPMC_ECC_CTRL_ECCREG8 0x008
77 #define GPMC_ECC_CTRL_ECCREG9 0x009
79 #define GPMC_CS0_OFFSET 0x60
80 #define GPMC_CS_SIZE 0x30
81 #define GPMC_BCH_SIZE 0x10
83 #define GPMC_MEM_START 0x00000000
84 #define GPMC_MEM_END 0x3FFFFFFF
85 #define BOOT_ROM_SPACE 0x100000 /* 1MB */
87 #define GPMC_CHUNK_SHIFT 24 /* 16 MB */
88 #define GPMC_SECTION_SHIFT 28 /* 128 MB */
90 #define CS_NUM_SHIFT 24
91 #define ENABLE_PREFETCH (0x1 << 7)
92 #define DMA_MPU_MODE 2
94 #define GPMC_REVISION_MAJOR(l) ((l >> 4) & 0xf)
95 #define GPMC_REVISION_MINOR(l) (l & 0xf)
97 #define GPMC_HAS_WR_ACCESS 0x1
98 #define GPMC_HAS_WR_DATA_MUX_BUS 0x2
/* XXX: Only NAND irq has been considered, currently these are the only ones used */
102 #define GPMC_NR_IRQ 2
104 struct gpmc_client_irq
{
109 /* Structure to save gpmc cs context */
110 struct gpmc_cs_config
{
122 * Structure to save/restore gpmc context
123 * to support core off on OMAP3
125 struct omap3_gpmc_regs
{
130 u32 prefetch_config1
;
131 u32 prefetch_config2
;
132 u32 prefetch_control
;
133 struct gpmc_cs_config cs_context
[GPMC_CS_NUM
];
136 static struct gpmc_client_irq gpmc_client_irq
[GPMC_NR_IRQ
];
137 static struct irq_chip gpmc_irq_chip
;
138 static unsigned gpmc_irq_start
;
140 static struct resource gpmc_mem_root
;
141 static struct resource gpmc_cs_mem
[GPMC_CS_NUM
];
142 static DEFINE_SPINLOCK(gpmc_mem_lock
);
143 static unsigned int gpmc_cs_map
; /* flag for cs which are initialized */
144 static struct device
*gpmc_dev
;
146 static resource_size_t phys_base
, mem_size
;
147 static unsigned gpmc_capability
;
148 static void __iomem
*gpmc_base
;
150 static struct clk
*gpmc_l3_clk
;
152 static irqreturn_t
gpmc_handle_irq(int irq
, void *dev
);
154 static void gpmc_write_reg(int idx
, u32 val
)
156 __raw_writel(val
, gpmc_base
+ idx
);
159 static u32
gpmc_read_reg(int idx
)
161 return __raw_readl(gpmc_base
+ idx
);
164 void gpmc_cs_write_reg(int cs
, int idx
, u32 val
)
166 void __iomem
*reg_addr
;
168 reg_addr
= gpmc_base
+ GPMC_CS0_OFFSET
+ (cs
* GPMC_CS_SIZE
) + idx
;
169 __raw_writel(val
, reg_addr
);
172 u32
gpmc_cs_read_reg(int cs
, int idx
)
174 void __iomem
*reg_addr
;
176 reg_addr
= gpmc_base
+ GPMC_CS0_OFFSET
+ (cs
* GPMC_CS_SIZE
) + idx
;
177 return __raw_readl(reg_addr
);
180 /* TODO: Add support for gpmc_fck to clock framework and use it */
181 unsigned long gpmc_get_fclk_period(void)
183 unsigned long rate
= clk_get_rate(gpmc_l3_clk
);
186 printk(KERN_WARNING
"gpmc_l3_clk not enabled\n");
191 rate
= 1000000000 / rate
; /* In picoseconds */
/*
 * Convert @time_ns nanoseconds to GPMC fclk ticks, rounding up so the
 * resulting timing is never shorter than requested.
 */
unsigned int gpmc_ns_to_ticks(unsigned int time_ns)
{
	/* Work in picoseconds to yield more exact results */
	unsigned long period_ps = gpmc_get_fclk_period();
	unsigned long time_ps = time_ns * 1000;

	return (time_ps + period_ps - 1) / period_ps;	/* round up */
}
/*
 * Convert @time_ps picoseconds to GPMC fclk ticks, rounding up so the
 * resulting timing is never shorter than requested.
 */
unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
{
	/* Work in picoseconds to yield more exact results */
	unsigned long period_ps = gpmc_get_fclk_period();

	return (time_ps + period_ps - 1) / period_ps;	/* round up */
}
/* Convert @ticks GPMC fclk ticks to nanoseconds (fclk period is in ps). */
unsigned int gpmc_ticks_to_ns(unsigned int ticks)
{
	unsigned long total_ps = ticks * gpmc_get_fclk_period();

	return total_ps / 1000;
}
/*
 * Round @time_ns up to a whole number of GPMC fclk ticks and convert
 * back to nanoseconds, i.e. the actual duration the hardware will use.
 */
unsigned int gpmc_round_ns_to_ticks(unsigned int time_ns)
{
	unsigned long nr_ticks = gpmc_ns_to_ticks(time_ns);

	return nr_ticks * gpmc_get_fclk_period() / 1000;
}
229 static int set_gpmc_timing_reg(int cs
, int reg
, int st_bit
, int end_bit
,
230 int time
, const char *name
)
232 static int set_gpmc_timing_reg(int cs
, int reg
, int st_bit
, int end_bit
,
237 int ticks
, mask
, nr_bits
;
242 ticks
= gpmc_ns_to_ticks(time
);
243 nr_bits
= end_bit
- st_bit
+ 1;
244 if (ticks
>= 1 << nr_bits
) {
246 printk(KERN_INFO
"GPMC CS%d: %-10s* %3d ns, %3d ticks >= %d\n",
247 cs
, name
, time
, ticks
, 1 << nr_bits
);
252 mask
= (1 << nr_bits
) - 1;
253 l
= gpmc_cs_read_reg(cs
, reg
);
256 "GPMC CS%d: %-10s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
257 cs
, name
, ticks
, gpmc_get_fclk_period() * ticks
/ 1000,
258 (l
>> st_bit
) & mask
, time
);
260 l
&= ~(mask
<< st_bit
);
261 l
|= ticks
<< st_bit
;
262 gpmc_cs_write_reg(cs
, reg
, l
);
268 #define GPMC_SET_ONE(reg, st, end, field) \
269 if (set_gpmc_timing_reg(cs, (reg), (st), (end), \
270 t->field, #field) < 0) \
273 #define GPMC_SET_ONE(reg, st, end, field) \
274 if (set_gpmc_timing_reg(cs, (reg), (st), (end), t->field) < 0) \
278 int gpmc_calc_divider(unsigned int sync_clk
)
283 l
= sync_clk
+ (gpmc_get_fclk_period() - 1);
284 div
= l
/ gpmc_get_fclk_period();
293 int gpmc_cs_set_timings(int cs
, const struct gpmc_timings
*t
)
298 div
= gpmc_calc_divider(t
->sync_clk
);
302 GPMC_SET_ONE(GPMC_CS_CONFIG2
, 0, 3, cs_on
);
303 GPMC_SET_ONE(GPMC_CS_CONFIG2
, 8, 12, cs_rd_off
);
304 GPMC_SET_ONE(GPMC_CS_CONFIG2
, 16, 20, cs_wr_off
);
306 GPMC_SET_ONE(GPMC_CS_CONFIG3
, 0, 3, adv_on
);
307 GPMC_SET_ONE(GPMC_CS_CONFIG3
, 8, 12, adv_rd_off
);
308 GPMC_SET_ONE(GPMC_CS_CONFIG3
, 16, 20, adv_wr_off
);
310 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 0, 3, oe_on
);
311 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 8, 12, oe_off
);
312 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 16, 19, we_on
);
313 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 24, 28, we_off
);
315 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 0, 4, rd_cycle
);
316 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 8, 12, wr_cycle
);
317 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 16, 20, access
);
319 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 24, 27, page_burst_access
);
321 if (gpmc_capability
& GPMC_HAS_WR_DATA_MUX_BUS
)
322 GPMC_SET_ONE(GPMC_CS_CONFIG6
, 16, 19, wr_data_mux_bus
);
323 if (gpmc_capability
& GPMC_HAS_WR_ACCESS
)
324 GPMC_SET_ONE(GPMC_CS_CONFIG6
, 24, 28, wr_access
);
326 /* caller is expected to have initialized CONFIG1 to cover
327 * at least sync vs async
329 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG1
);
330 if (l
& (GPMC_CONFIG1_READTYPE_SYNC
| GPMC_CONFIG1_WRITETYPE_SYNC
)) {
332 printk(KERN_INFO
"GPMC CS%d CLK period is %lu ns (div %d)\n",
333 cs
, (div
* gpmc_get_fclk_period()) / 1000, div
);
337 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG1
, l
);
343 static void gpmc_cs_enable_mem(int cs
, u32 base
, u32 size
)
348 mask
= (1 << GPMC_SECTION_SHIFT
) - size
;
349 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
351 l
= (base
>> GPMC_CHUNK_SHIFT
) & 0x3f;
353 l
|= ((mask
>> GPMC_CHUNK_SHIFT
) & 0x0f) << 8;
354 l
|= GPMC_CONFIG7_CSVALID
;
355 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG7
, l
);
358 static void gpmc_cs_disable_mem(int cs
)
362 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
363 l
&= ~GPMC_CONFIG7_CSVALID
;
364 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG7
, l
);
367 static void gpmc_cs_get_memconf(int cs
, u32
*base
, u32
*size
)
372 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
373 *base
= (l
& 0x3f) << GPMC_CHUNK_SHIFT
;
374 mask
= (l
>> 8) & 0x0f;
375 *size
= (1 << GPMC_SECTION_SHIFT
) - (mask
<< GPMC_CHUNK_SHIFT
);
378 static int gpmc_cs_mem_enabled(int cs
)
382 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
383 return l
& GPMC_CONFIG7_CSVALID
;
386 int gpmc_cs_set_reserved(int cs
, int reserved
)
388 if (cs
> GPMC_CS_NUM
)
391 gpmc_cs_map
&= ~(1 << cs
);
392 gpmc_cs_map
|= (reserved
? 1 : 0) << cs
;
397 int gpmc_cs_reserved(int cs
)
399 if (cs
> GPMC_CS_NUM
)
402 return gpmc_cs_map
& (1 << cs
);
405 static unsigned long gpmc_mem_align(unsigned long size
)
409 size
= (size
- 1) >> (GPMC_CHUNK_SHIFT
- 1);
410 order
= GPMC_CHUNK_SHIFT
- 1;
419 static int gpmc_cs_insert_mem(int cs
, unsigned long base
, unsigned long size
)
421 struct resource
*res
= &gpmc_cs_mem
[cs
];
424 size
= gpmc_mem_align(size
);
425 spin_lock(&gpmc_mem_lock
);
427 res
->end
= base
+ size
- 1;
428 r
= request_resource(&gpmc_mem_root
, res
);
429 spin_unlock(&gpmc_mem_lock
);
434 static int gpmc_cs_delete_mem(int cs
)
436 struct resource
*res
= &gpmc_cs_mem
[cs
];
439 spin_lock(&gpmc_mem_lock
);
440 r
= release_resource(&gpmc_cs_mem
[cs
]);
443 spin_unlock(&gpmc_mem_lock
);
448 int gpmc_cs_request(int cs
, unsigned long size
, unsigned long *base
)
450 struct resource
*res
= &gpmc_cs_mem
[cs
];
453 if (cs
> GPMC_CS_NUM
)
456 size
= gpmc_mem_align(size
);
457 if (size
> (1 << GPMC_SECTION_SHIFT
))
460 spin_lock(&gpmc_mem_lock
);
461 if (gpmc_cs_reserved(cs
)) {
465 if (gpmc_cs_mem_enabled(cs
))
466 r
= adjust_resource(res
, res
->start
& ~(size
- 1), size
);
468 r
= allocate_resource(&gpmc_mem_root
, res
, size
, 0, ~0,
473 gpmc_cs_enable_mem(cs
, res
->start
, resource_size(res
));
475 gpmc_cs_set_reserved(cs
, 1);
477 spin_unlock(&gpmc_mem_lock
);
480 EXPORT_SYMBOL(gpmc_cs_request
);
482 void gpmc_cs_free(int cs
)
484 spin_lock(&gpmc_mem_lock
);
485 if (cs
>= GPMC_CS_NUM
|| cs
< 0 || !gpmc_cs_reserved(cs
)) {
486 printk(KERN_ERR
"Trying to free non-reserved GPMC CS%d\n", cs
);
488 spin_unlock(&gpmc_mem_lock
);
491 gpmc_cs_disable_mem(cs
);
492 release_resource(&gpmc_cs_mem
[cs
]);
493 gpmc_cs_set_reserved(cs
, 0);
494 spin_unlock(&gpmc_mem_lock
);
496 EXPORT_SYMBOL(gpmc_cs_free
);
499 * gpmc_cs_configure - write request to configure gpmc
500 * @cs: chip select number
502 * @wval: value to write
503 * @return status of the operation
505 int gpmc_cs_configure(int cs
, int cmd
, int wval
)
511 case GPMC_ENABLE_IRQ
:
512 gpmc_write_reg(GPMC_IRQENABLE
, wval
);
515 case GPMC_SET_IRQ_STATUS
:
516 gpmc_write_reg(GPMC_IRQSTATUS
, wval
);
520 regval
= gpmc_read_reg(GPMC_CONFIG
);
522 regval
&= ~GPMC_CONFIG_WRITEPROTECT
; /* WP is ON */
524 regval
|= GPMC_CONFIG_WRITEPROTECT
; /* WP is OFF */
525 gpmc_write_reg(GPMC_CONFIG
, regval
);
528 case GPMC_CONFIG_RDY_BSY
:
529 regval
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG1
);
531 regval
|= WR_RD_PIN_MONITORING
;
533 regval
&= ~WR_RD_PIN_MONITORING
;
534 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG1
, regval
);
537 case GPMC_CONFIG_DEV_SIZE
:
538 regval
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG1
);
540 /* clear 2 target bits */
541 regval
&= ~GPMC_CONFIG1_DEVICESIZE(3);
543 /* set the proper value */
544 regval
|= GPMC_CONFIG1_DEVICESIZE(wval
);
546 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG1
, regval
);
549 case GPMC_CONFIG_DEV_TYPE
:
550 regval
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG1
);
551 regval
|= GPMC_CONFIG1_DEVICETYPE(wval
);
552 if (wval
== GPMC_DEVICETYPE_NOR
)
553 regval
|= GPMC_CONFIG1_MUXADDDATA
;
554 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG1
, regval
);
558 printk(KERN_ERR
"gpmc_configure_cs: Not supported\n");
564 EXPORT_SYMBOL(gpmc_cs_configure
);
566 void gpmc_update_nand_reg(struct gpmc_nand_regs
*reg
, int cs
)
570 reg
->gpmc_status
= gpmc_base
+ GPMC_STATUS
;
571 reg
->gpmc_nand_command
= gpmc_base
+ GPMC_CS0_OFFSET
+
572 GPMC_CS_NAND_COMMAND
+ GPMC_CS_SIZE
* cs
;
573 reg
->gpmc_nand_address
= gpmc_base
+ GPMC_CS0_OFFSET
+
574 GPMC_CS_NAND_ADDRESS
+ GPMC_CS_SIZE
* cs
;
575 reg
->gpmc_nand_data
= gpmc_base
+ GPMC_CS0_OFFSET
+
576 GPMC_CS_NAND_DATA
+ GPMC_CS_SIZE
* cs
;
577 reg
->gpmc_prefetch_config1
= gpmc_base
+ GPMC_PREFETCH_CONFIG1
;
578 reg
->gpmc_prefetch_config2
= gpmc_base
+ GPMC_PREFETCH_CONFIG2
;
579 reg
->gpmc_prefetch_control
= gpmc_base
+ GPMC_PREFETCH_CONTROL
;
580 reg
->gpmc_prefetch_status
= gpmc_base
+ GPMC_PREFETCH_STATUS
;
581 reg
->gpmc_ecc_config
= gpmc_base
+ GPMC_ECC_CONFIG
;
582 reg
->gpmc_ecc_control
= gpmc_base
+ GPMC_ECC_CONTROL
;
583 reg
->gpmc_ecc_size_config
= gpmc_base
+ GPMC_ECC_SIZE_CONFIG
;
584 reg
->gpmc_ecc1_result
= gpmc_base
+ GPMC_ECC1_RESULT
;
586 for (i
= 0; i
< GPMC_BCH_NUM_REMAINDER
; i
++) {
587 reg
->gpmc_bch_result0
[i
] = gpmc_base
+ GPMC_ECC_BCH_RESULT_0
+
589 reg
->gpmc_bch_result1
[i
] = gpmc_base
+ GPMC_ECC_BCH_RESULT_1
+
591 reg
->gpmc_bch_result2
[i
] = gpmc_base
+ GPMC_ECC_BCH_RESULT_2
+
593 reg
->gpmc_bch_result3
[i
] = gpmc_base
+ GPMC_ECC_BCH_RESULT_3
+
598 int gpmc_get_client_irq(unsigned irq_config
)
602 if (hweight32(irq_config
) > 1)
605 for (i
= 0; i
< GPMC_NR_IRQ
; i
++)
606 if (gpmc_client_irq
[i
].bitmask
& irq_config
)
607 return gpmc_client_irq
[i
].irq
;
612 static int gpmc_irq_endis(unsigned irq
, bool endis
)
617 for (i
= 0; i
< GPMC_NR_IRQ
; i
++)
618 if (irq
== gpmc_client_irq
[i
].irq
) {
619 regval
= gpmc_read_reg(GPMC_IRQENABLE
);
621 regval
|= gpmc_client_irq
[i
].bitmask
;
623 regval
&= ~gpmc_client_irq
[i
].bitmask
;
624 gpmc_write_reg(GPMC_IRQENABLE
, regval
);
631 static void gpmc_irq_disable(struct irq_data
*p
)
633 gpmc_irq_endis(p
->irq
, false);
636 static void gpmc_irq_enable(struct irq_data
*p
)
638 gpmc_irq_endis(p
->irq
, true);
/* Do-nothing irq_chip hook (used for ack/mask/unmask/shutdown). */
static void gpmc_irq_noop(struct irq_data *data)
{
}
/* Do-nothing irq_chip .irq_startup hook; always reports 0. */
static unsigned int gpmc_irq_noop_ret(struct irq_data *data)
{
	return 0;
}
645 static int gpmc_setup_irq(void)
653 gpmc_irq_start
= irq_alloc_descs(-1, 0, GPMC_NR_IRQ
, 0);
654 if (IS_ERR_VALUE(gpmc_irq_start
)) {
655 pr_err("irq_alloc_descs failed\n");
656 return gpmc_irq_start
;
659 gpmc_irq_chip
.name
= "gpmc";
660 gpmc_irq_chip
.irq_startup
= gpmc_irq_noop_ret
;
661 gpmc_irq_chip
.irq_enable
= gpmc_irq_enable
;
662 gpmc_irq_chip
.irq_disable
= gpmc_irq_disable
;
663 gpmc_irq_chip
.irq_shutdown
= gpmc_irq_noop
;
664 gpmc_irq_chip
.irq_ack
= gpmc_irq_noop
;
665 gpmc_irq_chip
.irq_mask
= gpmc_irq_noop
;
666 gpmc_irq_chip
.irq_unmask
= gpmc_irq_noop
;
668 gpmc_client_irq
[0].bitmask
= GPMC_IRQ_FIFOEVENTENABLE
;
669 gpmc_client_irq
[1].bitmask
= GPMC_IRQ_COUNT_EVENT
;
671 for (i
= 0; i
< GPMC_NR_IRQ
; i
++) {
672 gpmc_client_irq
[i
].irq
= gpmc_irq_start
+ i
;
673 irq_set_chip_and_handler(gpmc_client_irq
[i
].irq
,
674 &gpmc_irq_chip
, handle_simple_irq
);
675 set_irq_flags(gpmc_client_irq
[i
].irq
,
676 IRQF_VALID
| IRQF_NOAUTOEN
);
679 /* Disable interrupts */
680 gpmc_write_reg(GPMC_IRQENABLE
, 0);
682 /* clear interrupts */
683 regval
= gpmc_read_reg(GPMC_IRQSTATUS
);
684 gpmc_write_reg(GPMC_IRQSTATUS
, regval
);
686 return request_irq(gpmc_irq
, gpmc_handle_irq
, 0, "gpmc", NULL
);
689 static __devexit
int gpmc_free_irq(void)
694 free_irq(gpmc_irq
, NULL
);
696 for (i
= 0; i
< GPMC_NR_IRQ
; i
++) {
697 irq_set_handler(gpmc_client_irq
[i
].irq
, NULL
);
698 irq_set_chip(gpmc_client_irq
[i
].irq
, &no_irq_chip
);
699 irq_modify_status(gpmc_client_irq
[i
].irq
, 0, 0);
702 irq_free_descs(gpmc_irq_start
, GPMC_NR_IRQ
);
707 static void __devexit
gpmc_mem_exit(void)
711 for (cs
= 0; cs
< GPMC_CS_NUM
; cs
++) {
712 if (!gpmc_cs_mem_enabled(cs
))
714 gpmc_cs_delete_mem(cs
);
719 static void __devinit
gpmc_mem_init(void)
722 unsigned long boot_rom_space
= 0;
724 /* never allocate the first page, to facilitate bug detection;
725 * even if we didn't boot from ROM.
727 boot_rom_space
= BOOT_ROM_SPACE
;
728 /* In apollon the CS0 is mapped as 0x0000 0000 */
729 if (machine_is_omap_apollon())
731 gpmc_mem_root
.start
= GPMC_MEM_START
+ boot_rom_space
;
732 gpmc_mem_root
.end
= GPMC_MEM_END
;
734 /* Reserve all regions that has been set up by bootloader */
735 for (cs
= 0; cs
< GPMC_CS_NUM
; cs
++) {
738 if (!gpmc_cs_mem_enabled(cs
))
740 gpmc_cs_get_memconf(cs
, &base
, &size
);
741 if (gpmc_cs_insert_mem(cs
, base
, size
) < 0)
746 static __devinit
int gpmc_probe(struct platform_device
*pdev
)
749 struct resource
*res
;
751 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
755 phys_base
= res
->start
;
756 mem_size
= resource_size(res
);
758 gpmc_base
= devm_request_and_ioremap(&pdev
->dev
, res
);
760 dev_err(&pdev
->dev
, "error: request memory / ioremap\n");
761 return -EADDRNOTAVAIL
;
764 res
= platform_get_resource(pdev
, IORESOURCE_IRQ
, 0);
766 dev_warn(&pdev
->dev
, "Failed to get resource: irq\n");
768 gpmc_irq
= res
->start
;
770 gpmc_l3_clk
= clk_get(&pdev
->dev
, "fck");
771 if (IS_ERR(gpmc_l3_clk
)) {
772 dev_err(&pdev
->dev
, "error: clk_get\n");
774 return PTR_ERR(gpmc_l3_clk
);
777 clk_prepare_enable(gpmc_l3_clk
);
779 gpmc_dev
= &pdev
->dev
;
781 l
= gpmc_read_reg(GPMC_REVISION
);
782 if (GPMC_REVISION_MAJOR(l
) > 0x4)
783 gpmc_capability
= GPMC_HAS_WR_ACCESS
| GPMC_HAS_WR_DATA_MUX_BUS
;
784 dev_info(gpmc_dev
, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l
),
785 GPMC_REVISION_MINOR(l
));
789 if (IS_ERR_VALUE(gpmc_setup_irq()))
790 dev_warn(gpmc_dev
, "gpmc_setup_irq failed\n");
795 static __devexit
int gpmc_remove(struct platform_device
*pdev
)
803 static struct platform_driver gpmc_driver
= {
805 .remove
= __devexit_p(gpmc_remove
),
808 .owner
= THIS_MODULE
,
812 static __init
int gpmc_init(void)
814 return platform_driver_register(&gpmc_driver
);
817 static __exit
void gpmc_exit(void)
819 platform_driver_unregister(&gpmc_driver
);
823 postcore_initcall(gpmc_init
);
824 module_exit(gpmc_exit
);
826 static int __init
omap_gpmc_init(void)
828 struct omap_hwmod
*oh
;
829 struct platform_device
*pdev
;
830 char *oh_name
= "gpmc";
832 oh
= omap_hwmod_lookup(oh_name
);
834 pr_err("Could not look up %s\n", oh_name
);
838 pdev
= omap_device_build(DEVICE_NAME
, -1, oh
, NULL
, 0, NULL
, 0, 0);
839 WARN(IS_ERR(pdev
), "could not build omap_device for %s\n", oh_name
);
841 return IS_ERR(pdev
) ? PTR_ERR(pdev
) : 0;
843 postcore_initcall(omap_gpmc_init
);
845 static irqreturn_t
gpmc_handle_irq(int irq
, void *dev
)
850 regval
= gpmc_read_reg(GPMC_IRQSTATUS
);
855 for (i
= 0; i
< GPMC_NR_IRQ
; i
++)
856 if (regval
& gpmc_client_irq
[i
].bitmask
)
857 generic_handle_irq(gpmc_client_irq
[i
].irq
);
859 gpmc_write_reg(GPMC_IRQSTATUS
, regval
);
864 #ifdef CONFIG_ARCH_OMAP3
865 static struct omap3_gpmc_regs gpmc_context
;
867 void omap3_gpmc_save_context(void)
871 gpmc_context
.sysconfig
= gpmc_read_reg(GPMC_SYSCONFIG
);
872 gpmc_context
.irqenable
= gpmc_read_reg(GPMC_IRQENABLE
);
873 gpmc_context
.timeout_ctrl
= gpmc_read_reg(GPMC_TIMEOUT_CONTROL
);
874 gpmc_context
.config
= gpmc_read_reg(GPMC_CONFIG
);
875 gpmc_context
.prefetch_config1
= gpmc_read_reg(GPMC_PREFETCH_CONFIG1
);
876 gpmc_context
.prefetch_config2
= gpmc_read_reg(GPMC_PREFETCH_CONFIG2
);
877 gpmc_context
.prefetch_control
= gpmc_read_reg(GPMC_PREFETCH_CONTROL
);
878 for (i
= 0; i
< GPMC_CS_NUM
; i
++) {
879 gpmc_context
.cs_context
[i
].is_valid
= gpmc_cs_mem_enabled(i
);
880 if (gpmc_context
.cs_context
[i
].is_valid
) {
881 gpmc_context
.cs_context
[i
].config1
=
882 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG1
);
883 gpmc_context
.cs_context
[i
].config2
=
884 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG2
);
885 gpmc_context
.cs_context
[i
].config3
=
886 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG3
);
887 gpmc_context
.cs_context
[i
].config4
=
888 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG4
);
889 gpmc_context
.cs_context
[i
].config5
=
890 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG5
);
891 gpmc_context
.cs_context
[i
].config6
=
892 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG6
);
893 gpmc_context
.cs_context
[i
].config7
=
894 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG7
);
899 void omap3_gpmc_restore_context(void)
903 gpmc_write_reg(GPMC_SYSCONFIG
, gpmc_context
.sysconfig
);
904 gpmc_write_reg(GPMC_IRQENABLE
, gpmc_context
.irqenable
);
905 gpmc_write_reg(GPMC_TIMEOUT_CONTROL
, gpmc_context
.timeout_ctrl
);
906 gpmc_write_reg(GPMC_CONFIG
, gpmc_context
.config
);
907 gpmc_write_reg(GPMC_PREFETCH_CONFIG1
, gpmc_context
.prefetch_config1
);
908 gpmc_write_reg(GPMC_PREFETCH_CONFIG2
, gpmc_context
.prefetch_config2
);
909 gpmc_write_reg(GPMC_PREFETCH_CONTROL
, gpmc_context
.prefetch_control
);
910 for (i
= 0; i
< GPMC_CS_NUM
; i
++) {
911 if (gpmc_context
.cs_context
[i
].is_valid
) {
912 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG1
,
913 gpmc_context
.cs_context
[i
].config1
);
914 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG2
,
915 gpmc_context
.cs_context
[i
].config2
);
916 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG3
,
917 gpmc_context
.cs_context
[i
].config3
);
918 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG4
,
919 gpmc_context
.cs_context
[i
].config4
);
920 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG5
,
921 gpmc_context
.cs_context
[i
].config5
);
922 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG6
,
923 gpmc_context
.cs_context
[i
].config6
);
924 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG7
,
925 gpmc_context
.cs_context
[i
].config7
);
929 #endif /* CONFIG_ARCH_OMAP3 */