/*
 * GPMC support functions
 *
 * Copyright (C) 2005-2006 Nokia Corporation
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

#include <linux/platform_data/mtd-nand-omap2.h>

#include <asm/mach-types.h>

#include "omap_device.h"
38 #define DEVICE_NAME "omap-gpmc"
40 /* GPMC register offsets */
41 #define GPMC_REVISION 0x00
42 #define GPMC_SYSCONFIG 0x10
43 #define GPMC_SYSSTATUS 0x14
44 #define GPMC_IRQSTATUS 0x18
45 #define GPMC_IRQENABLE 0x1c
46 #define GPMC_TIMEOUT_CONTROL 0x40
47 #define GPMC_ERR_ADDRESS 0x44
48 #define GPMC_ERR_TYPE 0x48
49 #define GPMC_CONFIG 0x50
50 #define GPMC_STATUS 0x54
51 #define GPMC_PREFETCH_CONFIG1 0x1e0
52 #define GPMC_PREFETCH_CONFIG2 0x1e4
53 #define GPMC_PREFETCH_CONTROL 0x1ec
54 #define GPMC_PREFETCH_STATUS 0x1f0
55 #define GPMC_ECC_CONFIG 0x1f4
56 #define GPMC_ECC_CONTROL 0x1f8
57 #define GPMC_ECC_SIZE_CONFIG 0x1fc
58 #define GPMC_ECC1_RESULT 0x200
59 #define GPMC_ECC_BCH_RESULT_0 0x240 /* not available on OMAP2 */
60 #define GPMC_ECC_BCH_RESULT_1 0x244 /* not available on OMAP2 */
61 #define GPMC_ECC_BCH_RESULT_2 0x248 /* not available on OMAP2 */
62 #define GPMC_ECC_BCH_RESULT_3 0x24c /* not available on OMAP2 */
64 /* GPMC ECC control settings */
65 #define GPMC_ECC_CTRL_ECCCLEAR 0x100
66 #define GPMC_ECC_CTRL_ECCDISABLE 0x000
67 #define GPMC_ECC_CTRL_ECCREG1 0x001
68 #define GPMC_ECC_CTRL_ECCREG2 0x002
69 #define GPMC_ECC_CTRL_ECCREG3 0x003
70 #define GPMC_ECC_CTRL_ECCREG4 0x004
71 #define GPMC_ECC_CTRL_ECCREG5 0x005
72 #define GPMC_ECC_CTRL_ECCREG6 0x006
73 #define GPMC_ECC_CTRL_ECCREG7 0x007
74 #define GPMC_ECC_CTRL_ECCREG8 0x008
75 #define GPMC_ECC_CTRL_ECCREG9 0x009
77 #define GPMC_CS0_OFFSET 0x60
78 #define GPMC_CS_SIZE 0x30
79 #define GPMC_BCH_SIZE 0x10
81 #define GPMC_MEM_START 0x00000000
82 #define GPMC_MEM_END 0x3FFFFFFF
83 #define BOOT_ROM_SPACE 0x100000 /* 1MB */
85 #define GPMC_CHUNK_SHIFT 24 /* 16 MB */
86 #define GPMC_SECTION_SHIFT 28 /* 128 MB */
88 #define CS_NUM_SHIFT 24
89 #define ENABLE_PREFETCH (0x1 << 7)
90 #define DMA_MPU_MODE 2
92 #define GPMC_REVISION_MAJOR(l) ((l >> 4) & 0xf)
93 #define GPMC_REVISION_MINOR(l) (l & 0xf)
95 #define GPMC_HAS_WR_ACCESS 0x1
96 #define GPMC_HAS_WR_DATA_MUX_BUS 0x2
98 /* XXX: Only NAND irq has been considered,currently these are the only ones used
100 #define GPMC_NR_IRQ 2
102 struct gpmc_client_irq
{
107 /* Structure to save gpmc cs context */
108 struct gpmc_cs_config
{
120 * Structure to save/restore gpmc context
121 * to support core off on OMAP3
123 struct omap3_gpmc_regs
{
128 u32 prefetch_config1
;
129 u32 prefetch_config2
;
130 u32 prefetch_control
;
131 struct gpmc_cs_config cs_context
[GPMC_CS_NUM
];
134 static struct gpmc_client_irq gpmc_client_irq
[GPMC_NR_IRQ
];
135 static struct irq_chip gpmc_irq_chip
;
136 static unsigned gpmc_irq_start
;
138 static struct resource gpmc_mem_root
;
139 static struct resource gpmc_cs_mem
[GPMC_CS_NUM
];
140 static DEFINE_SPINLOCK(gpmc_mem_lock
);
141 static unsigned int gpmc_cs_map
; /* flag for cs which are initialized */
142 static struct device
*gpmc_dev
;
144 static resource_size_t phys_base
, mem_size
;
145 static unsigned gpmc_capability
;
146 static void __iomem
*gpmc_base
;
148 static struct clk
*gpmc_l3_clk
;
150 static irqreturn_t
gpmc_handle_irq(int irq
, void *dev
);
152 static void gpmc_write_reg(int idx
, u32 val
)
154 __raw_writel(val
, gpmc_base
+ idx
);
157 static u32
gpmc_read_reg(int idx
)
159 return __raw_readl(gpmc_base
+ idx
);
162 void gpmc_cs_write_reg(int cs
, int idx
, u32 val
)
164 void __iomem
*reg_addr
;
166 reg_addr
= gpmc_base
+ GPMC_CS0_OFFSET
+ (cs
* GPMC_CS_SIZE
) + idx
;
167 __raw_writel(val
, reg_addr
);
170 u32
gpmc_cs_read_reg(int cs
, int idx
)
172 void __iomem
*reg_addr
;
174 reg_addr
= gpmc_base
+ GPMC_CS0_OFFSET
+ (cs
* GPMC_CS_SIZE
) + idx
;
175 return __raw_readl(reg_addr
);
178 /* TODO: Add support for gpmc_fck to clock framework and use it */
179 unsigned long gpmc_get_fclk_period(void)
181 unsigned long rate
= clk_get_rate(gpmc_l3_clk
);
184 printk(KERN_WARNING
"gpmc_l3_clk not enabled\n");
189 rate
= 1000000000 / rate
; /* In picoseconds */
194 unsigned int gpmc_ns_to_ticks(unsigned int time_ns
)
196 unsigned long tick_ps
;
198 /* Calculate in picosecs to yield more exact results */
199 tick_ps
= gpmc_get_fclk_period();
201 return (time_ns
* 1000 + tick_ps
- 1) / tick_ps
;
204 unsigned int gpmc_ps_to_ticks(unsigned int time_ps
)
206 unsigned long tick_ps
;
208 /* Calculate in picosecs to yield more exact results */
209 tick_ps
= gpmc_get_fclk_period();
211 return (time_ps
+ tick_ps
- 1) / tick_ps
;
214 unsigned int gpmc_ticks_to_ns(unsigned int ticks
)
216 return ticks
* gpmc_get_fclk_period() / 1000;
219 unsigned int gpmc_round_ns_to_ticks(unsigned int time_ns
)
221 unsigned long ticks
= gpmc_ns_to_ticks(time_ns
);
223 return ticks
* gpmc_get_fclk_period() / 1000;
227 static int set_gpmc_timing_reg(int cs
, int reg
, int st_bit
, int end_bit
,
228 int time
, const char *name
)
230 static int set_gpmc_timing_reg(int cs
, int reg
, int st_bit
, int end_bit
,
235 int ticks
, mask
, nr_bits
;
240 ticks
= gpmc_ns_to_ticks(time
);
241 nr_bits
= end_bit
- st_bit
+ 1;
242 if (ticks
>= 1 << nr_bits
) {
244 printk(KERN_INFO
"GPMC CS%d: %-10s* %3d ns, %3d ticks >= %d\n",
245 cs
, name
, time
, ticks
, 1 << nr_bits
);
250 mask
= (1 << nr_bits
) - 1;
251 l
= gpmc_cs_read_reg(cs
, reg
);
254 "GPMC CS%d: %-10s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
255 cs
, name
, ticks
, gpmc_get_fclk_period() * ticks
/ 1000,
256 (l
>> st_bit
) & mask
, time
);
258 l
&= ~(mask
<< st_bit
);
259 l
|= ticks
<< st_bit
;
260 gpmc_cs_write_reg(cs
, reg
, l
);
266 #define GPMC_SET_ONE(reg, st, end, field) \
267 if (set_gpmc_timing_reg(cs, (reg), (st), (end), \
268 t->field, #field) < 0) \
271 #define GPMC_SET_ONE(reg, st, end, field) \
272 if (set_gpmc_timing_reg(cs, (reg), (st), (end), t->field) < 0) \
276 int gpmc_calc_divider(unsigned int sync_clk
)
281 l
= sync_clk
+ (gpmc_get_fclk_period() - 1);
282 div
= l
/ gpmc_get_fclk_period();
291 int gpmc_cs_set_timings(int cs
, const struct gpmc_timings
*t
)
296 div
= gpmc_calc_divider(t
->sync_clk
);
300 GPMC_SET_ONE(GPMC_CS_CONFIG2
, 0, 3, cs_on
);
301 GPMC_SET_ONE(GPMC_CS_CONFIG2
, 8, 12, cs_rd_off
);
302 GPMC_SET_ONE(GPMC_CS_CONFIG2
, 16, 20, cs_wr_off
);
304 GPMC_SET_ONE(GPMC_CS_CONFIG3
, 0, 3, adv_on
);
305 GPMC_SET_ONE(GPMC_CS_CONFIG3
, 8, 12, adv_rd_off
);
306 GPMC_SET_ONE(GPMC_CS_CONFIG3
, 16, 20, adv_wr_off
);
308 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 0, 3, oe_on
);
309 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 8, 12, oe_off
);
310 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 16, 19, we_on
);
311 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 24, 28, we_off
);
313 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 0, 4, rd_cycle
);
314 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 8, 12, wr_cycle
);
315 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 16, 20, access
);
317 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 24, 27, page_burst_access
);
319 if (gpmc_capability
& GPMC_HAS_WR_DATA_MUX_BUS
)
320 GPMC_SET_ONE(GPMC_CS_CONFIG6
, 16, 19, wr_data_mux_bus
);
321 if (gpmc_capability
& GPMC_HAS_WR_ACCESS
)
322 GPMC_SET_ONE(GPMC_CS_CONFIG6
, 24, 28, wr_access
);
324 /* caller is expected to have initialized CONFIG1 to cover
325 * at least sync vs async
327 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG1
);
328 if (l
& (GPMC_CONFIG1_READTYPE_SYNC
| GPMC_CONFIG1_WRITETYPE_SYNC
)) {
330 printk(KERN_INFO
"GPMC CS%d CLK period is %lu ns (div %d)\n",
331 cs
, (div
* gpmc_get_fclk_period()) / 1000, div
);
335 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG1
, l
);
341 static void gpmc_cs_enable_mem(int cs
, u32 base
, u32 size
)
346 mask
= (1 << GPMC_SECTION_SHIFT
) - size
;
347 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
349 l
= (base
>> GPMC_CHUNK_SHIFT
) & 0x3f;
351 l
|= ((mask
>> GPMC_CHUNK_SHIFT
) & 0x0f) << 8;
352 l
|= GPMC_CONFIG7_CSVALID
;
353 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG7
, l
);
356 static void gpmc_cs_disable_mem(int cs
)
360 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
361 l
&= ~GPMC_CONFIG7_CSVALID
;
362 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG7
, l
);
365 static void gpmc_cs_get_memconf(int cs
, u32
*base
, u32
*size
)
370 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
371 *base
= (l
& 0x3f) << GPMC_CHUNK_SHIFT
;
372 mask
= (l
>> 8) & 0x0f;
373 *size
= (1 << GPMC_SECTION_SHIFT
) - (mask
<< GPMC_CHUNK_SHIFT
);
376 static int gpmc_cs_mem_enabled(int cs
)
380 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
381 return l
& GPMC_CONFIG7_CSVALID
;
384 int gpmc_cs_set_reserved(int cs
, int reserved
)
386 if (cs
> GPMC_CS_NUM
)
389 gpmc_cs_map
&= ~(1 << cs
);
390 gpmc_cs_map
|= (reserved
? 1 : 0) << cs
;
395 int gpmc_cs_reserved(int cs
)
397 if (cs
> GPMC_CS_NUM
)
400 return gpmc_cs_map
& (1 << cs
);
403 static unsigned long gpmc_mem_align(unsigned long size
)
407 size
= (size
- 1) >> (GPMC_CHUNK_SHIFT
- 1);
408 order
= GPMC_CHUNK_SHIFT
- 1;
417 static int gpmc_cs_insert_mem(int cs
, unsigned long base
, unsigned long size
)
419 struct resource
*res
= &gpmc_cs_mem
[cs
];
422 size
= gpmc_mem_align(size
);
423 spin_lock(&gpmc_mem_lock
);
425 res
->end
= base
+ size
- 1;
426 r
= request_resource(&gpmc_mem_root
, res
);
427 spin_unlock(&gpmc_mem_lock
);
432 static int gpmc_cs_delete_mem(int cs
)
434 struct resource
*res
= &gpmc_cs_mem
[cs
];
437 spin_lock(&gpmc_mem_lock
);
438 r
= release_resource(&gpmc_cs_mem
[cs
]);
441 spin_unlock(&gpmc_mem_lock
);
446 int gpmc_cs_request(int cs
, unsigned long size
, unsigned long *base
)
448 struct resource
*res
= &gpmc_cs_mem
[cs
];
451 if (cs
> GPMC_CS_NUM
)
454 size
= gpmc_mem_align(size
);
455 if (size
> (1 << GPMC_SECTION_SHIFT
))
458 spin_lock(&gpmc_mem_lock
);
459 if (gpmc_cs_reserved(cs
)) {
463 if (gpmc_cs_mem_enabled(cs
))
464 r
= adjust_resource(res
, res
->start
& ~(size
- 1), size
);
466 r
= allocate_resource(&gpmc_mem_root
, res
, size
, 0, ~0,
471 gpmc_cs_enable_mem(cs
, res
->start
, resource_size(res
));
473 gpmc_cs_set_reserved(cs
, 1);
475 spin_unlock(&gpmc_mem_lock
);
478 EXPORT_SYMBOL(gpmc_cs_request
);
480 void gpmc_cs_free(int cs
)
482 spin_lock(&gpmc_mem_lock
);
483 if (cs
>= GPMC_CS_NUM
|| cs
< 0 || !gpmc_cs_reserved(cs
)) {
484 printk(KERN_ERR
"Trying to free non-reserved GPMC CS%d\n", cs
);
486 spin_unlock(&gpmc_mem_lock
);
489 gpmc_cs_disable_mem(cs
);
490 release_resource(&gpmc_cs_mem
[cs
]);
491 gpmc_cs_set_reserved(cs
, 0);
492 spin_unlock(&gpmc_mem_lock
);
494 EXPORT_SYMBOL(gpmc_cs_free
);
497 * gpmc_cs_configure - write request to configure gpmc
498 * @cs: chip select number
500 * @wval: value to write
501 * @return status of the operation
503 int gpmc_cs_configure(int cs
, int cmd
, int wval
)
509 case GPMC_ENABLE_IRQ
:
510 gpmc_write_reg(GPMC_IRQENABLE
, wval
);
513 case GPMC_SET_IRQ_STATUS
:
514 gpmc_write_reg(GPMC_IRQSTATUS
, wval
);
518 regval
= gpmc_read_reg(GPMC_CONFIG
);
520 regval
&= ~GPMC_CONFIG_WRITEPROTECT
; /* WP is ON */
522 regval
|= GPMC_CONFIG_WRITEPROTECT
; /* WP is OFF */
523 gpmc_write_reg(GPMC_CONFIG
, regval
);
526 case GPMC_CONFIG_RDY_BSY
:
527 regval
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG1
);
529 regval
|= WR_RD_PIN_MONITORING
;
531 regval
&= ~WR_RD_PIN_MONITORING
;
532 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG1
, regval
);
535 case GPMC_CONFIG_DEV_SIZE
:
536 regval
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG1
);
538 /* clear 2 target bits */
539 regval
&= ~GPMC_CONFIG1_DEVICESIZE(3);
541 /* set the proper value */
542 regval
|= GPMC_CONFIG1_DEVICESIZE(wval
);
544 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG1
, regval
);
547 case GPMC_CONFIG_DEV_TYPE
:
548 regval
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG1
);
549 regval
|= GPMC_CONFIG1_DEVICETYPE(wval
);
550 if (wval
== GPMC_DEVICETYPE_NOR
)
551 regval
|= GPMC_CONFIG1_MUXADDDATA
;
552 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG1
, regval
);
556 printk(KERN_ERR
"gpmc_configure_cs: Not supported\n");
562 EXPORT_SYMBOL(gpmc_cs_configure
);
564 void gpmc_update_nand_reg(struct gpmc_nand_regs
*reg
, int cs
)
568 reg
->gpmc_status
= gpmc_base
+ GPMC_STATUS
;
569 reg
->gpmc_nand_command
= gpmc_base
+ GPMC_CS0_OFFSET
+
570 GPMC_CS_NAND_COMMAND
+ GPMC_CS_SIZE
* cs
;
571 reg
->gpmc_nand_address
= gpmc_base
+ GPMC_CS0_OFFSET
+
572 GPMC_CS_NAND_ADDRESS
+ GPMC_CS_SIZE
* cs
;
573 reg
->gpmc_nand_data
= gpmc_base
+ GPMC_CS0_OFFSET
+
574 GPMC_CS_NAND_DATA
+ GPMC_CS_SIZE
* cs
;
575 reg
->gpmc_prefetch_config1
= gpmc_base
+ GPMC_PREFETCH_CONFIG1
;
576 reg
->gpmc_prefetch_config2
= gpmc_base
+ GPMC_PREFETCH_CONFIG2
;
577 reg
->gpmc_prefetch_control
= gpmc_base
+ GPMC_PREFETCH_CONTROL
;
578 reg
->gpmc_prefetch_status
= gpmc_base
+ GPMC_PREFETCH_STATUS
;
579 reg
->gpmc_ecc_config
= gpmc_base
+ GPMC_ECC_CONFIG
;
580 reg
->gpmc_ecc_control
= gpmc_base
+ GPMC_ECC_CONTROL
;
581 reg
->gpmc_ecc_size_config
= gpmc_base
+ GPMC_ECC_SIZE_CONFIG
;
582 reg
->gpmc_ecc1_result
= gpmc_base
+ GPMC_ECC1_RESULT
;
584 for (i
= 0; i
< GPMC_BCH_NUM_REMAINDER
; i
++) {
585 reg
->gpmc_bch_result0
[i
] = gpmc_base
+ GPMC_ECC_BCH_RESULT_0
+
587 reg
->gpmc_bch_result1
[i
] = gpmc_base
+ GPMC_ECC_BCH_RESULT_1
+
589 reg
->gpmc_bch_result2
[i
] = gpmc_base
+ GPMC_ECC_BCH_RESULT_2
+
591 reg
->gpmc_bch_result3
[i
] = gpmc_base
+ GPMC_ECC_BCH_RESULT_3
+
596 int gpmc_get_client_irq(unsigned irq_config
)
600 if (hweight32(irq_config
) > 1)
603 for (i
= 0; i
< GPMC_NR_IRQ
; i
++)
604 if (gpmc_client_irq
[i
].bitmask
& irq_config
)
605 return gpmc_client_irq
[i
].irq
;
610 static int gpmc_irq_endis(unsigned irq
, bool endis
)
615 for (i
= 0; i
< GPMC_NR_IRQ
; i
++)
616 if (irq
== gpmc_client_irq
[i
].irq
) {
617 regval
= gpmc_read_reg(GPMC_IRQENABLE
);
619 regval
|= gpmc_client_irq
[i
].bitmask
;
621 regval
&= ~gpmc_client_irq
[i
].bitmask
;
622 gpmc_write_reg(GPMC_IRQENABLE
, regval
);
629 static void gpmc_irq_disable(struct irq_data
*p
)
631 gpmc_irq_endis(p
->irq
, false);
634 static void gpmc_irq_enable(struct irq_data
*p
)
636 gpmc_irq_endis(p
->irq
, true);
639 static void gpmc_irq_noop(struct irq_data
*data
) { }
641 static unsigned int gpmc_irq_noop_ret(struct irq_data
*data
) { return 0; }
643 static int gpmc_setup_irq(void)
651 gpmc_irq_start
= irq_alloc_descs(-1, 0, GPMC_NR_IRQ
, 0);
652 if (IS_ERR_VALUE(gpmc_irq_start
)) {
653 pr_err("irq_alloc_descs failed\n");
654 return gpmc_irq_start
;
657 gpmc_irq_chip
.name
= "gpmc";
658 gpmc_irq_chip
.irq_startup
= gpmc_irq_noop_ret
;
659 gpmc_irq_chip
.irq_enable
= gpmc_irq_enable
;
660 gpmc_irq_chip
.irq_disable
= gpmc_irq_disable
;
661 gpmc_irq_chip
.irq_shutdown
= gpmc_irq_noop
;
662 gpmc_irq_chip
.irq_ack
= gpmc_irq_noop
;
663 gpmc_irq_chip
.irq_mask
= gpmc_irq_noop
;
664 gpmc_irq_chip
.irq_unmask
= gpmc_irq_noop
;
666 gpmc_client_irq
[0].bitmask
= GPMC_IRQ_FIFOEVENTENABLE
;
667 gpmc_client_irq
[1].bitmask
= GPMC_IRQ_COUNT_EVENT
;
669 for (i
= 0; i
< GPMC_NR_IRQ
; i
++) {
670 gpmc_client_irq
[i
].irq
= gpmc_irq_start
+ i
;
671 irq_set_chip_and_handler(gpmc_client_irq
[i
].irq
,
672 &gpmc_irq_chip
, handle_simple_irq
);
673 set_irq_flags(gpmc_client_irq
[i
].irq
,
674 IRQF_VALID
| IRQF_NOAUTOEN
);
677 /* Disable interrupts */
678 gpmc_write_reg(GPMC_IRQENABLE
, 0);
680 /* clear interrupts */
681 regval
= gpmc_read_reg(GPMC_IRQSTATUS
);
682 gpmc_write_reg(GPMC_IRQSTATUS
, regval
);
684 return request_irq(gpmc_irq
, gpmc_handle_irq
, 0, "gpmc", NULL
);
687 static __devexit
int gpmc_free_irq(void)
692 free_irq(gpmc_irq
, NULL
);
694 for (i
= 0; i
< GPMC_NR_IRQ
; i
++) {
695 irq_set_handler(gpmc_client_irq
[i
].irq
, NULL
);
696 irq_set_chip(gpmc_client_irq
[i
].irq
, &no_irq_chip
);
697 irq_modify_status(gpmc_client_irq
[i
].irq
, 0, 0);
700 irq_free_descs(gpmc_irq_start
, GPMC_NR_IRQ
);
705 static void __devexit
gpmc_mem_exit(void)
709 for (cs
= 0; cs
< GPMC_CS_NUM
; cs
++) {
710 if (!gpmc_cs_mem_enabled(cs
))
712 gpmc_cs_delete_mem(cs
);
717 static int __devinit
gpmc_mem_init(void)
720 unsigned long boot_rom_space
= 0;
722 /* never allocate the first page, to facilitate bug detection;
723 * even if we didn't boot from ROM.
725 boot_rom_space
= BOOT_ROM_SPACE
;
726 /* In apollon the CS0 is mapped as 0x0000 0000 */
727 if (machine_is_omap_apollon())
729 gpmc_mem_root
.start
= GPMC_MEM_START
+ boot_rom_space
;
730 gpmc_mem_root
.end
= GPMC_MEM_END
;
732 /* Reserve all regions that has been set up by bootloader */
733 for (cs
= 0; cs
< GPMC_CS_NUM
; cs
++) {
736 if (!gpmc_cs_mem_enabled(cs
))
738 gpmc_cs_get_memconf(cs
, &base
, &size
);
739 rc
= gpmc_cs_insert_mem(cs
, base
, size
);
740 if (IS_ERR_VALUE(rc
)) {
742 if (gpmc_cs_mem_enabled(cs
))
743 gpmc_cs_delete_mem(cs
);
751 static __devinit
int gpmc_probe(struct platform_device
*pdev
)
755 struct resource
*res
;
757 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
761 phys_base
= res
->start
;
762 mem_size
= resource_size(res
);
764 gpmc_base
= devm_request_and_ioremap(&pdev
->dev
, res
);
766 dev_err(&pdev
->dev
, "error: request memory / ioremap\n");
767 return -EADDRNOTAVAIL
;
770 res
= platform_get_resource(pdev
, IORESOURCE_IRQ
, 0);
772 dev_warn(&pdev
->dev
, "Failed to get resource: irq\n");
774 gpmc_irq
= res
->start
;
776 gpmc_l3_clk
= clk_get(&pdev
->dev
, "fck");
777 if (IS_ERR(gpmc_l3_clk
)) {
778 dev_err(&pdev
->dev
, "error: clk_get\n");
780 return PTR_ERR(gpmc_l3_clk
);
783 clk_prepare_enable(gpmc_l3_clk
);
785 gpmc_dev
= &pdev
->dev
;
787 l
= gpmc_read_reg(GPMC_REVISION
);
788 if (GPMC_REVISION_MAJOR(l
) > 0x4)
789 gpmc_capability
= GPMC_HAS_WR_ACCESS
| GPMC_HAS_WR_DATA_MUX_BUS
;
790 dev_info(gpmc_dev
, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l
),
791 GPMC_REVISION_MINOR(l
));
793 rc
= gpmc_mem_init();
794 if (IS_ERR_VALUE(rc
)) {
795 clk_disable_unprepare(gpmc_l3_clk
);
796 clk_put(gpmc_l3_clk
);
797 dev_err(gpmc_dev
, "failed to reserve memory\n");
801 if (IS_ERR_VALUE(gpmc_setup_irq()))
802 dev_warn(gpmc_dev
, "gpmc_setup_irq failed\n");
807 static __devexit
int gpmc_remove(struct platform_device
*pdev
)
815 static struct platform_driver gpmc_driver
= {
817 .remove
= __devexit_p(gpmc_remove
),
820 .owner
= THIS_MODULE
,
824 static __init
int gpmc_init(void)
826 return platform_driver_register(&gpmc_driver
);
829 static __exit
void gpmc_exit(void)
831 platform_driver_unregister(&gpmc_driver
);
835 postcore_initcall(gpmc_init
);
836 module_exit(gpmc_exit
);
838 static int __init
omap_gpmc_init(void)
840 struct omap_hwmod
*oh
;
841 struct platform_device
*pdev
;
842 char *oh_name
= "gpmc";
844 oh
= omap_hwmod_lookup(oh_name
);
846 pr_err("Could not look up %s\n", oh_name
);
850 pdev
= omap_device_build(DEVICE_NAME
, -1, oh
, NULL
, 0, NULL
, 0, 0);
851 WARN(IS_ERR(pdev
), "could not build omap_device for %s\n", oh_name
);
853 return IS_ERR(pdev
) ? PTR_ERR(pdev
) : 0;
855 postcore_initcall(omap_gpmc_init
);
857 static irqreturn_t
gpmc_handle_irq(int irq
, void *dev
)
862 regval
= gpmc_read_reg(GPMC_IRQSTATUS
);
867 for (i
= 0; i
< GPMC_NR_IRQ
; i
++)
868 if (regval
& gpmc_client_irq
[i
].bitmask
)
869 generic_handle_irq(gpmc_client_irq
[i
].irq
);
871 gpmc_write_reg(GPMC_IRQSTATUS
, regval
);
876 #ifdef CONFIG_ARCH_OMAP3
877 static struct omap3_gpmc_regs gpmc_context
;
879 void omap3_gpmc_save_context(void)
883 gpmc_context
.sysconfig
= gpmc_read_reg(GPMC_SYSCONFIG
);
884 gpmc_context
.irqenable
= gpmc_read_reg(GPMC_IRQENABLE
);
885 gpmc_context
.timeout_ctrl
= gpmc_read_reg(GPMC_TIMEOUT_CONTROL
);
886 gpmc_context
.config
= gpmc_read_reg(GPMC_CONFIG
);
887 gpmc_context
.prefetch_config1
= gpmc_read_reg(GPMC_PREFETCH_CONFIG1
);
888 gpmc_context
.prefetch_config2
= gpmc_read_reg(GPMC_PREFETCH_CONFIG2
);
889 gpmc_context
.prefetch_control
= gpmc_read_reg(GPMC_PREFETCH_CONTROL
);
890 for (i
= 0; i
< GPMC_CS_NUM
; i
++) {
891 gpmc_context
.cs_context
[i
].is_valid
= gpmc_cs_mem_enabled(i
);
892 if (gpmc_context
.cs_context
[i
].is_valid
) {
893 gpmc_context
.cs_context
[i
].config1
=
894 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG1
);
895 gpmc_context
.cs_context
[i
].config2
=
896 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG2
);
897 gpmc_context
.cs_context
[i
].config3
=
898 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG3
);
899 gpmc_context
.cs_context
[i
].config4
=
900 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG4
);
901 gpmc_context
.cs_context
[i
].config5
=
902 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG5
);
903 gpmc_context
.cs_context
[i
].config6
=
904 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG6
);
905 gpmc_context
.cs_context
[i
].config7
=
906 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG7
);
911 void omap3_gpmc_restore_context(void)
915 gpmc_write_reg(GPMC_SYSCONFIG
, gpmc_context
.sysconfig
);
916 gpmc_write_reg(GPMC_IRQENABLE
, gpmc_context
.irqenable
);
917 gpmc_write_reg(GPMC_TIMEOUT_CONTROL
, gpmc_context
.timeout_ctrl
);
918 gpmc_write_reg(GPMC_CONFIG
, gpmc_context
.config
);
919 gpmc_write_reg(GPMC_PREFETCH_CONFIG1
, gpmc_context
.prefetch_config1
);
920 gpmc_write_reg(GPMC_PREFETCH_CONFIG2
, gpmc_context
.prefetch_config2
);
921 gpmc_write_reg(GPMC_PREFETCH_CONTROL
, gpmc_context
.prefetch_control
);
922 for (i
= 0; i
< GPMC_CS_NUM
; i
++) {
923 if (gpmc_context
.cs_context
[i
].is_valid
) {
924 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG1
,
925 gpmc_context
.cs_context
[i
].config1
);
926 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG2
,
927 gpmc_context
.cs_context
[i
].config2
);
928 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG3
,
929 gpmc_context
.cs_context
[i
].config3
);
930 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG4
,
931 gpmc_context
.cs_context
[i
].config4
);
932 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG5
,
933 gpmc_context
.cs_context
[i
].config5
);
934 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG6
,
935 gpmc_context
.cs_context
[i
].config6
);
936 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG7
,
937 gpmc_context
.cs_context
[i
].config7
);
941 #endif /* CONFIG_ARCH_OMAP3 */