/*
 * GPMC support functions
 *
 * Copyright (C) 2005-2006 Nokia Corporation
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
17 #include <linux/irq.h>
18 #include <linux/kernel.h>
19 #include <linux/init.h>
20 #include <linux/err.h>
21 #include <linux/clk.h>
22 #include <linux/ioport.h>
23 #include <linux/spinlock.h>
25 #include <linux/module.h>
26 #include <linux/interrupt.h>
27 #include <linux/platform_device.h>
29 #include <linux/platform_data/mtd-nand-omap2.h>
31 #include <asm/mach-types.h>
35 #include "omap_device.h"
38 #define DEVICE_NAME "omap-gpmc"
40 /* GPMC register offsets */
41 #define GPMC_REVISION 0x00
42 #define GPMC_SYSCONFIG 0x10
43 #define GPMC_SYSSTATUS 0x14
44 #define GPMC_IRQSTATUS 0x18
45 #define GPMC_IRQENABLE 0x1c
46 #define GPMC_TIMEOUT_CONTROL 0x40
47 #define GPMC_ERR_ADDRESS 0x44
48 #define GPMC_ERR_TYPE 0x48
49 #define GPMC_CONFIG 0x50
50 #define GPMC_STATUS 0x54
51 #define GPMC_PREFETCH_CONFIG1 0x1e0
52 #define GPMC_PREFETCH_CONFIG2 0x1e4
53 #define GPMC_PREFETCH_CONTROL 0x1ec
54 #define GPMC_PREFETCH_STATUS 0x1f0
55 #define GPMC_ECC_CONFIG 0x1f4
56 #define GPMC_ECC_CONTROL 0x1f8
57 #define GPMC_ECC_SIZE_CONFIG 0x1fc
58 #define GPMC_ECC1_RESULT 0x200
59 #define GPMC_ECC_BCH_RESULT_0 0x240 /* not available on OMAP2 */
60 #define GPMC_ECC_BCH_RESULT_1 0x244 /* not available on OMAP2 */
61 #define GPMC_ECC_BCH_RESULT_2 0x248 /* not available on OMAP2 */
62 #define GPMC_ECC_BCH_RESULT_3 0x24c /* not available on OMAP2 */
64 /* GPMC ECC control settings */
65 #define GPMC_ECC_CTRL_ECCCLEAR 0x100
66 #define GPMC_ECC_CTRL_ECCDISABLE 0x000
67 #define GPMC_ECC_CTRL_ECCREG1 0x001
68 #define GPMC_ECC_CTRL_ECCREG2 0x002
69 #define GPMC_ECC_CTRL_ECCREG3 0x003
70 #define GPMC_ECC_CTRL_ECCREG4 0x004
71 #define GPMC_ECC_CTRL_ECCREG5 0x005
72 #define GPMC_ECC_CTRL_ECCREG6 0x006
73 #define GPMC_ECC_CTRL_ECCREG7 0x007
74 #define GPMC_ECC_CTRL_ECCREG8 0x008
75 #define GPMC_ECC_CTRL_ECCREG9 0x009
77 #define GPMC_CONFIG2_CSEXTRADELAY BIT(7)
78 #define GPMC_CONFIG3_ADVEXTRADELAY BIT(7)
79 #define GPMC_CONFIG4_OEEXTRADELAY BIT(7)
80 #define GPMC_CONFIG4_WEEXTRADELAY BIT(23)
81 #define GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN BIT(6)
82 #define GPMC_CONFIG6_CYCLE2CYCLESAMECSEN BIT(7)
84 #define GPMC_CS0_OFFSET 0x60
85 #define GPMC_CS_SIZE 0x30
86 #define GPMC_BCH_SIZE 0x10
88 #define GPMC_MEM_START 0x00000000
89 #define GPMC_MEM_END 0x3FFFFFFF
90 #define BOOT_ROM_SPACE 0x100000 /* 1MB */
92 #define GPMC_CHUNK_SHIFT 24 /* 16 MB */
93 #define GPMC_SECTION_SHIFT 28 /* 128 MB */
95 #define CS_NUM_SHIFT 24
96 #define ENABLE_PREFETCH (0x1 << 7)
97 #define DMA_MPU_MODE 2
99 #define GPMC_REVISION_MAJOR(l) ((l >> 4) & 0xf)
100 #define GPMC_REVISION_MINOR(l) (l & 0xf)
102 #define GPMC_HAS_WR_ACCESS 0x1
103 #define GPMC_HAS_WR_DATA_MUX_BUS 0x2
/*
 * XXX: Only the NAND irq has been considered; currently these are the
 * only ones used.
 */
#define	GPMC_NR_IRQ		2
109 struct gpmc_client_irq
{
114 /* Structure to save gpmc cs context */
115 struct gpmc_cs_config
{
127 * Structure to save/restore gpmc context
128 * to support core off on OMAP3
130 struct omap3_gpmc_regs
{
135 u32 prefetch_config1
;
136 u32 prefetch_config2
;
137 u32 prefetch_control
;
138 struct gpmc_cs_config cs_context
[GPMC_CS_NUM
];
141 static struct gpmc_client_irq gpmc_client_irq
[GPMC_NR_IRQ
];
142 static struct irq_chip gpmc_irq_chip
;
143 static unsigned gpmc_irq_start
;
145 static struct resource gpmc_mem_root
;
146 static struct resource gpmc_cs_mem
[GPMC_CS_NUM
];
147 static DEFINE_SPINLOCK(gpmc_mem_lock
);
148 static unsigned int gpmc_cs_map
; /* flag for cs which are initialized */
149 static struct device
*gpmc_dev
;
151 static resource_size_t phys_base
, mem_size
;
152 static unsigned gpmc_capability
;
153 static void __iomem
*gpmc_base
;
155 static struct clk
*gpmc_l3_clk
;
157 static irqreturn_t
gpmc_handle_irq(int irq
, void *dev
);
159 static void gpmc_write_reg(int idx
, u32 val
)
161 __raw_writel(val
, gpmc_base
+ idx
);
164 static u32
gpmc_read_reg(int idx
)
166 return __raw_readl(gpmc_base
+ idx
);
169 void gpmc_cs_write_reg(int cs
, int idx
, u32 val
)
171 void __iomem
*reg_addr
;
173 reg_addr
= gpmc_base
+ GPMC_CS0_OFFSET
+ (cs
* GPMC_CS_SIZE
) + idx
;
174 __raw_writel(val
, reg_addr
);
177 u32
gpmc_cs_read_reg(int cs
, int idx
)
179 void __iomem
*reg_addr
;
181 reg_addr
= gpmc_base
+ GPMC_CS0_OFFSET
+ (cs
* GPMC_CS_SIZE
) + idx
;
182 return __raw_readl(reg_addr
);
185 /* TODO: Add support for gpmc_fck to clock framework and use it */
186 unsigned long gpmc_get_fclk_period(void)
188 unsigned long rate
= clk_get_rate(gpmc_l3_clk
);
191 printk(KERN_WARNING
"gpmc_l3_clk not enabled\n");
196 rate
= 1000000000 / rate
; /* In picoseconds */
/* Convert @time_ns to fclk ticks, rounding up. */
unsigned int gpmc_ns_to_ticks(unsigned int time_ns)
{
	/* Calculate in picosecs to yield more exact results */
	unsigned long period_ps = gpmc_get_fclk_period();

	return (time_ns * 1000 + period_ps - 1) / period_ps;
}
/* Convert @time_ps to fclk ticks, rounding up. */
unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
{
	/* Calculate in picosecs to yield more exact results */
	unsigned long period_ps = gpmc_get_fclk_period();

	return (time_ps + period_ps - 1) / period_ps;
}
/* Convert @ticks of the fclk to nanoseconds. */
unsigned int gpmc_ticks_to_ns(unsigned int ticks)
{
	return ticks * gpmc_get_fclk_period() / 1000;
}
/* Round @time_ns up to a whole number of fclk ticks, returned in ns. */
unsigned int gpmc_round_ns_to_ticks(unsigned int time_ns)
{
	unsigned long nticks = gpmc_ns_to_ticks(time_ns);

	return nticks * gpmc_get_fclk_period() / 1000;
}
233 static inline void gpmc_cs_modify_reg(int cs
, int reg
, u32 mask
, bool value
)
237 l
= gpmc_cs_read_reg(cs
, reg
);
242 gpmc_cs_write_reg(cs
, reg
, l
);
245 static void gpmc_cs_bool_timings(int cs
, const struct gpmc_bool_timings
*p
)
247 gpmc_cs_modify_reg(cs
, GPMC_CS_CONFIG1
,
248 GPMC_CONFIG1_TIME_PARA_GRAN
,
249 p
->time_para_granularity
);
250 gpmc_cs_modify_reg(cs
, GPMC_CS_CONFIG2
,
251 GPMC_CONFIG2_CSEXTRADELAY
, p
->cs_extra_delay
);
252 gpmc_cs_modify_reg(cs
, GPMC_CS_CONFIG3
,
253 GPMC_CONFIG3_ADVEXTRADELAY
, p
->adv_extra_delay
);
254 gpmc_cs_modify_reg(cs
, GPMC_CS_CONFIG4
,
255 GPMC_CONFIG4_OEEXTRADELAY
, p
->oe_extra_delay
);
256 gpmc_cs_modify_reg(cs
, GPMC_CS_CONFIG4
,
257 GPMC_CONFIG4_OEEXTRADELAY
, p
->we_extra_delay
);
258 gpmc_cs_modify_reg(cs
, GPMC_CS_CONFIG6
,
259 GPMC_CONFIG6_CYCLE2CYCLESAMECSEN
,
260 p
->cycle2cyclesamecsen
);
261 gpmc_cs_modify_reg(cs
, GPMC_CS_CONFIG6
,
262 GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN
,
263 p
->cycle2cyclediffcsen
);
267 static int set_gpmc_timing_reg(int cs
, int reg
, int st_bit
, int end_bit
,
268 int time
, const char *name
)
270 static int set_gpmc_timing_reg(int cs
, int reg
, int st_bit
, int end_bit
,
275 int ticks
, mask
, nr_bits
;
280 ticks
= gpmc_ns_to_ticks(time
);
281 nr_bits
= end_bit
- st_bit
+ 1;
282 if (ticks
>= 1 << nr_bits
) {
284 printk(KERN_INFO
"GPMC CS%d: %-10s* %3d ns, %3d ticks >= %d\n",
285 cs
, name
, time
, ticks
, 1 << nr_bits
);
290 mask
= (1 << nr_bits
) - 1;
291 l
= gpmc_cs_read_reg(cs
, reg
);
294 "GPMC CS%d: %-10s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
295 cs
, name
, ticks
, gpmc_get_fclk_period() * ticks
/ 1000,
296 (l
>> st_bit
) & mask
, time
);
298 l
&= ~(mask
<< st_bit
);
299 l
|= ticks
<< st_bit
;
300 gpmc_cs_write_reg(cs
, reg
, l
);
/*
 * Program one timing field of the caller's (cs, t) pair and return -1
 * from the caller on overflow.  The DEBUG variant forwards the field
 * name for diagnostics.
 */
#ifdef DEBUG
#define GPMC_SET_ONE(reg, st, end, field) \
	if (set_gpmc_timing_reg(cs, (reg), (st), (end),		\
			t->field, #field) < 0)			\
		return -1
#else
#define GPMC_SET_ONE(reg, st, end, field) \
	if (set_gpmc_timing_reg(cs, (reg), (st), (end), t->field) < 0) \
		return -1
#endif
316 int gpmc_calc_divider(unsigned int sync_clk
)
321 l
= sync_clk
+ (gpmc_get_fclk_period() - 1);
322 div
= l
/ gpmc_get_fclk_period();
331 int gpmc_cs_set_timings(int cs
, const struct gpmc_timings
*t
)
336 div
= gpmc_calc_divider(t
->sync_clk
);
340 GPMC_SET_ONE(GPMC_CS_CONFIG2
, 0, 3, cs_on
);
341 GPMC_SET_ONE(GPMC_CS_CONFIG2
, 8, 12, cs_rd_off
);
342 GPMC_SET_ONE(GPMC_CS_CONFIG2
, 16, 20, cs_wr_off
);
344 GPMC_SET_ONE(GPMC_CS_CONFIG3
, 0, 3, adv_on
);
345 GPMC_SET_ONE(GPMC_CS_CONFIG3
, 8, 12, adv_rd_off
);
346 GPMC_SET_ONE(GPMC_CS_CONFIG3
, 16, 20, adv_wr_off
);
348 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 0, 3, oe_on
);
349 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 8, 12, oe_off
);
350 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 16, 19, we_on
);
351 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 24, 28, we_off
);
353 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 0, 4, rd_cycle
);
354 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 8, 12, wr_cycle
);
355 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 16, 20, access
);
357 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 24, 27, page_burst_access
);
359 GPMC_SET_ONE(GPMC_CS_CONFIG6
, 0, 3, bus_turnaround
);
360 GPMC_SET_ONE(GPMC_CS_CONFIG6
, 8, 11, cycle2cycle_delay
);
362 GPMC_SET_ONE(GPMC_CS_CONFIG1
, 18, 19, wait_monitoring
);
363 GPMC_SET_ONE(GPMC_CS_CONFIG1
, 25, 26, clk_activation
);
365 if (gpmc_capability
& GPMC_HAS_WR_DATA_MUX_BUS
)
366 GPMC_SET_ONE(GPMC_CS_CONFIG6
, 16, 19, wr_data_mux_bus
);
367 if (gpmc_capability
& GPMC_HAS_WR_ACCESS
)
368 GPMC_SET_ONE(GPMC_CS_CONFIG6
, 24, 28, wr_access
);
370 /* caller is expected to have initialized CONFIG1 to cover
371 * at least sync vs async
373 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG1
);
374 if (l
& (GPMC_CONFIG1_READTYPE_SYNC
| GPMC_CONFIG1_WRITETYPE_SYNC
)) {
376 printk(KERN_INFO
"GPMC CS%d CLK period is %lu ns (div %d)\n",
377 cs
, (div
* gpmc_get_fclk_period()) / 1000, div
);
381 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG1
, l
);
384 gpmc_cs_bool_timings(cs
, &t
->bool_timings
);
389 static void gpmc_cs_enable_mem(int cs
, u32 base
, u32 size
)
394 mask
= (1 << GPMC_SECTION_SHIFT
) - size
;
395 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
397 l
= (base
>> GPMC_CHUNK_SHIFT
) & 0x3f;
399 l
|= ((mask
>> GPMC_CHUNK_SHIFT
) & 0x0f) << 8;
400 l
|= GPMC_CONFIG7_CSVALID
;
401 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG7
, l
);
404 static void gpmc_cs_disable_mem(int cs
)
408 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
409 l
&= ~GPMC_CONFIG7_CSVALID
;
410 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG7
, l
);
413 static void gpmc_cs_get_memconf(int cs
, u32
*base
, u32
*size
)
418 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
419 *base
= (l
& 0x3f) << GPMC_CHUNK_SHIFT
;
420 mask
= (l
>> 8) & 0x0f;
421 *size
= (1 << GPMC_SECTION_SHIFT
) - (mask
<< GPMC_CHUNK_SHIFT
);
424 static int gpmc_cs_mem_enabled(int cs
)
428 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
429 return l
& GPMC_CONFIG7_CSVALID
;
432 int gpmc_cs_set_reserved(int cs
, int reserved
)
434 if (cs
> GPMC_CS_NUM
)
437 gpmc_cs_map
&= ~(1 << cs
);
438 gpmc_cs_map
|= (reserved
? 1 : 0) << cs
;
443 int gpmc_cs_reserved(int cs
)
445 if (cs
> GPMC_CS_NUM
)
448 return gpmc_cs_map
& (1 << cs
);
451 static unsigned long gpmc_mem_align(unsigned long size
)
455 size
= (size
- 1) >> (GPMC_CHUNK_SHIFT
- 1);
456 order
= GPMC_CHUNK_SHIFT
- 1;
465 static int gpmc_cs_insert_mem(int cs
, unsigned long base
, unsigned long size
)
467 struct resource
*res
= &gpmc_cs_mem
[cs
];
470 size
= gpmc_mem_align(size
);
471 spin_lock(&gpmc_mem_lock
);
473 res
->end
= base
+ size
- 1;
474 r
= request_resource(&gpmc_mem_root
, res
);
475 spin_unlock(&gpmc_mem_lock
);
480 static int gpmc_cs_delete_mem(int cs
)
482 struct resource
*res
= &gpmc_cs_mem
[cs
];
485 spin_lock(&gpmc_mem_lock
);
486 r
= release_resource(&gpmc_cs_mem
[cs
]);
489 spin_unlock(&gpmc_mem_lock
);
494 int gpmc_cs_request(int cs
, unsigned long size
, unsigned long *base
)
496 struct resource
*res
= &gpmc_cs_mem
[cs
];
499 if (cs
> GPMC_CS_NUM
)
502 size
= gpmc_mem_align(size
);
503 if (size
> (1 << GPMC_SECTION_SHIFT
))
506 spin_lock(&gpmc_mem_lock
);
507 if (gpmc_cs_reserved(cs
)) {
511 if (gpmc_cs_mem_enabled(cs
))
512 r
= adjust_resource(res
, res
->start
& ~(size
- 1), size
);
514 r
= allocate_resource(&gpmc_mem_root
, res
, size
, 0, ~0,
519 gpmc_cs_enable_mem(cs
, res
->start
, resource_size(res
));
521 gpmc_cs_set_reserved(cs
, 1);
523 spin_unlock(&gpmc_mem_lock
);
526 EXPORT_SYMBOL(gpmc_cs_request
);
528 void gpmc_cs_free(int cs
)
530 spin_lock(&gpmc_mem_lock
);
531 if (cs
>= GPMC_CS_NUM
|| cs
< 0 || !gpmc_cs_reserved(cs
)) {
532 printk(KERN_ERR
"Trying to free non-reserved GPMC CS%d\n", cs
);
534 spin_unlock(&gpmc_mem_lock
);
537 gpmc_cs_disable_mem(cs
);
538 release_resource(&gpmc_cs_mem
[cs
]);
539 gpmc_cs_set_reserved(cs
, 0);
540 spin_unlock(&gpmc_mem_lock
);
542 EXPORT_SYMBOL(gpmc_cs_free
);
545 * gpmc_cs_configure - write request to configure gpmc
546 * @cs: chip select number
548 * @wval: value to write
549 * @return status of the operation
551 int gpmc_cs_configure(int cs
, int cmd
, int wval
)
557 case GPMC_ENABLE_IRQ
:
558 gpmc_write_reg(GPMC_IRQENABLE
, wval
);
561 case GPMC_SET_IRQ_STATUS
:
562 gpmc_write_reg(GPMC_IRQSTATUS
, wval
);
566 regval
= gpmc_read_reg(GPMC_CONFIG
);
568 regval
&= ~GPMC_CONFIG_WRITEPROTECT
; /* WP is ON */
570 regval
|= GPMC_CONFIG_WRITEPROTECT
; /* WP is OFF */
571 gpmc_write_reg(GPMC_CONFIG
, regval
);
574 case GPMC_CONFIG_RDY_BSY
:
575 regval
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG1
);
577 regval
|= WR_RD_PIN_MONITORING
;
579 regval
&= ~WR_RD_PIN_MONITORING
;
580 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG1
, regval
);
583 case GPMC_CONFIG_DEV_SIZE
:
584 regval
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG1
);
586 /* clear 2 target bits */
587 regval
&= ~GPMC_CONFIG1_DEVICESIZE(3);
589 /* set the proper value */
590 regval
|= GPMC_CONFIG1_DEVICESIZE(wval
);
592 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG1
, regval
);
595 case GPMC_CONFIG_DEV_TYPE
:
596 regval
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG1
);
597 regval
|= GPMC_CONFIG1_DEVICETYPE(wval
);
598 if (wval
== GPMC_DEVICETYPE_NOR
)
599 regval
|= GPMC_CONFIG1_MUXADDDATA
;
600 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG1
, regval
);
604 printk(KERN_ERR
"gpmc_configure_cs: Not supported\n");
610 EXPORT_SYMBOL(gpmc_cs_configure
);
612 void gpmc_update_nand_reg(struct gpmc_nand_regs
*reg
, int cs
)
616 reg
->gpmc_status
= gpmc_base
+ GPMC_STATUS
;
617 reg
->gpmc_nand_command
= gpmc_base
+ GPMC_CS0_OFFSET
+
618 GPMC_CS_NAND_COMMAND
+ GPMC_CS_SIZE
* cs
;
619 reg
->gpmc_nand_address
= gpmc_base
+ GPMC_CS0_OFFSET
+
620 GPMC_CS_NAND_ADDRESS
+ GPMC_CS_SIZE
* cs
;
621 reg
->gpmc_nand_data
= gpmc_base
+ GPMC_CS0_OFFSET
+
622 GPMC_CS_NAND_DATA
+ GPMC_CS_SIZE
* cs
;
623 reg
->gpmc_prefetch_config1
= gpmc_base
+ GPMC_PREFETCH_CONFIG1
;
624 reg
->gpmc_prefetch_config2
= gpmc_base
+ GPMC_PREFETCH_CONFIG2
;
625 reg
->gpmc_prefetch_control
= gpmc_base
+ GPMC_PREFETCH_CONTROL
;
626 reg
->gpmc_prefetch_status
= gpmc_base
+ GPMC_PREFETCH_STATUS
;
627 reg
->gpmc_ecc_config
= gpmc_base
+ GPMC_ECC_CONFIG
;
628 reg
->gpmc_ecc_control
= gpmc_base
+ GPMC_ECC_CONTROL
;
629 reg
->gpmc_ecc_size_config
= gpmc_base
+ GPMC_ECC_SIZE_CONFIG
;
630 reg
->gpmc_ecc1_result
= gpmc_base
+ GPMC_ECC1_RESULT
;
632 for (i
= 0; i
< GPMC_BCH_NUM_REMAINDER
; i
++) {
633 reg
->gpmc_bch_result0
[i
] = gpmc_base
+ GPMC_ECC_BCH_RESULT_0
+
635 reg
->gpmc_bch_result1
[i
] = gpmc_base
+ GPMC_ECC_BCH_RESULT_1
+
637 reg
->gpmc_bch_result2
[i
] = gpmc_base
+ GPMC_ECC_BCH_RESULT_2
+
639 reg
->gpmc_bch_result3
[i
] = gpmc_base
+ GPMC_ECC_BCH_RESULT_3
+
644 int gpmc_get_client_irq(unsigned irq_config
)
648 if (hweight32(irq_config
) > 1)
651 for (i
= 0; i
< GPMC_NR_IRQ
; i
++)
652 if (gpmc_client_irq
[i
].bitmask
& irq_config
)
653 return gpmc_client_irq
[i
].irq
;
658 static int gpmc_irq_endis(unsigned irq
, bool endis
)
663 for (i
= 0; i
< GPMC_NR_IRQ
; i
++)
664 if (irq
== gpmc_client_irq
[i
].irq
) {
665 regval
= gpmc_read_reg(GPMC_IRQENABLE
);
667 regval
|= gpmc_client_irq
[i
].bitmask
;
669 regval
&= ~gpmc_client_irq
[i
].bitmask
;
670 gpmc_write_reg(GPMC_IRQENABLE
, regval
);
677 static void gpmc_irq_disable(struct irq_data
*p
)
679 gpmc_irq_endis(p
->irq
, false);
682 static void gpmc_irq_enable(struct irq_data
*p
)
684 gpmc_irq_endis(p
->irq
, true);
687 static void gpmc_irq_noop(struct irq_data
*data
) { }
689 static unsigned int gpmc_irq_noop_ret(struct irq_data
*data
) { return 0; }
691 static int gpmc_setup_irq(void)
699 gpmc_irq_start
= irq_alloc_descs(-1, 0, GPMC_NR_IRQ
, 0);
700 if (IS_ERR_VALUE(gpmc_irq_start
)) {
701 pr_err("irq_alloc_descs failed\n");
702 return gpmc_irq_start
;
705 gpmc_irq_chip
.name
= "gpmc";
706 gpmc_irq_chip
.irq_startup
= gpmc_irq_noop_ret
;
707 gpmc_irq_chip
.irq_enable
= gpmc_irq_enable
;
708 gpmc_irq_chip
.irq_disable
= gpmc_irq_disable
;
709 gpmc_irq_chip
.irq_shutdown
= gpmc_irq_noop
;
710 gpmc_irq_chip
.irq_ack
= gpmc_irq_noop
;
711 gpmc_irq_chip
.irq_mask
= gpmc_irq_noop
;
712 gpmc_irq_chip
.irq_unmask
= gpmc_irq_noop
;
714 gpmc_client_irq
[0].bitmask
= GPMC_IRQ_FIFOEVENTENABLE
;
715 gpmc_client_irq
[1].bitmask
= GPMC_IRQ_COUNT_EVENT
;
717 for (i
= 0; i
< GPMC_NR_IRQ
; i
++) {
718 gpmc_client_irq
[i
].irq
= gpmc_irq_start
+ i
;
719 irq_set_chip_and_handler(gpmc_client_irq
[i
].irq
,
720 &gpmc_irq_chip
, handle_simple_irq
);
721 set_irq_flags(gpmc_client_irq
[i
].irq
,
722 IRQF_VALID
| IRQF_NOAUTOEN
);
725 /* Disable interrupts */
726 gpmc_write_reg(GPMC_IRQENABLE
, 0);
728 /* clear interrupts */
729 regval
= gpmc_read_reg(GPMC_IRQSTATUS
);
730 gpmc_write_reg(GPMC_IRQSTATUS
, regval
);
732 return request_irq(gpmc_irq
, gpmc_handle_irq
, 0, "gpmc", NULL
);
735 static __devexit
int gpmc_free_irq(void)
740 free_irq(gpmc_irq
, NULL
);
742 for (i
= 0; i
< GPMC_NR_IRQ
; i
++) {
743 irq_set_handler(gpmc_client_irq
[i
].irq
, NULL
);
744 irq_set_chip(gpmc_client_irq
[i
].irq
, &no_irq_chip
);
745 irq_modify_status(gpmc_client_irq
[i
].irq
, 0, 0);
748 irq_free_descs(gpmc_irq_start
, GPMC_NR_IRQ
);
753 static void __devexit
gpmc_mem_exit(void)
757 for (cs
= 0; cs
< GPMC_CS_NUM
; cs
++) {
758 if (!gpmc_cs_mem_enabled(cs
))
760 gpmc_cs_delete_mem(cs
);
765 static int __devinit
gpmc_mem_init(void)
768 unsigned long boot_rom_space
= 0;
770 /* never allocate the first page, to facilitate bug detection;
771 * even if we didn't boot from ROM.
773 boot_rom_space
= BOOT_ROM_SPACE
;
774 /* In apollon the CS0 is mapped as 0x0000 0000 */
775 if (machine_is_omap_apollon())
777 gpmc_mem_root
.start
= GPMC_MEM_START
+ boot_rom_space
;
778 gpmc_mem_root
.end
= GPMC_MEM_END
;
780 /* Reserve all regions that has been set up by bootloader */
781 for (cs
= 0; cs
< GPMC_CS_NUM
; cs
++) {
784 if (!gpmc_cs_mem_enabled(cs
))
786 gpmc_cs_get_memconf(cs
, &base
, &size
);
787 rc
= gpmc_cs_insert_mem(cs
, base
, size
);
788 if (IS_ERR_VALUE(rc
)) {
790 if (gpmc_cs_mem_enabled(cs
))
791 gpmc_cs_delete_mem(cs
);
799 static __devinit
int gpmc_probe(struct platform_device
*pdev
)
803 struct resource
*res
;
805 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
809 phys_base
= res
->start
;
810 mem_size
= resource_size(res
);
812 gpmc_base
= devm_request_and_ioremap(&pdev
->dev
, res
);
814 dev_err(&pdev
->dev
, "error: request memory / ioremap\n");
815 return -EADDRNOTAVAIL
;
818 res
= platform_get_resource(pdev
, IORESOURCE_IRQ
, 0);
820 dev_warn(&pdev
->dev
, "Failed to get resource: irq\n");
822 gpmc_irq
= res
->start
;
824 gpmc_l3_clk
= clk_get(&pdev
->dev
, "fck");
825 if (IS_ERR(gpmc_l3_clk
)) {
826 dev_err(&pdev
->dev
, "error: clk_get\n");
828 return PTR_ERR(gpmc_l3_clk
);
831 clk_prepare_enable(gpmc_l3_clk
);
833 gpmc_dev
= &pdev
->dev
;
835 l
= gpmc_read_reg(GPMC_REVISION
);
836 if (GPMC_REVISION_MAJOR(l
) > 0x4)
837 gpmc_capability
= GPMC_HAS_WR_ACCESS
| GPMC_HAS_WR_DATA_MUX_BUS
;
838 dev_info(gpmc_dev
, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l
),
839 GPMC_REVISION_MINOR(l
));
841 rc
= gpmc_mem_init();
842 if (IS_ERR_VALUE(rc
)) {
843 clk_disable_unprepare(gpmc_l3_clk
);
844 clk_put(gpmc_l3_clk
);
845 dev_err(gpmc_dev
, "failed to reserve memory\n");
849 if (IS_ERR_VALUE(gpmc_setup_irq()))
850 dev_warn(gpmc_dev
, "gpmc_setup_irq failed\n");
855 static __devexit
int gpmc_remove(struct platform_device
*pdev
)
863 static struct platform_driver gpmc_driver
= {
865 .remove
= __devexit_p(gpmc_remove
),
868 .owner
= THIS_MODULE
,
872 static __init
int gpmc_init(void)
874 return platform_driver_register(&gpmc_driver
);
877 static __exit
void gpmc_exit(void)
879 platform_driver_unregister(&gpmc_driver
);
883 postcore_initcall(gpmc_init
);
884 module_exit(gpmc_exit
);
886 static int __init
omap_gpmc_init(void)
888 struct omap_hwmod
*oh
;
889 struct platform_device
*pdev
;
890 char *oh_name
= "gpmc";
892 oh
= omap_hwmod_lookup(oh_name
);
894 pr_err("Could not look up %s\n", oh_name
);
898 pdev
= omap_device_build(DEVICE_NAME
, -1, oh
, NULL
, 0, NULL
, 0, 0);
899 WARN(IS_ERR(pdev
), "could not build omap_device for %s\n", oh_name
);
901 return IS_ERR(pdev
) ? PTR_ERR(pdev
) : 0;
903 postcore_initcall(omap_gpmc_init
);
905 static irqreturn_t
gpmc_handle_irq(int irq
, void *dev
)
910 regval
= gpmc_read_reg(GPMC_IRQSTATUS
);
915 for (i
= 0; i
< GPMC_NR_IRQ
; i
++)
916 if (regval
& gpmc_client_irq
[i
].bitmask
)
917 generic_handle_irq(gpmc_client_irq
[i
].irq
);
919 gpmc_write_reg(GPMC_IRQSTATUS
, regval
);
924 #ifdef CONFIG_ARCH_OMAP3
925 static struct omap3_gpmc_regs gpmc_context
;
927 void omap3_gpmc_save_context(void)
931 gpmc_context
.sysconfig
= gpmc_read_reg(GPMC_SYSCONFIG
);
932 gpmc_context
.irqenable
= gpmc_read_reg(GPMC_IRQENABLE
);
933 gpmc_context
.timeout_ctrl
= gpmc_read_reg(GPMC_TIMEOUT_CONTROL
);
934 gpmc_context
.config
= gpmc_read_reg(GPMC_CONFIG
);
935 gpmc_context
.prefetch_config1
= gpmc_read_reg(GPMC_PREFETCH_CONFIG1
);
936 gpmc_context
.prefetch_config2
= gpmc_read_reg(GPMC_PREFETCH_CONFIG2
);
937 gpmc_context
.prefetch_control
= gpmc_read_reg(GPMC_PREFETCH_CONTROL
);
938 for (i
= 0; i
< GPMC_CS_NUM
; i
++) {
939 gpmc_context
.cs_context
[i
].is_valid
= gpmc_cs_mem_enabled(i
);
940 if (gpmc_context
.cs_context
[i
].is_valid
) {
941 gpmc_context
.cs_context
[i
].config1
=
942 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG1
);
943 gpmc_context
.cs_context
[i
].config2
=
944 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG2
);
945 gpmc_context
.cs_context
[i
].config3
=
946 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG3
);
947 gpmc_context
.cs_context
[i
].config4
=
948 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG4
);
949 gpmc_context
.cs_context
[i
].config5
=
950 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG5
);
951 gpmc_context
.cs_context
[i
].config6
=
952 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG6
);
953 gpmc_context
.cs_context
[i
].config7
=
954 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG7
);
959 void omap3_gpmc_restore_context(void)
963 gpmc_write_reg(GPMC_SYSCONFIG
, gpmc_context
.sysconfig
);
964 gpmc_write_reg(GPMC_IRQENABLE
, gpmc_context
.irqenable
);
965 gpmc_write_reg(GPMC_TIMEOUT_CONTROL
, gpmc_context
.timeout_ctrl
);
966 gpmc_write_reg(GPMC_CONFIG
, gpmc_context
.config
);
967 gpmc_write_reg(GPMC_PREFETCH_CONFIG1
, gpmc_context
.prefetch_config1
);
968 gpmc_write_reg(GPMC_PREFETCH_CONFIG2
, gpmc_context
.prefetch_config2
);
969 gpmc_write_reg(GPMC_PREFETCH_CONTROL
, gpmc_context
.prefetch_control
);
970 for (i
= 0; i
< GPMC_CS_NUM
; i
++) {
971 if (gpmc_context
.cs_context
[i
].is_valid
) {
972 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG1
,
973 gpmc_context
.cs_context
[i
].config1
);
974 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG2
,
975 gpmc_context
.cs_context
[i
].config2
);
976 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG3
,
977 gpmc_context
.cs_context
[i
].config3
);
978 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG4
,
979 gpmc_context
.cs_context
[i
].config4
);
980 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG5
,
981 gpmc_context
.cs_context
[i
].config5
);
982 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG6
,
983 gpmc_context
.cs_context
[i
].config6
);
984 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG7
,
985 gpmc_context
.cs_context
[i
].config7
);
989 #endif /* CONFIG_ARCH_OMAP3 */