ARM: OMAP2+: Don't configure of chip-select options in gpmc_cs_configure()
[deliverable/linux.git] / arch / arm / mach-omap2 / gpmc.c
1 /*
2 * GPMC support functions
3 *
4 * Copyright (C) 2005-2006 Nokia Corporation
5 *
6 * Author: Juha Yrjola
7 *
8 * Copyright (C) 2009 Texas Instruments
9 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15 #undef DEBUG
16
17 #include <linux/irq.h>
18 #include <linux/kernel.h>
19 #include <linux/init.h>
20 #include <linux/err.h>
21 #include <linux/clk.h>
22 #include <linux/ioport.h>
23 #include <linux/spinlock.h>
24 #include <linux/io.h>
25 #include <linux/module.h>
26 #include <linux/interrupt.h>
27 #include <linux/platform_device.h>
28 #include <linux/of.h>
29 #include <linux/of_mtd.h>
30 #include <linux/of_device.h>
31 #include <linux/mtd/nand.h>
32
33 #include <linux/platform_data/mtd-nand-omap2.h>
34
35 #include <asm/mach-types.h>
36
37 #include "soc.h"
38 #include "common.h"
39 #include "omap_device.h"
40 #include "gpmc.h"
41 #include "gpmc-nand.h"
42 #include "gpmc-onenand.h"
43
44 #define DEVICE_NAME "omap-gpmc"
45
46 /* GPMC register offsets */
47 #define GPMC_REVISION 0x00
48 #define GPMC_SYSCONFIG 0x10
49 #define GPMC_SYSSTATUS 0x14
50 #define GPMC_IRQSTATUS 0x18
51 #define GPMC_IRQENABLE 0x1c
52 #define GPMC_TIMEOUT_CONTROL 0x40
53 #define GPMC_ERR_ADDRESS 0x44
54 #define GPMC_ERR_TYPE 0x48
55 #define GPMC_CONFIG 0x50
56 #define GPMC_STATUS 0x54
57 #define GPMC_PREFETCH_CONFIG1 0x1e0
58 #define GPMC_PREFETCH_CONFIG2 0x1e4
59 #define GPMC_PREFETCH_CONTROL 0x1ec
60 #define GPMC_PREFETCH_STATUS 0x1f0
61 #define GPMC_ECC_CONFIG 0x1f4
62 #define GPMC_ECC_CONTROL 0x1f8
63 #define GPMC_ECC_SIZE_CONFIG 0x1fc
64 #define GPMC_ECC1_RESULT 0x200
65 #define GPMC_ECC_BCH_RESULT_0 0x240 /* not available on OMAP2 */
66 #define GPMC_ECC_BCH_RESULT_1 0x244 /* not available on OMAP2 */
67 #define GPMC_ECC_BCH_RESULT_2 0x248 /* not available on OMAP2 */
68 #define GPMC_ECC_BCH_RESULT_3 0x24c /* not available on OMAP2 */
69
70 /* GPMC ECC control settings */
71 #define GPMC_ECC_CTRL_ECCCLEAR 0x100
72 #define GPMC_ECC_CTRL_ECCDISABLE 0x000
73 #define GPMC_ECC_CTRL_ECCREG1 0x001
74 #define GPMC_ECC_CTRL_ECCREG2 0x002
75 #define GPMC_ECC_CTRL_ECCREG3 0x003
76 #define GPMC_ECC_CTRL_ECCREG4 0x004
77 #define GPMC_ECC_CTRL_ECCREG5 0x005
78 #define GPMC_ECC_CTRL_ECCREG6 0x006
79 #define GPMC_ECC_CTRL_ECCREG7 0x007
80 #define GPMC_ECC_CTRL_ECCREG8 0x008
81 #define GPMC_ECC_CTRL_ECCREG9 0x009
82
83 #define GPMC_CONFIG2_CSEXTRADELAY BIT(7)
84 #define GPMC_CONFIG3_ADVEXTRADELAY BIT(7)
85 #define GPMC_CONFIG4_OEEXTRADELAY BIT(7)
86 #define GPMC_CONFIG4_WEEXTRADELAY BIT(23)
87 #define GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN BIT(6)
88 #define GPMC_CONFIG6_CYCLE2CYCLESAMECSEN BIT(7)
89
90 #define GPMC_CS0_OFFSET 0x60
91 #define GPMC_CS_SIZE 0x30
92 #define GPMC_BCH_SIZE 0x10
93
94 #define GPMC_MEM_START 0x00000000
95 #define GPMC_MEM_END 0x3FFFFFFF
96 #define BOOT_ROM_SPACE 0x100000 /* 1MB */
97
98 #define GPMC_CHUNK_SHIFT 24 /* 16 MB */
99 #define GPMC_SECTION_SHIFT 28 /* 128 MB */
100
101 #define CS_NUM_SHIFT 24
102 #define ENABLE_PREFETCH (0x1 << 7)
103 #define DMA_MPU_MODE 2
104
105 #define GPMC_REVISION_MAJOR(l) ((l >> 4) & 0xf)
106 #define GPMC_REVISION_MINOR(l) (l & 0xf)
107
108 #define GPMC_HAS_WR_ACCESS 0x1
109 #define GPMC_HAS_WR_DATA_MUX_BUS 0x2
110 #define GPMC_HAS_MUX_AAD 0x4
111
112 #define GPMC_NR_WAITPINS 4
113
114 /* XXX: Only NAND irq has been considered,currently these are the only ones used
115 */
116 #define GPMC_NR_IRQ 2
117
/* Maps a client-visible virtual irq to its GPMC interrupt-register bit. */
struct gpmc_client_irq {
	unsigned irq;	/* virtual irq number handed out by gpmc_get_client_irq() */
	u32 bitmask;	/* corresponding bit in GPMC_IRQENABLE/GPMC_IRQSTATUS */
};
122
/* Structure to save gpmc cs context */
struct gpmc_cs_config {
	u32 config1;	/* GPMC_CS_CONFIG1..GPMC_CS_CONFIG7 register values */
	u32 config2;
	u32 config3;
	u32 config4;
	u32 config5;
	u32 config6;
	u32 config7;
	int is_valid;	/* non-zero when this CS context holds saved state */
};
134
/*
 * Structure to save/restore gpmc context
 * to support core off on OMAP3
 */
struct omap3_gpmc_regs {
	u32 sysconfig;		/* GPMC_SYSCONFIG */
	u32 irqenable;		/* GPMC_IRQENABLE */
	u32 timeout_ctrl;	/* GPMC_TIMEOUT_CONTROL */
	u32 config;		/* GPMC_CONFIG */
	u32 prefetch_config1;	/* prefetch engine setup */
	u32 prefetch_config2;
	u32 prefetch_control;
	struct gpmc_cs_config cs_context[GPMC_CS_NUM];	/* per-CS CONFIG1..7 */
};
149
150 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
151 static struct irq_chip gpmc_irq_chip;
152 static unsigned gpmc_irq_start;
153
154 static struct resource gpmc_mem_root;
155 static struct resource gpmc_cs_mem[GPMC_CS_NUM];
156 static DEFINE_SPINLOCK(gpmc_mem_lock);
157 /* Define chip-selects as reserved by default until probe completes */
158 static unsigned int gpmc_cs_map = ((1 << GPMC_CS_NUM) - 1);
159 static unsigned int gpmc_nr_waitpins;
160 static struct device *gpmc_dev;
161 static int gpmc_irq;
162 static resource_size_t phys_base, mem_size;
163 static unsigned gpmc_capability;
164 static void __iomem *gpmc_base;
165
166 static struct clk *gpmc_l3_clk;
167
168 static irqreturn_t gpmc_handle_irq(int irq, void *dev);
169
170 static void gpmc_write_reg(int idx, u32 val)
171 {
172 __raw_writel(val, gpmc_base + idx);
173 }
174
175 static u32 gpmc_read_reg(int idx)
176 {
177 return __raw_readl(gpmc_base + idx);
178 }
179
180 void gpmc_cs_write_reg(int cs, int idx, u32 val)
181 {
182 void __iomem *reg_addr;
183
184 reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
185 __raw_writel(val, reg_addr);
186 }
187
188 static u32 gpmc_cs_read_reg(int cs, int idx)
189 {
190 void __iomem *reg_addr;
191
192 reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
193 return __raw_readl(reg_addr);
194 }
195
196 /* TODO: Add support for gpmc_fck to clock framework and use it */
197 static unsigned long gpmc_get_fclk_period(void)
198 {
199 unsigned long rate = clk_get_rate(gpmc_l3_clk);
200
201 if (rate == 0) {
202 printk(KERN_WARNING "gpmc_l3_clk not enabled\n");
203 return 0;
204 }
205
206 rate /= 1000;
207 rate = 1000000000 / rate; /* In picoseconds */
208
209 return rate;
210 }
211
/* Convert @time_ns to fclk ticks, rounding up. */
static unsigned int gpmc_ns_to_ticks(unsigned int time_ns)
{
	/* Work in picoseconds to yield more exact results */
	unsigned long tick_ps = gpmc_get_fclk_period();

	return (time_ns * 1000 + tick_ps - 1) / tick_ps;
}
221
/* Convert @time_ps to fclk ticks, rounding up. */
static unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
{
	unsigned long tick_ps = gpmc_get_fclk_period();

	return (time_ps + tick_ps - 1) / tick_ps;
}
231
/* Convert @ticks fclk cycles to nanoseconds (truncating). */
unsigned int gpmc_ticks_to_ns(unsigned int ticks)
{
	unsigned long period_ps = gpmc_get_fclk_period();

	return ticks * period_ps / 1000;
}
236
/* Convert @ticks fclk cycles to picoseconds. */
static unsigned int gpmc_ticks_to_ps(unsigned int ticks)
{
	unsigned long period_ps = gpmc_get_fclk_period();

	return ticks * period_ps;
}
241
/* Round @time_ps up to a whole number of fclk ticks, returned in ps. */
static unsigned int gpmc_round_ps_to_ticks(unsigned int time_ps)
{
	return gpmc_ps_to_ticks(time_ps) * gpmc_get_fclk_period();
}
248
249 static inline void gpmc_cs_modify_reg(int cs, int reg, u32 mask, bool value)
250 {
251 u32 l;
252
253 l = gpmc_cs_read_reg(cs, reg);
254 if (value)
255 l |= mask;
256 else
257 l &= ~mask;
258 gpmc_cs_write_reg(cs, reg, l);
259 }
260
261 static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
262 {
263 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG1,
264 GPMC_CONFIG1_TIME_PARA_GRAN,
265 p->time_para_granularity);
266 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG2,
267 GPMC_CONFIG2_CSEXTRADELAY, p->cs_extra_delay);
268 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG3,
269 GPMC_CONFIG3_ADVEXTRADELAY, p->adv_extra_delay);
270 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
271 GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
272 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
273 GPMC_CONFIG4_OEEXTRADELAY, p->we_extra_delay);
274 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
275 GPMC_CONFIG6_CYCLE2CYCLESAMECSEN,
276 p->cycle2cyclesamecsen);
277 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
278 GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN,
279 p->cycle2cyclediffcsen);
280 }
281
/*
 * Program the bit-field [st_bit..end_bit] of CS register @reg with @time
 * (nanoseconds) converted to fclk ticks.  Returns 0 on success, -1 when
 * the tick count does not fit in the field.  The DEBUG build takes an
 * extra @name argument used only for trace output.
 */
#ifdef DEBUG
static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
			       int time, const char *name)
#else
static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
			       int time)
#endif
{
	u32 l;
	int ticks, mask, nr_bits;

	if (time == 0)
		ticks = 0;
	else
		ticks = gpmc_ns_to_ticks(time);
	nr_bits = end_bit - st_bit + 1;
	/* Reject values that would overflow the register field */
	if (ticks >= 1 << nr_bits) {
#ifdef DEBUG
		printk(KERN_INFO "GPMC CS%d: %-10s* %3d ns, %3d ticks >= %d\n",
				cs, name, time, ticks, 1 << nr_bits);
#endif
		return -1;
	}

	mask = (1 << nr_bits) - 1;
	l = gpmc_cs_read_reg(cs, reg);
#ifdef DEBUG
	printk(KERN_INFO
		"GPMC CS%d: %-10s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
	       cs, name, ticks, gpmc_get_fclk_period() * ticks / 1000,
			(l >> st_bit) & mask, time);
#endif
	/* Read-modify-write only the requested field */
	l &= ~(mask << st_bit);
	l |= ticks << st_bit;
	gpmc_cs_write_reg(cs, reg, l);

	return 0;
}
320
/*
 * Program one timing field of a CS register from struct gpmc_timings *t,
 * returning -1 from the *enclosing* function if the value does not fit.
 * The DEBUG variant also passes the field name for trace output.
 */
#ifdef DEBUG
#define GPMC_SET_ONE(reg, st, end, field) \
	if (set_gpmc_timing_reg(cs, (reg), (st), (end),		\
			t->field, #field) < 0)			\
		return -1
#else
#define GPMC_SET_ONE(reg, st, end, field) \
	if (set_gpmc_timing_reg(cs, (reg), (st), (end), t->field) < 0) \
		return -1
#endif
331
332 int gpmc_calc_divider(unsigned int sync_clk)
333 {
334 int div;
335 u32 l;
336
337 l = sync_clk + (gpmc_get_fclk_period() - 1);
338 div = l / gpmc_get_fclk_period();
339 if (div > 4)
340 return -1;
341 if (div <= 0)
342 div = 1;
343
344 return div;
345 }
346
/*
 * Program every timing field (CONFIG1..CONFIG6) of chip-select @cs from
 * @t, whose members are in nanoseconds.  Returns a negative value when
 * the sync-clock divider or any individual timing does not fit in its
 * register field, 0 on success.
 */
int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t)
{
	int div;
	u32 l;

	/* GPMC_CLK divider derived from the requested sync clock period */
	div = gpmc_calc_divider(t->sync_clk);
	if (div < 0)
		return div;

	GPMC_SET_ONE(GPMC_CS_CONFIG2, 0, 3, cs_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG2, 8, 12, cs_rd_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG2, 16, 20, cs_wr_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG3, 0, 3, adv_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG3, 8, 12, adv_rd_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG3, 16, 20, adv_wr_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG4, 0, 3, oe_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG4, 8, 12, oe_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG4, 16, 19, we_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG4, 24, 28, we_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG5, 0, 4, rd_cycle);
	GPMC_SET_ONE(GPMC_CS_CONFIG5, 8, 12, wr_cycle);
	GPMC_SET_ONE(GPMC_CS_CONFIG5, 16, 20, access);

	GPMC_SET_ONE(GPMC_CS_CONFIG5, 24, 27, page_burst_access);

	GPMC_SET_ONE(GPMC_CS_CONFIG6, 0, 3, bus_turnaround);
	GPMC_SET_ONE(GPMC_CS_CONFIG6, 8, 11, cycle2cycle_delay);

	GPMC_SET_ONE(GPMC_CS_CONFIG1, 18, 19, wait_monitoring);
	GPMC_SET_ONE(GPMC_CS_CONFIG1, 25, 26, clk_activation);

	/* These CONFIG6 fields only exist on some GPMC revisions */
	if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
		GPMC_SET_ONE(GPMC_CS_CONFIG6, 16, 19, wr_data_mux_bus);
	if (gpmc_capability & GPMC_HAS_WR_ACCESS)
		GPMC_SET_ONE(GPMC_CS_CONFIG6, 24, 28, wr_access);

	/* caller is expected to have initialized CONFIG1 to cover
	 * at least sync vs async
	 */
	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
	if (l & (GPMC_CONFIG1_READTYPE_SYNC | GPMC_CONFIG1_WRITETYPE_SYNC)) {
#ifdef DEBUG
		printk(KERN_INFO "GPMC CS%d CLK period is %lu ns (div %d)\n",
				cs, (div * gpmc_get_fclk_period()) / 1000, div);
#endif
		/* Program the GPMC_CLK divider; the field holds div - 1 */
		l &= ~0x03;
		l |= (div - 1);
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, l);
	}

	gpmc_cs_bool_timings(cs, &t->bool_timings);

	return 0;
}
404
405 static void gpmc_cs_enable_mem(int cs, u32 base, u32 size)
406 {
407 u32 l;
408 u32 mask;
409
410 mask = (1 << GPMC_SECTION_SHIFT) - size;
411 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
412 l &= ~0x3f;
413 l = (base >> GPMC_CHUNK_SHIFT) & 0x3f;
414 l &= ~(0x0f << 8);
415 l |= ((mask >> GPMC_CHUNK_SHIFT) & 0x0f) << 8;
416 l |= GPMC_CONFIG7_CSVALID;
417 gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
418 }
419
420 static void gpmc_cs_disable_mem(int cs)
421 {
422 u32 l;
423
424 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
425 l &= ~GPMC_CONFIG7_CSVALID;
426 gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
427 }
428
429 static void gpmc_cs_get_memconf(int cs, u32 *base, u32 *size)
430 {
431 u32 l;
432 u32 mask;
433
434 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
435 *base = (l & 0x3f) << GPMC_CHUNK_SHIFT;
436 mask = (l >> 8) & 0x0f;
437 *size = (1 << GPMC_SECTION_SHIFT) - (mask << GPMC_CHUNK_SHIFT);
438 }
439
440 static int gpmc_cs_mem_enabled(int cs)
441 {
442 u32 l;
443
444 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
445 return l & GPMC_CONFIG7_CSVALID;
446 }
447
448 static void gpmc_cs_set_reserved(int cs, int reserved)
449 {
450 gpmc_cs_map &= ~(1 << cs);
451 gpmc_cs_map |= (reserved ? 1 : 0) << cs;
452 }
453
454 static bool gpmc_cs_reserved(int cs)
455 {
456 return gpmc_cs_map & (1 << cs);
457 }
458
459 static unsigned long gpmc_mem_align(unsigned long size)
460 {
461 int order;
462
463 size = (size - 1) >> (GPMC_CHUNK_SHIFT - 1);
464 order = GPMC_CHUNK_SHIFT - 1;
465 do {
466 size >>= 1;
467 order++;
468 } while (size);
469 size = 1 << order;
470 return size;
471 }
472
473 static int gpmc_cs_insert_mem(int cs, unsigned long base, unsigned long size)
474 {
475 struct resource *res = &gpmc_cs_mem[cs];
476 int r;
477
478 size = gpmc_mem_align(size);
479 spin_lock(&gpmc_mem_lock);
480 res->start = base;
481 res->end = base + size - 1;
482 r = request_resource(&gpmc_mem_root, res);
483 spin_unlock(&gpmc_mem_lock);
484
485 return r;
486 }
487
488 static int gpmc_cs_delete_mem(int cs)
489 {
490 struct resource *res = &gpmc_cs_mem[cs];
491 int r;
492
493 spin_lock(&gpmc_mem_lock);
494 r = release_resource(&gpmc_cs_mem[cs]);
495 res->start = 0;
496 res->end = 0;
497 spin_unlock(&gpmc_mem_lock);
498
499 return r;
500 }
501
502 int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
503 {
504 struct resource *res = &gpmc_cs_mem[cs];
505 int r = -1;
506
507 if (cs > GPMC_CS_NUM)
508 return -ENODEV;
509
510 size = gpmc_mem_align(size);
511 if (size > (1 << GPMC_SECTION_SHIFT))
512 return -ENOMEM;
513
514 spin_lock(&gpmc_mem_lock);
515 if (gpmc_cs_reserved(cs)) {
516 r = -EBUSY;
517 goto out;
518 }
519 if (gpmc_cs_mem_enabled(cs))
520 r = adjust_resource(res, res->start & ~(size - 1), size);
521 if (r < 0)
522 r = allocate_resource(&gpmc_mem_root, res, size, 0, ~0,
523 size, NULL, NULL);
524 if (r < 0)
525 goto out;
526
527 gpmc_cs_enable_mem(cs, res->start, resource_size(res));
528 *base = res->start;
529 gpmc_cs_set_reserved(cs, 1);
530 out:
531 spin_unlock(&gpmc_mem_lock);
532 return r;
533 }
534 EXPORT_SYMBOL(gpmc_cs_request);
535
/*
 * Release chip-select @cs: disable its address decode, return its
 * memory range to the GPMC pool and clear its reserved bit.
 * Freeing an unreserved or out-of-range CS is a bug.
 */
void gpmc_cs_free(int cs)
{
	spin_lock(&gpmc_mem_lock);
	if (cs >= GPMC_CS_NUM || cs < 0 || !gpmc_cs_reserved(cs)) {
		printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs);
		BUG();
		/*
		 * Only reached when CONFIG_BUG is disabled and BUG()
		 * compiles to a no-op; the lock must still be dropped.
		 */
		spin_unlock(&gpmc_mem_lock);
		return;
	}
	gpmc_cs_disable_mem(cs);
	release_resource(&gpmc_cs_mem[cs]);
	gpmc_cs_set_reserved(cs, 0);
	spin_unlock(&gpmc_mem_lock);
}
EXPORT_SYMBOL(gpmc_cs_free);
551
552 /**
553 * gpmc_configure - write request to configure gpmc
554 * @cmd: command type
555 * @wval: value to write
556 * @return status of the operation
557 */
558 int gpmc_configure(int cmd, int wval)
559 {
560 u32 regval;
561
562 switch (cmd) {
563 case GPMC_ENABLE_IRQ:
564 gpmc_write_reg(GPMC_IRQENABLE, wval);
565 break;
566
567 case GPMC_SET_IRQ_STATUS:
568 gpmc_write_reg(GPMC_IRQSTATUS, wval);
569 break;
570
571 case GPMC_CONFIG_WP:
572 regval = gpmc_read_reg(GPMC_CONFIG);
573 if (wval)
574 regval &= ~GPMC_CONFIG_WRITEPROTECT; /* WP is ON */
575 else
576 regval |= GPMC_CONFIG_WRITEPROTECT; /* WP is OFF */
577 gpmc_write_reg(GPMC_CONFIG, regval);
578 break;
579
580 default:
581 pr_err("%s: command not supported\n", __func__);
582 return -EINVAL;
583 }
584
585 return 0;
586 }
587 EXPORT_SYMBOL(gpmc_configure);
588
589 void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs)
590 {
591 int i;
592
593 reg->gpmc_status = gpmc_base + GPMC_STATUS;
594 reg->gpmc_nand_command = gpmc_base + GPMC_CS0_OFFSET +
595 GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs;
596 reg->gpmc_nand_address = gpmc_base + GPMC_CS0_OFFSET +
597 GPMC_CS_NAND_ADDRESS + GPMC_CS_SIZE * cs;
598 reg->gpmc_nand_data = gpmc_base + GPMC_CS0_OFFSET +
599 GPMC_CS_NAND_DATA + GPMC_CS_SIZE * cs;
600 reg->gpmc_prefetch_config1 = gpmc_base + GPMC_PREFETCH_CONFIG1;
601 reg->gpmc_prefetch_config2 = gpmc_base + GPMC_PREFETCH_CONFIG2;
602 reg->gpmc_prefetch_control = gpmc_base + GPMC_PREFETCH_CONTROL;
603 reg->gpmc_prefetch_status = gpmc_base + GPMC_PREFETCH_STATUS;
604 reg->gpmc_ecc_config = gpmc_base + GPMC_ECC_CONFIG;
605 reg->gpmc_ecc_control = gpmc_base + GPMC_ECC_CONTROL;
606 reg->gpmc_ecc_size_config = gpmc_base + GPMC_ECC_SIZE_CONFIG;
607 reg->gpmc_ecc1_result = gpmc_base + GPMC_ECC1_RESULT;
608
609 for (i = 0; i < GPMC_BCH_NUM_REMAINDER; i++) {
610 reg->gpmc_bch_result0[i] = gpmc_base + GPMC_ECC_BCH_RESULT_0 +
611 GPMC_BCH_SIZE * i;
612 reg->gpmc_bch_result1[i] = gpmc_base + GPMC_ECC_BCH_RESULT_1 +
613 GPMC_BCH_SIZE * i;
614 reg->gpmc_bch_result2[i] = gpmc_base + GPMC_ECC_BCH_RESULT_2 +
615 GPMC_BCH_SIZE * i;
616 reg->gpmc_bch_result3[i] = gpmc_base + GPMC_ECC_BCH_RESULT_3 +
617 GPMC_BCH_SIZE * i;
618 }
619 }
620
621 int gpmc_get_client_irq(unsigned irq_config)
622 {
623 int i;
624
625 if (hweight32(irq_config) > 1)
626 return 0;
627
628 for (i = 0; i < GPMC_NR_IRQ; i++)
629 if (gpmc_client_irq[i].bitmask & irq_config)
630 return gpmc_client_irq[i].irq;
631
632 return 0;
633 }
634
635 static int gpmc_irq_endis(unsigned irq, bool endis)
636 {
637 int i;
638 u32 regval;
639
640 for (i = 0; i < GPMC_NR_IRQ; i++)
641 if (irq == gpmc_client_irq[i].irq) {
642 regval = gpmc_read_reg(GPMC_IRQENABLE);
643 if (endis)
644 regval |= gpmc_client_irq[i].bitmask;
645 else
646 regval &= ~gpmc_client_irq[i].bitmask;
647 gpmc_write_reg(GPMC_IRQENABLE, regval);
648 break;
649 }
650
651 return 0;
652 }
653
/* irq_chip hooks: toggle the GPMC_IRQENABLE bit that backs this irq. */
static void gpmc_irq_disable(struct irq_data *p)
{
	gpmc_irq_endis(p->irq, false);
}

static void gpmc_irq_enable(struct irq_data *p)
{
	gpmc_irq_endis(p->irq, true);
}
663
/* No-op irq_chip callbacks; actual masking happens via GPMC_IRQENABLE. */
static void gpmc_irq_noop(struct irq_data *data) { }

static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
667
668 static int gpmc_setup_irq(void)
669 {
670 int i;
671 u32 regval;
672
673 if (!gpmc_irq)
674 return -EINVAL;
675
676 gpmc_irq_start = irq_alloc_descs(-1, 0, GPMC_NR_IRQ, 0);
677 if (gpmc_irq_start < 0) {
678 pr_err("irq_alloc_descs failed\n");
679 return gpmc_irq_start;
680 }
681
682 gpmc_irq_chip.name = "gpmc";
683 gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
684 gpmc_irq_chip.irq_enable = gpmc_irq_enable;
685 gpmc_irq_chip.irq_disable = gpmc_irq_disable;
686 gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
687 gpmc_irq_chip.irq_ack = gpmc_irq_noop;
688 gpmc_irq_chip.irq_mask = gpmc_irq_noop;
689 gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
690
691 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
692 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
693
694 for (i = 0; i < GPMC_NR_IRQ; i++) {
695 gpmc_client_irq[i].irq = gpmc_irq_start + i;
696 irq_set_chip_and_handler(gpmc_client_irq[i].irq,
697 &gpmc_irq_chip, handle_simple_irq);
698 set_irq_flags(gpmc_client_irq[i].irq,
699 IRQF_VALID | IRQF_NOAUTOEN);
700 }
701
702 /* Disable interrupts */
703 gpmc_write_reg(GPMC_IRQENABLE, 0);
704
705 /* clear interrupts */
706 regval = gpmc_read_reg(GPMC_IRQSTATUS);
707 gpmc_write_reg(GPMC_IRQSTATUS, regval);
708
709 return request_irq(gpmc_irq, gpmc_handle_irq, 0, "gpmc", NULL);
710 }
711
712 static int gpmc_free_irq(void)
713 {
714 int i;
715
716 if (gpmc_irq)
717 free_irq(gpmc_irq, NULL);
718
719 for (i = 0; i < GPMC_NR_IRQ; i++) {
720 irq_set_handler(gpmc_client_irq[i].irq, NULL);
721 irq_set_chip(gpmc_client_irq[i].irq, &no_irq_chip);
722 irq_modify_status(gpmc_client_irq[i].irq, 0, 0);
723 }
724
725 irq_free_descs(gpmc_irq_start, GPMC_NR_IRQ);
726
727 return 0;
728 }
729
730 static void gpmc_mem_exit(void)
731 {
732 int cs;
733
734 for (cs = 0; cs < GPMC_CS_NUM; cs++) {
735 if (!gpmc_cs_mem_enabled(cs))
736 continue;
737 gpmc_cs_delete_mem(cs);
738 }
739
740 }
741
742 static int gpmc_mem_init(void)
743 {
744 int cs, rc;
745 unsigned long boot_rom_space = 0;
746
747 /* never allocate the first page, to facilitate bug detection;
748 * even if we didn't boot from ROM.
749 */
750 boot_rom_space = BOOT_ROM_SPACE;
751 gpmc_mem_root.start = GPMC_MEM_START + boot_rom_space;
752 gpmc_mem_root.end = GPMC_MEM_END;
753
754 /* Reserve all regions that has been set up by bootloader */
755 for (cs = 0; cs < GPMC_CS_NUM; cs++) {
756 u32 base, size;
757
758 if (!gpmc_cs_mem_enabled(cs))
759 continue;
760 gpmc_cs_get_memconf(cs, &base, &size);
761 rc = gpmc_cs_insert_mem(cs, base, size);
762 if (rc < 0) {
763 while (--cs >= 0)
764 if (gpmc_cs_mem_enabled(cs))
765 gpmc_cs_delete_mem(cs);
766 return rc;
767 }
768 }
769
770 return 0;
771 }
772
/*
 * Round @time_ps up to a whole number of sync-clock periods (fclk
 * divided by gpmc_calc_divider(@sync_clk)) and return the result in ps.
 *
 * NOTE(review): gpmc_calc_divider() can return -1 for an out-of-range
 * @sync_clk and that is not checked here — presumably callers always
 * pass a valid sync clock; confirm.
 */
static u32 gpmc_round_ps_to_sync_clk(u32 time_ps, u32 sync_clk)
{
	u32 temp;
	int div;

	div = gpmc_calc_divider(sync_clk);
	temp = gpmc_ps_to_ticks(time_ps);
	/* round the tick count up to a multiple of div */
	temp = (temp + div - 1) / div;
	return gpmc_ticks_to_ps(temp * div);
}
783
/* XXX: can the cycles be avoided ? */
/*
 * Derive the synchronous-read timings (adv_rd_off, oe_on, access,
 * oe_off, cs_rd_off, rd_cycle) in picoseconds from device timings
 * @dev_t.  @mux selects address/data-multiplexed mode.  Returns 0.
 */
static int gpmc_calc_sync_read_timings(struct gpmc_timings *gpmc_t,
				       struct gpmc_device_timings *dev_t,
				       bool mux)
{
	u32 temp;

	/* adv_rd_off */
	temp = dev_t->t_avdp_r;
	/* XXX: mux check required ? */
	if (mux) {
		/* XXX: t_avdp not to be required for sync, only added for tusb
		 * this indirectly necessitates requirement of t_avdp_r and
		 * t_avdp_w instead of having a single t_avdp
		 */
		temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_avdh);
		temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
	}
	gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp);

	/* oe_on */
	temp = dev_t->t_oeasu; /* XXX: remove this ? */
	if (mux) {
		temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_ach);
		temp = max_t(u32, temp, gpmc_t->adv_rd_off +
				gpmc_ticks_to_ps(dev_t->cyc_aavdh_oe));
	}
	gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);

	/* access */
	/* XXX: any scope for improvement ?, by combining oe_on
	 * and clk_activation, need to check whether
	 * access = clk_activation + round to sync clk ?
	 */
	temp = max_t(u32, dev_t->t_iaa, dev_t->cyc_iaa * gpmc_t->sync_clk);
	temp += gpmc_t->clk_activation;
	if (dev_t->cyc_oe)
		temp = max_t(u32, temp, gpmc_t->oe_on +
				gpmc_ticks_to_ps(dev_t->cyc_oe));
	gpmc_t->access = gpmc_round_ps_to_ticks(temp);

	/* OE and CS deassert one tick after data is sampled */
	gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
	gpmc_t->cs_rd_off = gpmc_t->oe_off;

	/* rd_cycle */
	temp = max_t(u32, dev_t->t_cez_r, dev_t->t_oez);
	temp = gpmc_round_ps_to_sync_clk(temp, gpmc_t->sync_clk) +
							gpmc_t->access;
	/* XXX: barter t_ce_rdyz with t_cez_r ? */
	if (dev_t->t_ce_rdyz)
		temp = max_t(u32, temp, gpmc_t->cs_rd_off + dev_t->t_ce_rdyz);
	gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp);

	return 0;
}
839
/*
 * Derive the synchronous-write timings (adv_wr_off, wr_data_mux_bus,
 * we_on, wr_access, we_off, cs_wr_off, wr_cycle) in picoseconds from
 * device timings @dev_t.  @mux selects address/data-multiplexed mode.
 * Returns 0.
 */
static int gpmc_calc_sync_write_timings(struct gpmc_timings *gpmc_t,
					struct gpmc_device_timings *dev_t,
					bool mux)
{
	u32 temp;

	/* adv_wr_off */
	temp = dev_t->t_avdp_w;
	if (mux) {
		temp = max_t(u32, temp,
			gpmc_t->clk_activation + dev_t->t_avdh);
		temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
	}
	gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp);

	/* wr_data_mux_bus */
	temp = max_t(u32, dev_t->t_weasu,
			gpmc_t->clk_activation + dev_t->t_rdyo);
	/* XXX: shouldn't mux be kept as a whole for wr_data_mux_bus ?,
	 * and in that case remember to handle we_on properly
	 */
	if (mux) {
		temp = max_t(u32, temp,
			gpmc_t->adv_wr_off + dev_t->t_aavdh);
		temp = max_t(u32, temp, gpmc_t->adv_wr_off +
				gpmc_ticks_to_ps(dev_t->cyc_aavdh_we));
	}
	gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp);

	/* we_on */
	if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
		gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu);
	else
		gpmc_t->we_on = gpmc_t->wr_data_mux_bus;

	/* wr_access */
	/* XXX: gpmc_capability check reqd ? , even if not, will not harm */
	gpmc_t->wr_access = gpmc_t->access;

	/* we_off */
	temp = gpmc_t->we_on + dev_t->t_wpl;
	temp = max_t(u32, temp,
			gpmc_t->wr_access + gpmc_ticks_to_ps(1));
	temp = max_t(u32, temp,
		gpmc_t->we_on + gpmc_ticks_to_ps(dev_t->cyc_wpl));
	gpmc_t->we_off = gpmc_round_ps_to_ticks(temp);

	/* CS deasserts t_wph after WE */
	gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off +
							dev_t->t_wph);

	/* wr_cycle */
	temp = gpmc_round_ps_to_sync_clk(dev_t->t_cez_w, gpmc_t->sync_clk);
	temp += gpmc_t->wr_access;
	/* XXX: barter t_ce_rdyz with t_cez_w ? */
	if (dev_t->t_ce_rdyz)
		temp = max_t(u32, temp,
				 gpmc_t->cs_wr_off + dev_t->t_ce_rdyz);
	gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp);

	return 0;
}
901
/*
 * Derive the asynchronous-read timings (adv_rd_off, oe_on, access,
 * oe_off, cs_rd_off, rd_cycle) in picoseconds from device timings
 * @dev_t.  @mux selects address/data-multiplexed mode.  Returns 0.
 */
static int gpmc_calc_async_read_timings(struct gpmc_timings *gpmc_t,
					struct gpmc_device_timings *dev_t,
					bool mux)
{
	u32 temp;

	/* adv_rd_off */
	temp = dev_t->t_avdp_r;
	if (mux)
		temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
	gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp);

	/* oe_on */
	temp = dev_t->t_oeasu;
	if (mux)
		temp = max_t(u32, temp,
			gpmc_t->adv_rd_off + dev_t->t_aavdh);
	gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);

	/* access: latest of the OE-, CE- and address-relative data delays */
	temp = max_t(u32, dev_t->t_iaa, /* XXX: remove t_iaa in async ? */
				gpmc_t->oe_on + dev_t->t_oe);
	temp = max_t(u32, temp,
				gpmc_t->cs_on + dev_t->t_ce);
	temp = max_t(u32, temp,
				gpmc_t->adv_on + dev_t->t_aa);
	gpmc_t->access = gpmc_round_ps_to_ticks(temp);

	/* OE and CS deassert one tick after data is sampled */
	gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
	gpmc_t->cs_rd_off = gpmc_t->oe_off;

	/* rd_cycle */
	temp = max_t(u32, dev_t->t_rd_cycle,
				gpmc_t->cs_rd_off + dev_t->t_cez_r);
	temp = max_t(u32, temp, gpmc_t->oe_off + dev_t->t_oez);
	gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp);

	return 0;
}
941
/*
 * Derive the asynchronous-write timings (adv_wr_off, wr_data_mux_bus,
 * we_on, we_off, cs_wr_off, wr_cycle) in picoseconds from device
 * timings @dev_t.  @mux selects address/data-multiplexed mode.
 * Returns 0.
 */
static int gpmc_calc_async_write_timings(struct gpmc_timings *gpmc_t,
					 struct gpmc_device_timings *dev_t,
					 bool mux)
{
	u32 temp;

	/* adv_wr_off */
	temp = dev_t->t_avdp_w;
	if (mux)
		temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
	gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp);

	/* wr_data_mux_bus */
	temp = dev_t->t_weasu;
	if (mux) {
		temp = max_t(u32, temp, gpmc_t->adv_wr_off + dev_t->t_aavdh);
		temp = max_t(u32, temp, gpmc_t->adv_wr_off +
				gpmc_ticks_to_ps(dev_t->cyc_aavdh_we));
	}
	gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp);

	/* we_on */
	if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
		gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu);
	else
		gpmc_t->we_on = gpmc_t->wr_data_mux_bus;

	/* we_off */
	temp = gpmc_t->we_on + dev_t->t_wpl;
	gpmc_t->we_off = gpmc_round_ps_to_ticks(temp);

	/* CS deasserts t_wph after WE */
	gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off +
							dev_t->t_wph);

	/* wr_cycle */
	temp = max_t(u32, dev_t->t_wr_cycle,
				gpmc_t->cs_wr_off + dev_t->t_cez_w);
	gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp);

	return 0;
}
983
/*
 * Compute the timings shared by synchronous read and write: the
 * sync_clk period, page_burst_access, clk_activation and the
 * extra-delay flags (usable only when the clock divider is 1).
 * Returns 0.
 */
static int gpmc_calc_sync_common_timings(struct gpmc_timings *gpmc_t,
					 struct gpmc_device_timings *dev_t)
{
	u32 temp;

	/* sync clock period in ps: fclk period times the chosen divider */
	gpmc_t->sync_clk = gpmc_calc_divider(dev_t->clk) *
						gpmc_get_fclk_period();

	gpmc_t->page_burst_access = gpmc_round_ps_to_sync_clk(
						dev_t->t_bacc,
						gpmc_t->sync_clk);

	temp = max_t(u32, dev_t->t_ces, dev_t->t_avds);
	gpmc_t->clk_activation = gpmc_round_ps_to_ticks(temp);

	/* Extra delays apply only when GPMC_CLK runs at fclk (div == 1) */
	if (gpmc_calc_divider(gpmc_t->sync_clk) != 1)
		return 0;

	if (dev_t->ce_xdelay)
		gpmc_t->bool_timings.cs_extra_delay = true;
	if (dev_t->avd_xdelay)
		gpmc_t->bool_timings.adv_extra_delay = true;
	if (dev_t->oe_xdelay)
		gpmc_t->bool_timings.oe_extra_delay = true;
	if (dev_t->we_xdelay)
		gpmc_t->bool_timings.we_extra_delay = true;

	return 0;
}
1013
1014 static int gpmc_calc_common_timings(struct gpmc_timings *gpmc_t,
1015 struct gpmc_device_timings *dev_t,
1016 bool sync)
1017 {
1018 u32 temp;
1019
1020 /* cs_on */
1021 gpmc_t->cs_on = gpmc_round_ps_to_ticks(dev_t->t_ceasu);
1022
1023 /* adv_on */
1024 temp = dev_t->t_avdasu;
1025 if (dev_t->t_ce_avd)
1026 temp = max_t(u32, temp,
1027 gpmc_t->cs_on + dev_t->t_ce_avd);
1028 gpmc_t->adv_on = gpmc_round_ps_to_ticks(temp);
1029
1030 if (sync)
1031 gpmc_calc_sync_common_timings(gpmc_t, dev_t);
1032
1033 return 0;
1034 }
1035
/* TODO: remove this function once all peripherals are confirmed to
 * work with generic timing. Simultaneously gpmc_cs_set_timings()
 * has to be modified to handle timings in ps instead of ns
 */
/* Convert every timing field of @t from picoseconds to nanoseconds
 * using truncating integer division. */
static void gpmc_convert_ps_to_ns(struct gpmc_timings *t)
{
	t->cs_on /= 1000;
	t->cs_rd_off /= 1000;
	t->cs_wr_off /= 1000;
	t->adv_on /= 1000;
	t->adv_rd_off /= 1000;
	t->adv_wr_off /= 1000;
	t->we_on /= 1000;
	t->we_off /= 1000;
	t->oe_on /= 1000;
	t->oe_off /= 1000;
	t->page_burst_access /= 1000;
	t->access /= 1000;
	t->rd_cycle /= 1000;
	t->wr_cycle /= 1000;
	t->bus_turnaround /= 1000;
	t->cycle2cycle_delay /= 1000;
	t->wait_monitoring /= 1000;
	t->clk_activation /= 1000;
	t->wr_access /= 1000;
	t->wr_data_mux_bus /= 1000;
}
1063
/**
 * gpmc_calc_timings - derive GPMC timings from raw device timings
 * @gpmc_t: output timings; computed in ps, converted to ns on return
 * @gpmc_s: optional settings selecting sync/async and mux mode
 *	    (NULL means async, non-muxed)
 * @dev_t: device timing parameters
 *
 * Returns 0.
 */
int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
		      struct gpmc_settings *gpmc_s,
		      struct gpmc_device_timings *dev_t)
{
	bool mux = false, sync = false;

	if (gpmc_s) {
		mux = gpmc_s->mux_add_data ? true : false;
		sync = (gpmc_s->sync_read || gpmc_s->sync_write);
	}

	memset(gpmc_t, 0, sizeof(*gpmc_t));

	gpmc_calc_common_timings(gpmc_t, dev_t, sync);

	/* read and write paths may independently be sync or async */
	if (gpmc_s && gpmc_s->sync_read)
		gpmc_calc_sync_read_timings(gpmc_t, dev_t, mux);
	else
		gpmc_calc_async_read_timings(gpmc_t, dev_t, mux);

	if (gpmc_s && gpmc_s->sync_write)
		gpmc_calc_sync_write_timings(gpmc_t, dev_t, mux);
	else
		gpmc_calc_async_write_timings(gpmc_t, dev_t, mux);

	/* TODO: remove, see function definition */
	gpmc_convert_ps_to_ns(gpmc_t);

	return 0;
}
1094
1095 /**
1096 * gpmc_cs_program_settings - programs non-timing related settings
1097 * @cs: GPMC chip-select to program
1098 * @p: pointer to GPMC settings structure
1099 *
1100 * Programs non-timing related settings for a GPMC chip-select, such as
1101 * bus-width, burst configuration, etc. Function should be called once
1102 * for each chip-select that is being used and must be called before
1103 * calling gpmc_cs_set_timings() as timing parameters in the CONFIG1
1104 * register will be initialised to zero by this function. Returns 0 on
1105 * success and appropriate negative error code on failure.
1106 */
1107 int gpmc_cs_program_settings(int cs, struct gpmc_settings *p)
1108 {
1109 u32 config1;
1110
1111 if ((!p->device_width) || (p->device_width > GPMC_DEVWIDTH_16BIT)) {
1112 pr_err("%s: invalid width %d!", __func__, p->device_width);
1113 return -EINVAL;
1114 }
1115
1116 /* Address-data multiplexing not supported for NAND devices */
1117 if (p->device_nand && p->mux_add_data) {
1118 pr_err("%s: invalid configuration!\n", __func__);
1119 return -EINVAL;
1120 }
1121
1122 if ((p->mux_add_data > GPMC_MUX_AD) ||
1123 ((p->mux_add_data == GPMC_MUX_AAD) &&
1124 !(gpmc_capability & GPMC_HAS_MUX_AAD))) {
1125 pr_err("%s: invalid multiplex configuration!\n", __func__);
1126 return -EINVAL;
1127 }
1128
1129 /* Page/burst mode supports lengths of 4, 8 and 16 bytes */
1130 if (p->burst_read || p->burst_write) {
1131 switch (p->burst_len) {
1132 case GPMC_BURST_4:
1133 case GPMC_BURST_8:
1134 case GPMC_BURST_16:
1135 break;
1136 default:
1137 pr_err("%s: invalid page/burst-length (%d)\n",
1138 __func__, p->burst_len);
1139 return -EINVAL;
1140 }
1141 }
1142
1143 if ((p->wait_on_read || p->wait_on_write) &&
1144 (p->wait_pin > gpmc_nr_waitpins)) {
1145 pr_err("%s: invalid wait-pin (%d)\n", __func__, p->wait_pin);
1146 return -EINVAL;
1147 }
1148
1149 config1 = GPMC_CONFIG1_DEVICESIZE((p->device_width - 1));
1150
1151 if (p->sync_read)
1152 config1 |= GPMC_CONFIG1_READTYPE_SYNC;
1153 if (p->sync_write)
1154 config1 |= GPMC_CONFIG1_WRITETYPE_SYNC;
1155 if (p->wait_on_read)
1156 config1 |= GPMC_CONFIG1_WAIT_READ_MON;
1157 if (p->wait_on_write)
1158 config1 |= GPMC_CONFIG1_WAIT_WRITE_MON;
1159 if (p->wait_on_read || p->wait_on_write)
1160 config1 |= GPMC_CONFIG1_WAIT_PIN_SEL(p->wait_pin);
1161 if (p->device_nand)
1162 config1 |= GPMC_CONFIG1_DEVICETYPE(GPMC_DEVICETYPE_NAND);
1163 if (p->mux_add_data)
1164 config1 |= GPMC_CONFIG1_MUXTYPE(p->mux_add_data);
1165 if (p->burst_read)
1166 config1 |= GPMC_CONFIG1_READMULTIPLE_SUPP;
1167 if (p->burst_write)
1168 config1 |= GPMC_CONFIG1_WRITEMULTIPLE_SUPP;
1169 if (p->burst_read || p->burst_write) {
1170 config1 |= GPMC_CONFIG1_PAGE_LEN(p->burst_len >> 3);
1171 config1 |= p->burst_wrap ? GPMC_CONFIG1_WRAPBURST_SUPP : 0;
1172 }
1173
1174 gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, config1);
1175
1176 return 0;
1177 }
1178
1179 #ifdef CONFIG_OF
1180 static struct of_device_id gpmc_dt_ids[] = {
1181 { .compatible = "ti,omap2420-gpmc" },
1182 { .compatible = "ti,omap2430-gpmc" },
1183 { .compatible = "ti,omap3430-gpmc" }, /* omap3430 & omap3630 */
1184 { .compatible = "ti,omap4430-gpmc" }, /* omap4430 & omap4460 & omap543x */
1185 { .compatible = "ti,am3352-gpmc" }, /* am335x devices */
1186 { }
1187 };
1188 MODULE_DEVICE_TABLE(of, gpmc_dt_ids);
1189
/*
 * gpmc_read_timings_dt - extract GPMC timings from a device-tree node
 * @np:     device-tree node carrying the "gpmc,*" timing properties
 * @gpmc_t: timings structure to fill in
 *
 * The structure is zeroed first, so any property absent from the node
 * leaves its field at zero and callers always receive a fully
 * initialised structure.
 */
static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
						struct gpmc_timings *gpmc_t)
{
	u32 val;

	memset(gpmc_t, 0, sizeof(*gpmc_t));

	/* minimum clock period for synchronous mode */
	if (!of_property_read_u32(np, "gpmc,sync-clk", &val))
		gpmc_t->sync_clk = val;

	/* chip select timings */
	if (!of_property_read_u32(np, "gpmc,cs-on", &val))
		gpmc_t->cs_on = val;

	if (!of_property_read_u32(np, "gpmc,cs-rd-off", &val))
		gpmc_t->cs_rd_off = val;

	if (!of_property_read_u32(np, "gpmc,cs-wr-off", &val))
		gpmc_t->cs_wr_off = val;

	/* ADV signal timings */
	if (!of_property_read_u32(np, "gpmc,adv-on", &val))
		gpmc_t->adv_on = val;

	if (!of_property_read_u32(np, "gpmc,adv-rd-off", &val))
		gpmc_t->adv_rd_off = val;

	if (!of_property_read_u32(np, "gpmc,adv-wr-off", &val))
		gpmc_t->adv_wr_off = val;

	/* WE signal timings */
	if (!of_property_read_u32(np, "gpmc,we-on", &val))
		gpmc_t->we_on = val;

	if (!of_property_read_u32(np, "gpmc,we-off", &val))
		gpmc_t->we_off = val;

	/* OE signal timings */
	if (!of_property_read_u32(np, "gpmc,oe-on", &val))
		gpmc_t->oe_on = val;

	if (!of_property_read_u32(np, "gpmc,oe-off", &val))
		gpmc_t->oe_off = val;

	/* access and cycle timings */
	if (!of_property_read_u32(np, "gpmc,page-burst-access", &val))
		gpmc_t->page_burst_access = val;

	if (!of_property_read_u32(np, "gpmc,access", &val))
		gpmc_t->access = val;

	if (!of_property_read_u32(np, "gpmc,rd-cycle", &val))
		gpmc_t->rd_cycle = val;

	if (!of_property_read_u32(np, "gpmc,wr-cycle", &val))
		gpmc_t->wr_cycle = val;

	/* only for OMAP3430 */
	if (!of_property_read_u32(np, "gpmc,wr-access", &val))
		gpmc_t->wr_access = val;

	if (!of_property_read_u32(np, "gpmc,wr-data-mux-bus", &val))
		gpmc_t->wr_data_mux_bus = val;
}
1255
1256 #ifdef CONFIG_MTD_NAND
1257
/*
 * Mapping from the DT "ti,nand-ecc-opt" property strings to the
 * OMAP_ECC_* enumeration values (array index == enum value).
 */
static const char * const nand_ecc_opts[] = {
	[OMAP_ECC_HAMMING_CODE_DEFAULT]		= "sw",
	[OMAP_ECC_HAMMING_CODE_HW]		= "hw",
	[OMAP_ECC_HAMMING_CODE_HW_ROMCODE]	= "hw-romcode",
	[OMAP_ECC_BCH4_CODE_HW]			= "bch4",
	[OMAP_ECC_BCH8_CODE_HW]			= "bch8",
};
1265
1266 static int gpmc_probe_nand_child(struct platform_device *pdev,
1267 struct device_node *child)
1268 {
1269 u32 val;
1270 const char *s;
1271 struct gpmc_timings gpmc_t;
1272 struct omap_nand_platform_data *gpmc_nand_data;
1273
1274 if (of_property_read_u32(child, "reg", &val) < 0) {
1275 dev_err(&pdev->dev, "%s has no 'reg' property\n",
1276 child->full_name);
1277 return -ENODEV;
1278 }
1279
1280 gpmc_nand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_nand_data),
1281 GFP_KERNEL);
1282 if (!gpmc_nand_data)
1283 return -ENOMEM;
1284
1285 gpmc_nand_data->cs = val;
1286 gpmc_nand_data->of_node = child;
1287
1288 if (!of_property_read_string(child, "ti,nand-ecc-opt", &s))
1289 for (val = 0; val < ARRAY_SIZE(nand_ecc_opts); val++)
1290 if (!strcasecmp(s, nand_ecc_opts[val])) {
1291 gpmc_nand_data->ecc_opt = val;
1292 break;
1293 }
1294
1295 val = of_get_nand_bus_width(child);
1296 if (val == 16)
1297 gpmc_nand_data->devsize = NAND_BUSWIDTH_16;
1298
1299 gpmc_read_timings_dt(child, &gpmc_t);
1300 gpmc_nand_init(gpmc_nand_data, &gpmc_t);
1301
1302 return 0;
1303 }
1304 #else
/* Stub used when NAND support is not built in; reports success */
static int gpmc_probe_nand_child(struct platform_device *pdev,
				 struct device_node *child)
{
	return 0;
}
1310 #endif
1311
1312 #ifdef CONFIG_MTD_ONENAND
/*
 * gpmc_probe_onenand_child - set up a OneNAND child node of the GPMC
 * @pdev:  GPMC platform device
 * @child: device-tree node describing the OneNAND chip
 *
 * Builds an omap_onenand_platform_data structure from the child's DT
 * properties and passes it to gpmc_onenand_init(). Returns 0 on
 * success or a negative error code.
 */
static int gpmc_probe_onenand_child(struct platform_device *pdev,
				 struct device_node *child)
{
	u32 val;
	struct omap_onenand_platform_data *gpmc_onenand_data;

	/* The "reg" property holds the chip-select the device is wired to */
	if (of_property_read_u32(child, "reg", &val) < 0) {
		dev_err(&pdev->dev, "%s has no 'reg' property\n",
			child->full_name);
		return -ENODEV;
	}

	gpmc_onenand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_onenand_data),
					 GFP_KERNEL);
	if (!gpmc_onenand_data)
		return -ENOMEM;

	gpmc_onenand_data->cs = val;
	gpmc_onenand_data->of_node = child;
	gpmc_onenand_data->dma_channel = -1;	/* -1 = DMA disabled */

	/* Optional property; the -1 default above stands if it is absent */
	if (!of_property_read_u32(child, "dma-channel", &val))
		gpmc_onenand_data->dma_channel = val;

	gpmc_onenand_init(gpmc_onenand_data);

	return 0;
}
1341 #else
/* Stub used when OneNAND support is not built in; reports success */
static int gpmc_probe_onenand_child(struct platform_device *pdev,
				    struct device_node *child)
{
	return 0;
}
1347 #endif
1348
1349 static int gpmc_probe_dt(struct platform_device *pdev)
1350 {
1351 int ret;
1352 struct device_node *child;
1353 const struct of_device_id *of_id =
1354 of_match_device(gpmc_dt_ids, &pdev->dev);
1355
1356 if (!of_id)
1357 return 0;
1358
1359 ret = of_property_read_u32(pdev->dev.of_node, "gpmc,num-waitpins",
1360 &gpmc_nr_waitpins);
1361 if (ret < 0) {
1362 pr_err("%s: number of wait pins not found!\n", __func__);
1363 return ret;
1364 }
1365
1366 for_each_node_by_name(child, "nand") {
1367 ret = gpmc_probe_nand_child(pdev, child);
1368 if (ret < 0) {
1369 of_node_put(child);
1370 return ret;
1371 }
1372 }
1373
1374 for_each_node_by_name(child, "onenand") {
1375 ret = gpmc_probe_onenand_child(pdev, child);
1376 if (ret < 0) {
1377 of_node_put(child);
1378 return ret;
1379 }
1380 }
1381 return 0;
1382 }
1383 #else
/* Stub used when CONFIG_OF is disabled; nothing to probe from DT */
static int gpmc_probe_dt(struct platform_device *pdev)
{
	return 0;
}
1388 #endif
1389
1390 static int gpmc_probe(struct platform_device *pdev)
1391 {
1392 int rc;
1393 u32 l;
1394 struct resource *res;
1395
1396 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1397 if (res == NULL)
1398 return -ENOENT;
1399
1400 phys_base = res->start;
1401 mem_size = resource_size(res);
1402
1403 gpmc_base = devm_ioremap_resource(&pdev->dev, res);
1404 if (IS_ERR(gpmc_base))
1405 return PTR_ERR(gpmc_base);
1406
1407 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1408 if (res == NULL)
1409 dev_warn(&pdev->dev, "Failed to get resource: irq\n");
1410 else
1411 gpmc_irq = res->start;
1412
1413 gpmc_l3_clk = clk_get(&pdev->dev, "fck");
1414 if (IS_ERR(gpmc_l3_clk)) {
1415 dev_err(&pdev->dev, "error: clk_get\n");
1416 gpmc_irq = 0;
1417 return PTR_ERR(gpmc_l3_clk);
1418 }
1419
1420 clk_prepare_enable(gpmc_l3_clk);
1421
1422 gpmc_dev = &pdev->dev;
1423
1424 l = gpmc_read_reg(GPMC_REVISION);
1425
1426 /*
1427 * FIXME: Once device-tree migration is complete the below flags
1428 * should be populated based upon the device-tree compatible
1429 * string. For now just use the IP revision. OMAP3+ devices have
1430 * the wr_access and wr_data_mux_bus register fields. OMAP4+
1431 * devices support the addr-addr-data multiplex protocol.
1432 *
1433 * GPMC IP revisions:
1434 * - OMAP24xx = 2.0
1435 * - OMAP3xxx = 5.0
1436 * - OMAP44xx/54xx/AM335x = 6.0
1437 */
1438 if (GPMC_REVISION_MAJOR(l) > 0x4)
1439 gpmc_capability = GPMC_HAS_WR_ACCESS | GPMC_HAS_WR_DATA_MUX_BUS;
1440 if (GPMC_REVISION_MAJOR(l) > 0x5)
1441 gpmc_capability |= GPMC_HAS_MUX_AAD;
1442 dev_info(gpmc_dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l),
1443 GPMC_REVISION_MINOR(l));
1444
1445 rc = gpmc_mem_init();
1446 if (rc < 0) {
1447 clk_disable_unprepare(gpmc_l3_clk);
1448 clk_put(gpmc_l3_clk);
1449 dev_err(gpmc_dev, "failed to reserve memory\n");
1450 return rc;
1451 }
1452
1453 if (gpmc_setup_irq() < 0)
1454 dev_warn(gpmc_dev, "gpmc_setup_irq failed\n");
1455
1456 /* Now the GPMC is initialised, unreserve the chip-selects */
1457 gpmc_cs_map = 0;
1458
1459 if (!pdev->dev.of_node)
1460 gpmc_nr_waitpins = GPMC_NR_WAITPINS;
1461
1462 rc = gpmc_probe_dt(pdev);
1463 if (rc < 0) {
1464 clk_disable_unprepare(gpmc_l3_clk);
1465 clk_put(gpmc_l3_clk);
1466 dev_err(gpmc_dev, "failed to probe DT parameters\n");
1467 return rc;
1468 }
1469
1470 return 0;
1471 }
1472
/*
 * gpmc_remove - tear down the GPMC controller driver
 * @pdev: GPMC platform device
 *
 * Releases the GPMC interrupt chain and the reserved memory region.
 * Always returns 0.
 */
static int gpmc_remove(struct platform_device *pdev)
{
	gpmc_free_irq();
	gpmc_mem_exit();
	gpmc_dev = NULL;
	return 0;
}
1480
/* GPMC platform driver; matched by name on legacy boots, by OF table on DT */
static struct platform_driver gpmc_driver = {
	.probe		= gpmc_probe,
	.remove		= gpmc_remove,
	.driver		= {
		.name	= DEVICE_NAME,
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(gpmc_dt_ids),
	},
};
1490
/* Module init: register the GPMC platform driver */
static __init int gpmc_init(void)
{
	return platform_driver_register(&gpmc_driver);
}
1495
1496 static __exit void gpmc_exit(void)
1497 {
1498 platform_driver_unregister(&gpmc_driver);
1499
1500 }
1501
/* Register early (postcore) so the GPMC is ready before its users probe */
omap_postcore_initcall(gpmc_init);
module_exit(gpmc_exit);
1504
1505 static int __init omap_gpmc_init(void)
1506 {
1507 struct omap_hwmod *oh;
1508 struct platform_device *pdev;
1509 char *oh_name = "gpmc";
1510
1511 /*
1512 * if the board boots up with a populated DT, do not
1513 * manually add the device from this initcall
1514 */
1515 if (of_have_populated_dt())
1516 return -ENODEV;
1517
1518 oh = omap_hwmod_lookup(oh_name);
1519 if (!oh) {
1520 pr_err("Could not look up %s\n", oh_name);
1521 return -ENODEV;
1522 }
1523
1524 pdev = omap_device_build(DEVICE_NAME, -1, oh, NULL, 0);
1525 WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name);
1526
1527 return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
1528 }
1529 omap_postcore_initcall(omap_gpmc_init);
1530
1531 static irqreturn_t gpmc_handle_irq(int irq, void *dev)
1532 {
1533 int i;
1534 u32 regval;
1535
1536 regval = gpmc_read_reg(GPMC_IRQSTATUS);
1537
1538 if (!regval)
1539 return IRQ_NONE;
1540
1541 for (i = 0; i < GPMC_NR_IRQ; i++)
1542 if (regval & gpmc_client_irq[i].bitmask)
1543 generic_handle_irq(gpmc_client_irq[i].irq);
1544
1545 gpmc_write_reg(GPMC_IRQSTATUS, regval);
1546
1547 return IRQ_HANDLED;
1548 }
1549
1550 #ifdef CONFIG_ARCH_OMAP3
/* Saved GPMC register state for off-mode context save/restore */
static struct omap3_gpmc_regs gpmc_context;
1552
/*
 * omap3_gpmc_save_context - save GPMC registers before an off-mode entry
 *
 * Snapshots the global GPMC configuration plus the per-chip-select
 * CONFIG1..CONFIG7 registers of every enabled chip-select, so that
 * omap3_gpmc_restore_context() can reprogram them after power loss.
 */
void omap3_gpmc_save_context(void)
{
	int i;

	gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
	gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
	gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
	gpmc_context.config = gpmc_read_reg(GPMC_CONFIG);
	gpmc_context.prefetch_config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1);
	gpmc_context.prefetch_config2 = gpmc_read_reg(GPMC_PREFETCH_CONFIG2);
	gpmc_context.prefetch_control = gpmc_read_reg(GPMC_PREFETCH_CONTROL);
	for (i = 0; i < GPMC_CS_NUM; i++) {
		/* only save chip-selects that are actually enabled */
		gpmc_context.cs_context[i].is_valid = gpmc_cs_mem_enabled(i);
		if (gpmc_context.cs_context[i].is_valid) {
			gpmc_context.cs_context[i].config1 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG1);
			gpmc_context.cs_context[i].config2 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG2);
			gpmc_context.cs_context[i].config3 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG3);
			gpmc_context.cs_context[i].config4 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG4);
			gpmc_context.cs_context[i].config5 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG5);
			gpmc_context.cs_context[i].config6 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG6);
			gpmc_context.cs_context[i].config7 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG7);
		}
	}
}
1584
/*
 * omap3_gpmc_restore_context - restore GPMC registers after off-mode exit
 *
 * Reprograms the global GPMC configuration and the CONFIG1..CONFIG7
 * registers of every chip-select that was enabled when
 * omap3_gpmc_save_context() ran.
 */
void omap3_gpmc_restore_context(void)
{
	int i;

	gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
	gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
	gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
	gpmc_write_reg(GPMC_CONFIG, gpmc_context.config);
	gpmc_write_reg(GPMC_PREFETCH_CONFIG1, gpmc_context.prefetch_config1);
	gpmc_write_reg(GPMC_PREFETCH_CONFIG2, gpmc_context.prefetch_config2);
	gpmc_write_reg(GPMC_PREFETCH_CONTROL, gpmc_context.prefetch_control);
	for (i = 0; i < GPMC_CS_NUM; i++) {
		/* skip chip-selects that were not enabled at save time */
		if (gpmc_context.cs_context[i].is_valid) {
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG1,
					  gpmc_context.cs_context[i].config1);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG2,
					  gpmc_context.cs_context[i].config2);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG3,
					  gpmc_context.cs_context[i].config3);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG4,
					  gpmc_context.cs_context[i].config4);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG5,
					  gpmc_context.cs_context[i].config5);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG6,
					  gpmc_context.cs_context[i].config6);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG7,
					  gpmc_context.cs_context[i].config7);
		}
	}
}
1615 #endif /* CONFIG_ARCH_OMAP3 */
This page took 0.066824 seconds and 5 git commands to generate.