drivers/gpio/gpio-pxa.c
/*
 *  linux/arch/arm/plat-pxa/gpio.c
 *
 *  Generic PXA GPIO handling
 *
 *  Author:	Nicolas Pitre
 *  Created:	Jun 15, 2001
 *  Copyright:	MontaVista Software Inc.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/gpio-pxa.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/slab.h>

#include <mach/irqs.h>

/*
 * We handle the GPIOs by banks, each bank covers up to 32 GPIOs with
 * one set of registers. The register offsets are organized below:
 *
 *           GPLR    GPDR    GPSR    GPCR    GRER    GFER    GEDR
 * BANK 0 - 0x0000  0x000C  0x0018  0x0024  0x0030  0x003C  0x0048
 * BANK 1 - 0x0004  0x0010  0x001C  0x0028  0x0034  0x0040  0x004C
 * BANK 2 - 0x0008  0x0014  0x0020  0x002C  0x0038  0x0044  0x0050
 *
 * BANK 3 - 0x0100  0x010C  0x0118  0x0124  0x0130  0x013C  0x0148
 * BANK 4 - 0x0104  0x0110  0x011C  0x0128  0x0134  0x0140  0x014C
 * BANK 5 - 0x0108  0x0114  0x0120  0x012C  0x0138  0x0144  0x0150
 *
 * NOTE:
 *   BANK 3 is only available on PXA27x and later processors.
 *   BANK 4 and 5 are only available on PXA935
 */

#define GPLR_OFFSET	0x00
#define GPDR_OFFSET	0x0C
#define GPSR_OFFSET	0x18
#define GPCR_OFFSET	0x24
#define GRER_OFFSET	0x30
#define GFER_OFFSET	0x3C
#define GEDR_OFFSET	0x48
#define GAFR_OFFSET	0x54
#define ED_MASK_OFFSET	0x9C	/* GPIO edge detection for AP side */

#define BANK_OFF(n)	(((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
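
/*
 * Illustrative example: BANK_OFF() reproduces the table above, e.g.
 *
 *      BANK_OFF(2) = 2 << 2            = 0x008
 *      BANK_OFF(4) = 0x100 + (1 << 2)  = 0x104
 *
 * so bank 4's GPDR is read at gpio_reg_base + 0x104 + GPDR_OFFSET = 0x110.
 */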

int pxa_last_gpio;

struct pxa_gpio_chip {
        struct gpio_chip chip;
        void __iomem *regbase;
        char label[10];

        unsigned long irq_mask;
        unsigned long irq_edge_rise;
        unsigned long irq_edge_fall;

#ifdef CONFIG_PM
        unsigned long saved_gplr;
        unsigned long saved_gpdr;
        unsigned long saved_grer;
        unsigned long saved_gfer;
#endif
};

enum {
        PXA25X_GPIO = 0,
        PXA26X_GPIO,
        PXA27X_GPIO,
        PXA3XX_GPIO,
        PXA93X_GPIO,
        MMP_GPIO = 0x10,
        MMP2_GPIO,
};

static DEFINE_SPINLOCK(gpio_lock);
static struct pxa_gpio_chip *pxa_gpio_chips;
static int gpio_type;
static void __iomem *gpio_reg_base;

#define for_each_gpio_chip(i, c)			\
        for (i = 0, c = &pxa_gpio_chips[0]; i <= pxa_last_gpio; i += 32, c++)

static inline void __iomem *gpio_chip_base(struct gpio_chip *c)
{
        return container_of(c, struct pxa_gpio_chip, chip)->regbase;
}

static inline struct pxa_gpio_chip *gpio_to_pxachip(unsigned gpio)
{
        return &pxa_gpio_chips[gpio_to_bank(gpio)];
}

static inline int gpio_is_pxa_type(int type)
{
        return (type & MMP_GPIO) == 0;
}

static inline int gpio_is_mmp_type(int type)
{
        return (type & MMP_GPIO) != 0;
}

/* GPIO86/87/88/89 on PXA26x have their direction bits in GPDR2 inverted,
 * and their Alternate Function value for GPIO is '1' in GAFRx.
 */
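/*
 * For example, __gpio_is_inverted(86) is 1 on PXA26x, so the direction
 * helpers below *set* the GPDR bit to make GPIO86 an input and *clear*
 * it for output, the opposite of every other GPIO.
 */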
static inline int __gpio_is_inverted(int gpio)
{
        if ((gpio_type == PXA26X_GPIO) && (gpio > 85))
                return 1;
        return 0;
}

/*
 * On PXA25x and PXA27x, GAFRx and GPDRx together decide the alternate
 * function of a GPIO, and GPDRx cannot be altered once configured. It
 * is attributed as "occupied" here (I know this terminology isn't
 * accurate, you are welcome to propose a better one :-)
 */
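/*
 * For instance, in the PXA25x/PXA27x case below a pin whose GAFR field
 * is 0 (GPIO function) and whose GPDR bit is 0 (input) is reported as
 * free; any other combination counts as "occupied", so IRQ_TYPE_PROBE
 * will leave it alone.
 */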
static inline int __gpio_is_occupied(unsigned gpio)
{
        struct pxa_gpio_chip *pxachip;
        void __iomem *base;
        unsigned long gafr = 0, gpdr = 0;
        int ret, af = 0, dir = 0;

        pxachip = gpio_to_pxachip(gpio);
        base = gpio_chip_base(&pxachip->chip);
        gpdr = readl_relaxed(base + GPDR_OFFSET);

        switch (gpio_type) {
        case PXA25X_GPIO:
        case PXA26X_GPIO:
        case PXA27X_GPIO:
                gafr = readl_relaxed(base + GAFR_OFFSET);
                af = (gafr >> ((gpio & 0xf) * 2)) & 0x3;
                dir = gpdr & GPIO_bit(gpio);

                if (__gpio_is_inverted(gpio))
                        ret = (af != 1) || (dir == 0);
                else
                        ret = (af != 0) || (dir != 0);
                break;
        default:
                ret = gpdr & GPIO_bit(gpio);
                break;
        }
        return ret;
}

#ifdef CONFIG_ARCH_PXA
static inline int __pxa_gpio_to_irq(int gpio)
{
        if (gpio_is_pxa_type(gpio_type))
                return PXA_GPIO_TO_IRQ(gpio);
        return -1;
}

static inline int __pxa_irq_to_gpio(int irq)
{
        if (gpio_is_pxa_type(gpio_type))
                return irq - PXA_GPIO_TO_IRQ(0);
        return -1;
}
#else
static inline int __pxa_gpio_to_irq(int gpio) { return -1; }
static inline int __pxa_irq_to_gpio(int irq) { return -1; }
#endif

#ifdef CONFIG_ARCH_MMP
static inline int __mmp_gpio_to_irq(int gpio)
{
        if (gpio_is_mmp_type(gpio_type))
                return MMP_GPIO_TO_IRQ(gpio);
        return -1;
}

static inline int __mmp_irq_to_gpio(int irq)
{
        if (gpio_is_mmp_type(gpio_type))
                return irq - MMP_GPIO_TO_IRQ(0);
        return -1;
}
#else
static inline int __mmp_gpio_to_irq(int gpio) { return -1; }
static inline int __mmp_irq_to_gpio(int irq) { return -1; }
#endif

static int pxa_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
        int gpio, ret;

        gpio = chip->base + offset;
        ret = __pxa_gpio_to_irq(gpio);
        if (ret >= 0)
                return ret;
        return __mmp_gpio_to_irq(gpio);
}

int pxa_irq_to_gpio(int irq)
{
        int ret;

        ret = __pxa_irq_to_gpio(irq);
        if (ret >= 0)
                return ret;
        return __mmp_irq_to_gpio(irq);
}

static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
        void __iomem *base = gpio_chip_base(chip);
        uint32_t value, mask = 1 << offset;
        unsigned long flags;

        spin_lock_irqsave(&gpio_lock, flags);

        value = readl_relaxed(base + GPDR_OFFSET);
        if (__gpio_is_inverted(chip->base + offset))
                value |= mask;
        else
                value &= ~mask;
        writel_relaxed(value, base + GPDR_OFFSET);

        spin_unlock_irqrestore(&gpio_lock, flags);
        return 0;
}

static int pxa_gpio_direction_output(struct gpio_chip *chip,
                                     unsigned offset, int value)
{
        void __iomem *base = gpio_chip_base(chip);
        uint32_t tmp, mask = 1 << offset;
        unsigned long flags;

        writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));

        spin_lock_irqsave(&gpio_lock, flags);

        tmp = readl_relaxed(base + GPDR_OFFSET);
        if (__gpio_is_inverted(chip->base + offset))
                tmp &= ~mask;
        else
                tmp |= mask;
        writel_relaxed(tmp, base + GPDR_OFFSET);

        spin_unlock_irqrestore(&gpio_lock, flags);
        return 0;
}

static int pxa_gpio_get(struct gpio_chip *chip, unsigned offset)
{
        return readl_relaxed(gpio_chip_base(chip) + GPLR_OFFSET) & (1 << offset);
}

static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
        writel_relaxed(1 << offset, gpio_chip_base(chip) +
                                (value ? GPSR_OFFSET : GPCR_OFFSET));
}

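/*
 * Each bank of up to 32 GPIOs below is registered as its own gpio_chip,
 * with its registers at gpio_reg_base + BANK_OFF(bank).  As an example
 * (assuming gpio_to_bank() is a divide by 32): pxa_last_gpio = 120 on
 * PXA27x yields four chips, "gpio-0" .. "gpio-3", the last one exposing
 * 120 - 96 + 1 = 25 GPIOs.
 */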
static int __devinit pxa_init_gpio_chip(int gpio_end)
{
        int i, gpio, nbanks = gpio_to_bank(gpio_end) + 1;
        struct pxa_gpio_chip *chips;

        chips = kzalloc(nbanks * sizeof(struct pxa_gpio_chip), GFP_KERNEL);
        if (chips == NULL) {
                pr_err("%s: failed to allocate GPIO chips\n", __func__);
                return -ENOMEM;
        }

        for (i = 0, gpio = 0; i < nbanks; i++, gpio += 32) {
                struct gpio_chip *c = &chips[i].chip;

                sprintf(chips[i].label, "gpio-%d", i);
                chips[i].regbase = gpio_reg_base + BANK_OFF(i);

                c->base = gpio;
                c->label = chips[i].label;

                c->direction_input = pxa_gpio_direction_input;
                c->direction_output = pxa_gpio_direction_output;
                c->get = pxa_gpio_get;
                c->set = pxa_gpio_set;
                c->to_irq = pxa_gpio_to_irq;

                /* number of GPIOs on last bank may be less than 32 */
                c->ngpio = (gpio + 31 > gpio_end) ? (gpio_end - gpio + 1) : 32;
                gpiochip_add(c);
        }
        pxa_gpio_chips = chips;
        return 0;
}

/* Update only those GRERx and GFERx edge detection register bits whose
 * corresponding bits are set in c->irq_mask.
 */
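/*
 * Worked example: with irq_mask = 0x3, irq_edge_rise = 0x1 and
 * irq_edge_fall = 0x2, only bits 0 and 1 of GRER/GFER are rewritten
 * (GRER bit 0 set, GFER bit 1 set); all other bits keep whatever the
 * hardware currently holds.
 */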
static inline void update_edge_detect(struct pxa_gpio_chip *c)
{
        uint32_t grer, gfer;

        grer = readl_relaxed(c->regbase + GRER_OFFSET) & ~c->irq_mask;
        gfer = readl_relaxed(c->regbase + GFER_OFFSET) & ~c->irq_mask;
        grer |= c->irq_edge_rise & c->irq_mask;
        gfer |= c->irq_edge_fall & c->irq_mask;
        writel_relaxed(grer, c->regbase + GRER_OFFSET);
        writel_relaxed(gfer, c->regbase + GFER_OFFSET);
}

static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
{
        struct pxa_gpio_chip *c;
        int gpio = pxa_irq_to_gpio(d->irq);
        unsigned long gpdr, mask = GPIO_bit(gpio);

        c = gpio_to_pxachip(gpio);

        if (type == IRQ_TYPE_PROBE) {
                /* Don't mess with enabled GPIOs using preconfigured edges or
                 * GPIOs set to alternate function or to output during probe
                 */
                if ((c->irq_edge_rise | c->irq_edge_fall) & GPIO_bit(gpio))
                        return 0;

                if (__gpio_is_occupied(gpio))
                        return 0;

                type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
        }

        gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);

        if (__gpio_is_inverted(gpio))
                writel_relaxed(gpdr | mask, c->regbase + GPDR_OFFSET);
        else
                writel_relaxed(gpdr & ~mask, c->regbase + GPDR_OFFSET);

        if (type & IRQ_TYPE_EDGE_RISING)
                c->irq_edge_rise |= mask;
        else
                c->irq_edge_rise &= ~mask;

        if (type & IRQ_TYPE_EDGE_FALLING)
                c->irq_edge_fall |= mask;
        else
                c->irq_edge_fall &= ~mask;

        update_edge_detect(c);

        pr_debug("%s: IRQ%d (GPIO%d) - edge%s%s\n", __func__, d->irq, gpio,
                ((type & IRQ_TYPE_EDGE_RISING) ? " rising" : ""),
                ((type & IRQ_TYPE_EDGE_FALLING) ? " falling" : ""));
        return 0;
}

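/*
 * Demux handler for the chained GPIO interrupt(s): scan every bank's
 * GEDR, ack and dispatch each pending, unmasked edge, and only return
 * once a complete pass over all banks finds nothing pending.
 */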
static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
{
        struct pxa_gpio_chip *c;
        int loop, gpio, gpio_base, n;
        unsigned long gedr;

        do {
                loop = 0;
                for_each_gpio_chip(gpio, c) {
                        gpio_base = c->chip.base;

                        gedr = readl_relaxed(c->regbase + GEDR_OFFSET);
                        gedr = gedr & c->irq_mask;
                        writel_relaxed(gedr, c->regbase + GEDR_OFFSET);

                        n = find_first_bit(&gedr, BITS_PER_LONG);
                        while (n < BITS_PER_LONG) {
                                loop = 1;

                                generic_handle_irq(gpio_to_irq(gpio_base + n));
                                n = find_next_bit(&gedr, BITS_PER_LONG, n + 1);
                        }
                }
        } while (loop);
}

static void pxa_ack_muxed_gpio(struct irq_data *d)
{
        int gpio = pxa_irq_to_gpio(d->irq);
        struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);

        writel_relaxed(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
}

static void pxa_mask_muxed_gpio(struct irq_data *d)
{
        int gpio = pxa_irq_to_gpio(d->irq);
        struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
        uint32_t grer, gfer;

        c->irq_mask &= ~GPIO_bit(gpio);

        grer = readl_relaxed(c->regbase + GRER_OFFSET) & ~GPIO_bit(gpio);
        gfer = readl_relaxed(c->regbase + GFER_OFFSET) & ~GPIO_bit(gpio);
        writel_relaxed(grer, c->regbase + GRER_OFFSET);
        writel_relaxed(gfer, c->regbase + GFER_OFFSET);
}

static void pxa_unmask_muxed_gpio(struct irq_data *d)
{
        int gpio = pxa_irq_to_gpio(d->irq);
        struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);

        c->irq_mask |= GPIO_bit(gpio);
        update_edge_detect(c);
}

static struct irq_chip pxa_muxed_gpio_chip = {
        .name		= "GPIO",
        .irq_ack	= pxa_ack_muxed_gpio,
        .irq_mask	= pxa_mask_muxed_gpio,
        .irq_unmask	= pxa_unmask_muxed_gpio,
        .irq_set_type	= pxa_gpio_irq_type,
};

static int pxa_gpio_nums(void)
{
        int count = 0;

#ifdef CONFIG_ARCH_PXA
        if (cpu_is_pxa25x()) {
#ifdef CONFIG_CPU_PXA26x
                count = 89;
                gpio_type = PXA26X_GPIO;
#elif defined(CONFIG_PXA25x)
                count = 84;
                gpio_type = PXA26X_GPIO;
#endif /* CONFIG_CPU_PXA26x */
        } else if (cpu_is_pxa27x()) {
                count = 120;
                gpio_type = PXA27X_GPIO;
        } else if (cpu_is_pxa93x() || cpu_is_pxa95x()) {
                count = 191;
                gpio_type = PXA93X_GPIO;
        } else if (cpu_is_pxa3xx()) {
                count = 127;
                gpio_type = PXA3XX_GPIO;
        }
#endif /* CONFIG_ARCH_PXA */

#ifdef CONFIG_ARCH_MMP
        if (cpu_is_pxa168() || cpu_is_pxa910()) {
                count = 127;
                gpio_type = MMP_GPIO;
        } else if (cpu_is_mmp2()) {
                count = 191;
                gpio_type = MMP2_GPIO;
        }
#endif /* CONFIG_ARCH_MMP */
        return count;
}

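/*
 * On PXA the first two GPIOs have dedicated interrupt lines ("gpio0" and
 * "gpio1"), so when those resources exist the muxed range handled below
 * starts at GPIO2 (gpio_offset = 2); otherwise, as on MMP, everything is
 * demuxed from the single "gpio_mux" interrupt.
 */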
static int __devinit pxa_gpio_probe(struct platform_device *pdev)
{
        struct pxa_gpio_chip *c;
        struct resource *res;
        struct clk *clk;
        int gpio, irq, ret;
        int irq0 = 0, irq1 = 0, irq_mux, gpio_offset = 0;

        pxa_last_gpio = pxa_gpio_nums();
        if (!pxa_last_gpio)
                return -EINVAL;

        irq0 = platform_get_irq_byname(pdev, "gpio0");
        irq1 = platform_get_irq_byname(pdev, "gpio1");
        irq_mux = platform_get_irq_byname(pdev, "gpio_mux");
        if ((irq0 > 0 && irq1 <= 0) || (irq0 <= 0 && irq1 > 0)
                        || (irq_mux <= 0))
                return -EINVAL;
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -EINVAL;
        gpio_reg_base = ioremap(res->start, resource_size(res));
        if (!gpio_reg_base)
                return -EINVAL;

        if (irq0 > 0)
                gpio_offset = 2;

        clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk)) {
                dev_err(&pdev->dev, "Error %ld to get gpio clock\n",
                        PTR_ERR(clk));
                iounmap(gpio_reg_base);
                return PTR_ERR(clk);
        }
        ret = clk_prepare(clk);
        if (ret) {
                clk_put(clk);
                iounmap(gpio_reg_base);
                return ret;
        }
        ret = clk_enable(clk);
        if (ret) {
                clk_unprepare(clk);
                clk_put(clk);
                iounmap(gpio_reg_base);
                return ret;
        }

        /* Initialize GPIO chips */
        pxa_init_gpio_chip(pxa_last_gpio);

        /* clear all GPIO edge detects */
        for_each_gpio_chip(gpio, c) {
                writel_relaxed(0, c->regbase + GFER_OFFSET);
                writel_relaxed(0, c->regbase + GRER_OFFSET);
                writel_relaxed(~0, c->regbase + GEDR_OFFSET);
                /* unmask GPIO edge detect for AP side */
                if (gpio_is_mmp_type(gpio_type))
                        writel_relaxed(~0, c->regbase + ED_MASK_OFFSET);
        }

#ifdef CONFIG_ARCH_PXA
        irq = gpio_to_irq(0);
        irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
                                 handle_edge_irq);
        set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
        irq_set_chained_handler(IRQ_GPIO0, pxa_gpio_demux_handler);

        irq = gpio_to_irq(1);
        irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
                                 handle_edge_irq);
        set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
        irq_set_chained_handler(IRQ_GPIO1, pxa_gpio_demux_handler);
#endif

        for (irq = gpio_to_irq(gpio_offset);
                irq <= gpio_to_irq(pxa_last_gpio); irq++) {
                irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
                                         handle_edge_irq);
                set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
        }

        irq_set_chained_handler(irq_mux, pxa_gpio_demux_handler);
        return 0;
}

static struct platform_driver pxa_gpio_driver = {
        .probe		= pxa_gpio_probe,
        .driver		= {
                .name	= "pxa-gpio",
        },
};

static int __init pxa_gpio_init(void)
{
        return platform_driver_register(&pxa_gpio_driver);
}
postcore_initcall(pxa_gpio_init);

#ifdef CONFIG_PM
static int pxa_gpio_suspend(void)
{
        struct pxa_gpio_chip *c;
        int gpio;

        for_each_gpio_chip(gpio, c) {
                c->saved_gplr = readl_relaxed(c->regbase + GPLR_OFFSET);
                c->saved_gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);
                c->saved_grer = readl_relaxed(c->regbase + GRER_OFFSET);
                c->saved_gfer = readl_relaxed(c->regbase + GFER_OFFSET);

                /* Clear GPIO transition detect bits */
                writel_relaxed(0xffffffff, c->regbase + GEDR_OFFSET);
        }
        return 0;
}

static void pxa_gpio_resume(void)
{
        struct pxa_gpio_chip *c;
        int gpio;

        for_each_gpio_chip(gpio, c) {
                /* restore level with set/clear */
                writel_relaxed(c->saved_gplr, c->regbase + GPSR_OFFSET);
                writel_relaxed(~c->saved_gplr, c->regbase + GPCR_OFFSET);

                writel_relaxed(c->saved_grer, c->regbase + GRER_OFFSET);
                writel_relaxed(c->saved_gfer, c->regbase + GFER_OFFSET);
                writel_relaxed(c->saved_gpdr, c->regbase + GPDR_OFFSET);
        }
}
#else
#define pxa_gpio_suspend	NULL
#define pxa_gpio_resume		NULL
#endif

struct syscore_ops pxa_gpio_syscore_ops = {
        .suspend	= pxa_gpio_suspend,
        .resume		= pxa_gpio_resume,
};

static int __init pxa_gpio_sysinit(void)
{
        register_syscore_ops(&pxa_gpio_syscore_ops);
        return 0;
}
postcore_initcall(pxa_gpio_sysinit);