regmap: Report if we actually handled an interrupt in regmap-irq
drivers/base/regmap/regmap-irq.c
/*
 * regmap based irq_chip
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/export.h>
#include <linux/regmap.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

#include "internal.h"

struct regmap_irq_chip_data {
	struct mutex lock;

	struct regmap *map;
	struct regmap_irq_chip *chip;

	int irq_base;

	void *status_reg_buf;
	unsigned int *status_buf;
	unsigned int *mask_buf;
	unsigned int *mask_buf_def;
};

static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
				     int irq)
{
	return &data->chip->irqs[irq - data->irq_base];
}

static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_lock(&d->lock);
}

static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	int i, ret;

	/*
	 * If there's been a change in the mask, write it back to the
	 * hardware.  We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		ret = regmap_update_bits(d->map, d->chip->mask_base + i,
					 d->mask_buf_def[i], d->mask_buf[i]);
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				d->chip->mask_base + i);
	}

	mutex_unlock(&d->lock);
}
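
/*
 * Note: the mask sync above only avoids redundant bus traffic when the
 * underlying regmap has a register cache enabled.  A minimal sketch of
 * the regmap_config fields a chip driver might set to get that behaviour
 * (the foo_regmap_config name and register sizes are purely illustrative,
 * not required by this file):
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 8,
 *		.cache_type = REGCACHE_RBTREE,
 *	};
 */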

static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq);

	d->mask_buf[irq_data->reg_offset] &= ~irq_data->mask;
}

static void regmap_irq_disable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq);

	d->mask_buf[irq_data->reg_offset] |= irq_data->mask;
}

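/*
 * genirq takes irq_bus_lock() before calling the enable/disable
 * callbacks and irq_bus_sync_unlock() afterwards.  Because this chip
 * sits behind a slow bus (typically I2C or SPI), enable/disable only
 * update the cached mask_buf under the mutex; the actual register
 * writes happen in regmap_irq_sync_unlock(), where sleeping is allowed.
 */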
static struct irq_chip regmap_irq_chip = {
	.name = "regmap",
	.irq_bus_lock = regmap_irq_lock,
	.irq_bus_sync_unlock = regmap_irq_sync_unlock,
	.irq_disable = regmap_irq_disable,
	.irq_enable = regmap_irq_enable,
};

static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
	struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	u8 *buf8 = data->status_reg_buf;
	u16 *buf16 = data->status_reg_buf;
	u32 *buf32 = data->status_reg_buf;
	bool handled = false;

	ret = regmap_bulk_read(map, chip->status_base, data->status_reg_buf,
			       chip->num_regs);
	if (ret != 0) {
		dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
		return IRQ_NONE;
	}

	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowledging the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
		switch (map->format.val_bytes) {
		case 1:
			data->status_buf[i] = buf8[i];
			break;
		case 2:
			data->status_buf[i] = buf16[i];
			break;
		case 4:
			data->status_buf[i] = buf32[i];
			break;
		default:
			BUG();
			return IRQ_NONE;
		}

		data->status_buf[i] &= ~data->mask_buf[i];

		if (data->status_buf[i] && chip->ack_base) {
			ret = regmap_write(map, chip->ack_base + i,
					   data->status_buf[i]);
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					chip->ack_base + i, ret);
		}
	}

	for (i = 0; i < chip->num_irqs; i++) {
		if (data->status_buf[chip->irqs[i].reg_offset] &
		    chip->irqs[i].mask) {
			handle_nested_irq(data->irq_base + i);
			handled = true;
		}
	}

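	/*
	 * Report whether we actually handled anything: returning
	 * IRQ_NONE when no unmasked status bits were set lets the
	 * genirq core's spurious interrupt detection kick in if the
	 * line is stuck or shared with a misbehaving device.
	 */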
	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

/**
 * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
 *
 * @map:       The regmap for the device.
 * @irq:       The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base:  Allocate at specific IRQ number if irq_base > 0.
 * @chip:      Configuration for the interrupt controller.
 * @data:      Runtime data structure for the controller, allocated
 *             on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int cur_irq, i;
	int ret = -ENOMEM;

	irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
	if (irq_base < 0) {
		dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
			 irq_base);
		return irq_base;
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		ret = -ENOMEM;
		goto err_descs;
	}

	d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->status_reg_buf = kzalloc(map->format.val_bytes * chip->num_regs,
				    GFP_KERNEL);
	if (!d->status_reg_buf)
		goto err_alloc;

	d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs,
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;
	mutex_init(&d->lock);

	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		ret = regmap_write(map, chip->mask_base + i, d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				chip->mask_base + i, ret);
			goto err_alloc;
		}
	}

	/* Register them with genirq */
	for (cur_irq = irq_base;
	     cur_irq < chip->num_irqs + irq_base;
	     cur_irq++) {
		irq_set_chip_data(cur_irq, d);
		irq_set_chip_and_handler(cur_irq, &regmap_irq_chip,
					 handle_edge_irq);
		irq_set_nested_thread(cur_irq, 1);

		/* ARM needs us to explicitly flag the IRQ as valid
		 * and will set them noprobe when we do so. */
#ifdef CONFIG_ARM
		set_irq_flags(cur_irq, IRQF_VALID);
#else
		irq_set_noprobe(cur_irq);
#endif
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret);
		goto err_alloc;
	}

	/* Hand the runtime data back to the caller for later teardown */
	*data = d;

	return 0;

err_alloc:
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d);
err_descs:
	/* Don't leak the IRQ descriptors allocated above */
	irq_free_descs(irq_base, chip->num_irqs);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
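
/*
 * Example: a minimal sketch of how a chip driver might register with
 * this helper.  The foo_* names, register addresses and IRQ layout are
 * hypothetical, purely for illustration:
 *
 *	static struct regmap_irq foo_irqs[] = {
 *		[0] = { .reg_offset = 0, .mask = BIT(0) },
 *		[1] = { .reg_offset = 0, .mask = BIT(1) },
 *	};
 *
 *	static struct regmap_irq_chip foo_irq_chip = {
 *		.name = "foo",
 *		.status_base = FOO_REG_IRQ_STATUS,
 *		.mask_base = FOO_REG_IRQ_MASK,
 *		.ack_base = FOO_REG_IRQ_ACK,
 *		.num_regs = 1,
 *		.irqs = foo_irqs,
 *		.num_irqs = ARRAY_SIZE(foo_irqs),
 *	};
 *
 *	ret = regmap_add_irq_chip(foo->regmap, foo->irq,
 *				  IRQF_TRIGGER_LOW | IRQF_ONESHOT,
 *				  -1, &foo_irq_chip, &foo->irq_data);
 *
 * Passing -1 as irq_base asks irq_alloc_descs() to pick a free range
 * dynamically; a positive irq_base requests that specific range.
 */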

/**
 * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d:   regmap_irq_chip_data allocated by regmap_add_irq_chip()
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	if (!d)
		return;

	free_irq(irq, d);
	/* Release the IRQ descriptors allocated in regmap_add_irq_chip() */
	irq_free_descs(d->irq_base, d->chip->num_irqs);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
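
/*
 * Example: the matching teardown in a driver's remove path (again with
 * hypothetical foo_* names).  Handlers requested on the individual
 * nested interrupts should be freed by their owners before the chip
 * itself is removed:
 *
 *	free_irq(foo->irq_base + FOO_IRQ_OVERCURRENT, foo);
 *	regmap_del_irq_chip(foo->irq, foo->irq_data);
 */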