mfd: core: Push irqdomain mapping out into devices
/*
 * drivers/mfd/mfd-core.c
 *
 * core MFD support
 * Copyright (c) 2006 Ian Molton
 * Copyright (c) 2007,2008 Dmitry Baryshkov
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/mfd/core.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

int mfd_cell_enable(struct platform_device *pdev)
{
	const struct mfd_cell *cell = mfd_get_cell(pdev);
	int err = 0;

	/* only call enable hook if the cell wasn't previously enabled */
	if (atomic_inc_return(cell->usage_count) == 1)
		err = cell->enable(pdev);

	/* if the enable hook failed, decrement counter to allow retries */
	if (err)
		atomic_dec(cell->usage_count);

	return err;
}
EXPORT_SYMBOL(mfd_cell_enable);

int mfd_cell_disable(struct platform_device *pdev)
{
	const struct mfd_cell *cell = mfd_get_cell(pdev);
	int err = 0;

	/* only disable if no other clients are using it */
	if (atomic_dec_return(cell->usage_count) == 0)
		err = cell->disable(pdev);

	/* if the disable hook failed, increment to allow retries */
	if (err)
		atomic_inc(cell->usage_count);

	/* sanity check; did someone call disable too many times? */
	WARN_ON(atomic_read(cell->usage_count) < 0);

	return err;
}
EXPORT_SYMBOL(mfd_cell_disable);
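
/*
 * Example (illustrative only, not part of this file): a hypothetical cell
 * driver whose hardware block is shared with sibling cells would bracket
 * its use of the block with the reference-counted hooks above, so that the
 * parent's ->enable() runs for the first user and ->disable() for the last.
 * All "foo" identifiers are placeholders.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = mfd_cell_enable(pdev);
 *		if (ret)
 *			return ret;
 *
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		return mfd_cell_disable(pdev);
 *	}
 */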

static int mfd_platform_add_cell(struct platform_device *pdev,
				 const struct mfd_cell *cell)
{
	if (!cell)
		return 0;

	pdev->mfd_cell = kmemdup(cell, sizeof(*cell), GFP_KERNEL);
	if (!pdev->mfd_cell)
		return -ENOMEM;

	return 0;
}

static int mfd_add_device(struct device *parent, int id,
			  const struct mfd_cell *cell,
			  struct resource *mem_base,
			  int irq_base, struct irq_domain *domain)
{
	struct resource *res;
	struct platform_device *pdev;
	struct device_node *np = NULL;
	int ret = -ENOMEM;
	int r;

	pdev = platform_device_alloc(cell->name, id + cell->id);
	if (!pdev)
		goto fail_alloc;

	res = kzalloc(sizeof(*res) * cell->num_resources, GFP_KERNEL);
	if (!res)
		goto fail_device;

	pdev->dev.parent = parent;

	if (parent->of_node && cell->of_compatible) {
		for_each_child_of_node(parent->of_node, np) {
			if (of_device_is_compatible(np, cell->of_compatible)) {
				pdev->dev.of_node = np;
				break;
			}
		}
	}

	if (cell->pdata_size) {
		ret = platform_device_add_data(pdev,
					cell->platform_data, cell->pdata_size);
		if (ret)
			goto fail_res;
	}

	ret = mfd_platform_add_cell(pdev, cell);
	if (ret)
		goto fail_res;

	for (r = 0; r < cell->num_resources; r++) {
		res[r].name = cell->resources[r].name;
		res[r].flags = cell->resources[r].flags;

		/* Find out base to use */
		if ((cell->resources[r].flags & IORESOURCE_MEM) && mem_base) {
			res[r].parent = mem_base;
			res[r].start = mem_base->start +
				cell->resources[r].start;
			res[r].end = mem_base->start +
				cell->resources[r].end;
		} else if (cell->resources[r].flags & IORESOURCE_IRQ) {
			if (domain) {
				/* Unable to create mappings for IRQ ranges. */
				WARN_ON(cell->resources[r].start !=
					cell->resources[r].end);
				res[r].start = res[r].end = irq_create_mapping(
					domain, cell->resources[r].start);
			} else {
				res[r].start = irq_base +
					cell->resources[r].start;
				res[r].end = irq_base +
					cell->resources[r].end;
			}
		} else {
			res[r].parent = cell->resources[r].parent;
			res[r].start = cell->resources[r].start;
			res[r].end = cell->resources[r].end;
		}

		if (!cell->ignore_resource_conflicts) {
			ret = acpi_check_resource_conflict(&res[r]);
			if (ret)
				goto fail_res;
		}
	}

	ret = platform_device_add_resources(pdev, res, cell->num_resources);
	if (ret)
		goto fail_res;

	ret = platform_device_add(pdev);
	if (ret)
		goto fail_res;

	if (cell->pm_runtime_no_callbacks)
		pm_runtime_no_callbacks(&pdev->dev);

	kfree(res);

	return 0;

fail_res:
	kfree(res);
fail_device:
	platform_device_put(pdev);
fail_alloc:
	return ret;
}

int mfd_add_devices(struct device *parent, int id,
		    struct mfd_cell *cells, int n_devs,
		    struct resource *mem_base,
		    int irq_base, struct irq_domain *domain)
{
	int i;
	int ret = 0;
	atomic_t *cnts;

	/* initialize reference counting for all cells */
	cnts = kcalloc(n_devs, sizeof(*cnts), GFP_KERNEL);
	if (!cnts)
		return -ENOMEM;

	for (i = 0; i < n_devs; i++) {
		atomic_set(&cnts[i], 0);
		cells[i].usage_count = &cnts[i];
		ret = mfd_add_device(parent, id, cells + i, mem_base,
				     irq_base, domain);
		if (ret)
			break;
	}

	if (ret)
		mfd_remove_devices(parent);

	return ret;
}
EXPORT_SYMBOL(mfd_add_devices);
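
/*
 * Example (illustrative only): a hypothetical parent MFD driver registering
 * two cells. When a valid irq_domain is passed, each cell's IORESOURCE_IRQ
 * entry is treated as a hardware IRQ number within that domain and
 * mfd_add_device() maps it to a virq via irq_create_mapping(); when no
 * domain is given, irq_base is simply added to the cell's IRQ numbers.
 * All "foo" identifiers are placeholders, not part of this API.
 *
 *	static struct resource foo_adc_resources[] = {
 *		{ .start = 3, .end = 3, .flags = IORESOURCE_IRQ },
 *	};
 *
 *	static struct mfd_cell foo_cells[] = {
 *		{
 *			.name		= "foo-adc",
 *			.num_resources	= ARRAY_SIZE(foo_adc_resources),
 *			.resources	= foo_adc_resources,
 *		},
 *		{ .name = "foo-rtc" },
 *	};
 *
 *	ret = mfd_add_devices(dev, -1, foo_cells, ARRAY_SIZE(foo_cells),
 *			      NULL, 0, foo->irq_domain);
 */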

static int mfd_remove_devices_fn(struct device *dev, void *c)
{
	struct platform_device *pdev = to_platform_device(dev);
	const struct mfd_cell *cell = mfd_get_cell(pdev);
	atomic_t **usage_count = c;

	/* find the base address of usage_count pointers (for freeing) */
	if (!*usage_count || (cell->usage_count < *usage_count))
		*usage_count = cell->usage_count;

	platform_device_unregister(pdev);
	return 0;
}

void mfd_remove_devices(struct device *parent)
{
	atomic_t *cnts = NULL;

	device_for_each_child(parent, &cnts, mfd_remove_devices_fn);
	kfree(cnts);
}
EXPORT_SYMBOL(mfd_remove_devices);

int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones)
{
	struct mfd_cell cell_entry;
	struct device *dev;
	struct platform_device *pdev;
	int i;

	/* fetch the parent cell's device (should already be registered!) */
	dev = bus_find_device_by_name(&platform_bus_type, NULL, cell);
	if (!dev) {
		printk(KERN_ERR "failed to find device for cell %s\n", cell);
		return -ENODEV;
	}
	pdev = to_platform_device(dev);
	memcpy(&cell_entry, mfd_get_cell(pdev), sizeof(cell_entry));

	WARN_ON(!cell_entry.enable);

	for (i = 0; i < n_clones; i++) {
		cell_entry.name = clones[i];
		/* don't give up if a single call fails; just report error */
		if (mfd_add_device(pdev->dev.parent, -1, &cell_entry, NULL, 0,
				   NULL))
			dev_err(dev, "failed to create platform device '%s'\n",
				clones[i]);
	}

	return 0;
}
EXPORT_SYMBOL(mfd_clone_cell);
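
/*
 * Example (illustrative only): a cell whose hardware is also needed by other
 * subsystems can register clones of itself; the clones share the original
 * cell's state and enable/disable hooks. The names below are placeholders.
 *
 *	static const char *foo_clones[] = { "foo-hwmon", "foo-battery" };
 *
 *	ret = mfd_clone_cell("foo-core", foo_clones, ARRAY_SIZE(foo_clones));
 */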

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov");