Commit | Line | Data |
---|---|---|
a074ae38 PU |
1 | /* |
2 | * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com | |
3 | * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License version 2 as | |
7 | * published by the Free Software Foundation. | |
8 | * | |
9 | */ | |
10 | #include <linux/slab.h> | |
11 | #include <linux/err.h> | |
12 | #include <linux/init.h> | |
13 | #include <linux/list.h> | |
14 | #include <linux/io.h> | |
a074ae38 PU |
15 | #include <linux/of_address.h> |
16 | #include <linux/of_device.h> | |
17 | #include <linux/of_dma.h> | |
18 | ||
42dbdcc6 PU |
19 | #define TI_XBAR_DRA7 0 |
20 | #define TI_XBAR_AM335X 1 | |
21 | ||
/*
 * OF match table for the top-level probe; .data carries a TI_XBAR_*
 * discriminator used to dispatch to the SoC specific probe routine.
 */
static const struct of_device_id ti_dma_xbar_match[] = {
	{
		.compatible = "ti,dra7-dma-crossbar",
		.data = (void *)TI_XBAR_DRA7,
	},
	{
		.compatible = "ti,am335x-edma-crossbar",
		.data = (void *)TI_XBAR_AM335X,
	},
	{},
};
33 | ||
34 | /* Crossbar on AM335x/AM437x family */ | |
35 | #define TI_AM335X_XBAR_LINES 64 | |
36 | ||
/* Driver state for the AM335x/AM437x eDMA event crossbar. */
struct ti_am335x_xbar_data {
	void __iomem *iomem;		/* mapped crossbar (event mux) registers */

	struct dma_router dmarouter;	/* registered with the DMA core as router */

	u32 xbar_events; /* maximum number of events to select in xbar */
	u32 dma_requests; /* number of DMA requests on eDMA */
};
45 | ||
/* Per-route cookie passed from route_allocate() to route_free(). */
struct ti_am335x_xbar_map {
	u16 dma_line;	/* eDMA request line (crossbar output) */
	u16 mux_val;	/* crossbar input event selected for that line */
};
50 | ||
51 | static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u16 val) | |
52 | { | |
53 | writeb_relaxed(val & 0x1f, iomem + event); | |
54 | } | |
55 | ||
56 | static void ti_am335x_xbar_free(struct device *dev, void *route_data) | |
57 | { | |
58 | struct ti_am335x_xbar_data *xbar = dev_get_drvdata(dev); | |
59 | struct ti_am335x_xbar_map *map = route_data; | |
60 | ||
61 | dev_dbg(dev, "Unmapping XBAR event %u on channel %u\n", | |
62 | map->mux_val, map->dma_line); | |
63 | ||
64 | ti_am335x_xbar_write(xbar->iomem, map->dma_line, 0); | |
65 | kfree(map); | |
66 | } | |
67 | ||
/*
 * dma_router .route_allocate callback for AM335x/AM437x.
 *
 * The consumer's 3-cell specifier aimed at the crossbar is:
 *   args[0] = eDMA request line (crossbar output)
 *   args[1] = second cell, left untouched and passed through to the master
 *   args[2] = crossbar input event to route
 * On success the spec is rewritten in place into the 2-cell form the
 * "dma-masters" eDMA controller expects, the mux is programmed, and a
 * ti_am335x_xbar_map cookie is returned for later route_free().
 */
static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev);
	struct ti_am335x_xbar_map *map;

	if (dma_spec->args_count != 3)
		return ERR_PTR(-EINVAL);

	/* Crossbar input, bounded by this node's "dma-requests" (xbar_events) */
	if (dma_spec->args[2] >= xbar->xbar_events) {
		dev_err(&pdev->dev, "Invalid XBAR event number: %d\n",
			dma_spec->args[2]);
		return ERR_PTR(-EINVAL);
	}

	/* eDMA request line, bounded by the master's "dma-requests" */
	if (dma_spec->args[0] >= xbar->dma_requests) {
		dev_err(&pdev->dev, "Invalid DMA request line number: %d\n",
			dma_spec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	/* The of_node_put() will be done in the core for the node */
	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
	if (!dma_spec->np) {
		dev_err(&pdev->dev, "Can't get DMA master\n");
		return ERR_PTR(-EINVAL);
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		of_node_put(dma_spec->np);
		return ERR_PTR(-ENOMEM);
	}

	map->dma_line = (u16)dma_spec->args[0];
	map->mux_val = (u16)dma_spec->args[2];

	/* Rewrite the spec into the master's 2-cell format */
	dma_spec->args[2] = 0;
	dma_spec->args_count = 2;

	dev_dbg(&pdev->dev, "Mapping XBAR event%u to DMA%u\n",
		map->mux_val, map->dma_line);

	ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val);

	return map;
}
116 | ||
/* DMA masters the AM335x crossbar is allowed to sit in front of. */
static const struct of_device_id ti_am335x_master_match[] = {
	{ .compatible = "ti,edma3-tpcc", },
	{},
};
121 | ||
122 | static int ti_am335x_xbar_probe(struct platform_device *pdev) | |
123 | { | |
124 | struct device_node *node = pdev->dev.of_node; | |
125 | const struct of_device_id *match; | |
126 | struct device_node *dma_node; | |
127 | struct ti_am335x_xbar_data *xbar; | |
128 | struct resource *res; | |
129 | void __iomem *iomem; | |
130 | int i, ret; | |
131 | ||
132 | if (!node) | |
133 | return -ENODEV; | |
134 | ||
135 | xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL); | |
136 | if (!xbar) | |
137 | return -ENOMEM; | |
138 | ||
139 | dma_node = of_parse_phandle(node, "dma-masters", 0); | |
140 | if (!dma_node) { | |
141 | dev_err(&pdev->dev, "Can't get DMA master node\n"); | |
142 | return -ENODEV; | |
143 | } | |
144 | ||
145 | match = of_match_node(ti_am335x_master_match, dma_node); | |
146 | if (!match) { | |
147 | dev_err(&pdev->dev, "DMA master is not supported\n"); | |
148 | return -EINVAL; | |
149 | } | |
150 | ||
151 | if (of_property_read_u32(dma_node, "dma-requests", | |
152 | &xbar->dma_requests)) { | |
153 | dev_info(&pdev->dev, | |
154 | "Missing XBAR output information, using %u.\n", | |
155 | TI_AM335X_XBAR_LINES); | |
156 | xbar->dma_requests = TI_AM335X_XBAR_LINES; | |
157 | } | |
158 | of_node_put(dma_node); | |
159 | ||
160 | if (of_property_read_u32(node, "dma-requests", &xbar->xbar_events)) { | |
161 | dev_info(&pdev->dev, | |
162 | "Missing XBAR input information, using %u.\n", | |
163 | TI_AM335X_XBAR_LINES); | |
164 | xbar->xbar_events = TI_AM335X_XBAR_LINES; | |
165 | } | |
166 | ||
167 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
168 | iomem = devm_ioremap_resource(&pdev->dev, res); | |
169 | if (IS_ERR(iomem)) | |
170 | return PTR_ERR(iomem); | |
171 | ||
172 | xbar->iomem = iomem; | |
173 | ||
174 | xbar->dmarouter.dev = &pdev->dev; | |
175 | xbar->dmarouter.route_free = ti_am335x_xbar_free; | |
176 | ||
177 | platform_set_drvdata(pdev, xbar); | |
178 | ||
179 | /* Reset the crossbar */ | |
180 | for (i = 0; i < xbar->dma_requests; i++) | |
181 | ti_am335x_xbar_write(xbar->iomem, i, 0); | |
182 | ||
183 | ret = of_dma_router_register(node, ti_am335x_xbar_route_allocate, | |
184 | &xbar->dmarouter); | |
185 | ||
186 | return ret; | |
187 | } | |
188 | ||
189 | /* Crossbar on DRA7xx family */ | |
190 | #define TI_DRA7_XBAR_OUTPUTS 127 | |
191 | #define TI_DRA7_XBAR_INPUTS 256 | |
a074ae38 | 192 | |
1eb995bb PU |
193 | #define TI_XBAR_EDMA_OFFSET 0 |
194 | #define TI_XBAR_SDMA_OFFSET 1 | |
195 | ||
/* Driver state for the DRA7-family DMA crossbar. */
struct ti_dra7_xbar_data {
	void __iomem *iomem;		/* mapped crossbar control registers */

	struct dma_router dmarouter;	/* registered with the DMA core as router */
	struct mutex mutex;		/* serializes access to dma_inuse */
	unsigned long *dma_inuse;	/* bitmap of allocated/reserved DMA lines */

	u16 safe_val; /* Value to reset the crossbar lines */
	u32 xbar_requests; /* number of DMA requests connected to XBAR */
	u32 dma_requests; /* number of DMA requests forwarded to DMA */
	u32 dma_offset;	/* master's request-number offset (TI_XBAR_*_OFFSET) */
};
208 | ||
/* Per-route cookie passed from route_allocate() to route_free(). */
struct ti_dra7_xbar_map {
	u16 xbar_in;	/* crossbar input (peripheral DMA request) */
	int xbar_out;	/* crossbar output (controller request line) */
};
213 | ||
42dbdcc6 | 214 | static inline void ti_dra7_xbar_write(void __iomem *iomem, int xbar, u16 val) |
a074ae38 PU |
215 | { |
216 | writew_relaxed(val, iomem + (xbar * 2)); | |
217 | } | |
218 | ||
42dbdcc6 | 219 | static void ti_dra7_xbar_free(struct device *dev, void *route_data) |
a074ae38 | 220 | { |
42dbdcc6 PU |
221 | struct ti_dra7_xbar_data *xbar = dev_get_drvdata(dev); |
222 | struct ti_dra7_xbar_map *map = route_data; | |
a074ae38 PU |
223 | |
224 | dev_dbg(dev, "Unmapping XBAR%u (was routed to %d)\n", | |
225 | map->xbar_in, map->xbar_out); | |
226 | ||
42dbdcc6 | 227 | ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val); |
ec9bfa1e PU |
228 | mutex_lock(&xbar->mutex); |
229 | clear_bit(map->xbar_out, xbar->dma_inuse); | |
230 | mutex_unlock(&xbar->mutex); | |
a074ae38 PU |
231 | kfree(map); |
232 | } | |
233 | ||
42dbdcc6 PU |
234 | static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec, |
235 | struct of_dma *ofdma) | |
a074ae38 PU |
236 | { |
237 | struct platform_device *pdev = of_find_device_by_node(ofdma->of_node); | |
42dbdcc6 PU |
238 | struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev); |
239 | struct ti_dra7_xbar_map *map; | |
a074ae38 PU |
240 | |
241 | if (dma_spec->args[0] >= xbar->xbar_requests) { | |
242 | dev_err(&pdev->dev, "Invalid XBAR request number: %d\n", | |
243 | dma_spec->args[0]); | |
244 | return ERR_PTR(-EINVAL); | |
245 | } | |
246 | ||
247 | /* The of_node_put() will be done in the core for the node */ | |
248 | dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0); | |
249 | if (!dma_spec->np) { | |
250 | dev_err(&pdev->dev, "Can't get DMA master\n"); | |
251 | return ERR_PTR(-EINVAL); | |
252 | } | |
253 | ||
254 | map = kzalloc(sizeof(*map), GFP_KERNEL); | |
255 | if (!map) { | |
256 | of_node_put(dma_spec->np); | |
257 | return ERR_PTR(-ENOMEM); | |
258 | } | |
259 | ||
ec9bfa1e PU |
260 | mutex_lock(&xbar->mutex); |
261 | map->xbar_out = find_first_zero_bit(xbar->dma_inuse, | |
262 | xbar->dma_requests); | |
263 | mutex_unlock(&xbar->mutex); | |
264 | if (map->xbar_out == xbar->dma_requests) { | |
265 | dev_err(&pdev->dev, "Run out of free DMA requests\n"); | |
266 | kfree(map); | |
267 | return ERR_PTR(-ENOMEM); | |
268 | } | |
269 | set_bit(map->xbar_out, xbar->dma_inuse); | |
270 | ||
a074ae38 PU |
271 | map->xbar_in = (u16)dma_spec->args[0]; |
272 | ||
1eb995bb | 273 | dma_spec->args[0] = map->xbar_out + xbar->dma_offset; |
a074ae38 PU |
274 | |
275 | dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n", | |
276 | map->xbar_in, map->xbar_out); | |
277 | ||
42dbdcc6 | 278 | ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in); |
a074ae38 PU |
279 | |
280 | return map; | |
281 | } | |
282 | ||
/*
 * DMA masters the DRA7 crossbar may feed; .data is the request-number
 * offset (TI_XBAR_*_OFFSET) the master applies to its lines.
 */
static const struct of_device_id ti_dra7_master_match[] = {
	{
		.compatible = "ti,omap4430-sdma",
		.data = (void *)TI_XBAR_SDMA_OFFSET,
	},
	{
		.compatible = "ti,edma3",
		.data = (void *)TI_XBAR_EDMA_OFFSET,
	},
	{
		.compatible = "ti,edma3-tpcc",
		.data = (void *)TI_XBAR_EDMA_OFFSET,
	},
	{},
};
298 | ||
0f73f3e8 PU |
/*
 * Mark @len DMA request lines starting at @offset as in-use so the router
 * never hands them out and the probe reset loops skip them.
 *
 * Fix: dma_inuse starts all-zero (free), so the original clear_bit() here
 * was a no-op and "ti,reserved-dma-request-ranges" had no effect. A
 * reservation must SET the bits, matching set_bit() in route_allocate().
 */
static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		set_bit(offset + (len - 1), p);
}
304 | ||
42dbdcc6 | 305 | static int ti_dra7_xbar_probe(struct platform_device *pdev) |
a074ae38 PU |
306 | { |
307 | struct device_node *node = pdev->dev.of_node; | |
1eb995bb | 308 | const struct of_device_id *match; |
a074ae38 | 309 | struct device_node *dma_node; |
42dbdcc6 | 310 | struct ti_dra7_xbar_data *xbar; |
0f73f3e8 | 311 | struct property *prop; |
a074ae38 PU |
312 | struct resource *res; |
313 | u32 safe_val; | |
0f73f3e8 | 314 | size_t sz; |
a074ae38 PU |
315 | void __iomem *iomem; |
316 | int i, ret; | |
317 | ||
318 | if (!node) | |
319 | return -ENODEV; | |
320 | ||
321 | xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL); | |
322 | if (!xbar) | |
323 | return -ENOMEM; | |
324 | ||
325 | dma_node = of_parse_phandle(node, "dma-masters", 0); | |
326 | if (!dma_node) { | |
327 | dev_err(&pdev->dev, "Can't get DMA master node\n"); | |
328 | return -ENODEV; | |
329 | } | |
330 | ||
42dbdcc6 | 331 | match = of_match_node(ti_dra7_master_match, dma_node); |
1eb995bb PU |
332 | if (!match) { |
333 | dev_err(&pdev->dev, "DMA master is not supported\n"); | |
334 | return -EINVAL; | |
335 | } | |
336 | ||
a074ae38 PU |
337 | if (of_property_read_u32(dma_node, "dma-requests", |
338 | &xbar->dma_requests)) { | |
339 | dev_info(&pdev->dev, | |
340 | "Missing XBAR output information, using %u.\n", | |
42dbdcc6 PU |
341 | TI_DRA7_XBAR_OUTPUTS); |
342 | xbar->dma_requests = TI_DRA7_XBAR_OUTPUTS; | |
a074ae38 PU |
343 | } |
344 | of_node_put(dma_node); | |
345 | ||
ec9bfa1e PU |
346 | xbar->dma_inuse = devm_kcalloc(&pdev->dev, |
347 | BITS_TO_LONGS(xbar->dma_requests), | |
348 | sizeof(unsigned long), GFP_KERNEL); | |
349 | if (!xbar->dma_inuse) | |
350 | return -ENOMEM; | |
351 | ||
a074ae38 PU |
352 | if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) { |
353 | dev_info(&pdev->dev, | |
354 | "Missing XBAR input information, using %u.\n", | |
42dbdcc6 PU |
355 | TI_DRA7_XBAR_INPUTS); |
356 | xbar->xbar_requests = TI_DRA7_XBAR_INPUTS; | |
a074ae38 PU |
357 | } |
358 | ||
359 | if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val)) | |
360 | xbar->safe_val = (u16)safe_val; | |
361 | ||
0f73f3e8 PU |
362 | |
363 | prop = of_find_property(node, "ti,reserved-dma-request-ranges", &sz); | |
364 | if (prop) { | |
365 | const char pname[] = "ti,reserved-dma-request-ranges"; | |
366 | u32 (*rsv_events)[2]; | |
367 | size_t nelm = sz / sizeof(*rsv_events); | |
368 | int i; | |
369 | ||
370 | if (!nelm) | |
371 | return -EINVAL; | |
372 | ||
373 | rsv_events = kcalloc(nelm, sizeof(*rsv_events), GFP_KERNEL); | |
374 | if (!rsv_events) | |
375 | return -ENOMEM; | |
376 | ||
377 | ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events, | |
378 | nelm * 2); | |
379 | if (ret) | |
380 | return ret; | |
381 | ||
382 | for (i = 0; i < nelm; i++) { | |
383 | ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1], | |
384 | xbar->dma_inuse); | |
385 | } | |
386 | kfree(rsv_events); | |
387 | } | |
388 | ||
a074ae38 | 389 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
a074ae38 | 390 | iomem = devm_ioremap_resource(&pdev->dev, res); |
28eb232f AL |
391 | if (IS_ERR(iomem)) |
392 | return PTR_ERR(iomem); | |
a074ae38 PU |
393 | |
394 | xbar->iomem = iomem; | |
395 | ||
396 | xbar->dmarouter.dev = &pdev->dev; | |
42dbdcc6 | 397 | xbar->dmarouter.route_free = ti_dra7_xbar_free; |
1eb995bb | 398 | xbar->dma_offset = (u32)match->data; |
a074ae38 | 399 | |
ec9bfa1e | 400 | mutex_init(&xbar->mutex); |
a074ae38 PU |
401 | platform_set_drvdata(pdev, xbar); |
402 | ||
403 | /* Reset the crossbar */ | |
0f73f3e8 PU |
404 | for (i = 0; i < xbar->dma_requests; i++) { |
405 | if (!test_bit(i, xbar->dma_inuse)) | |
406 | ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val); | |
407 | } | |
a074ae38 | 408 | |
42dbdcc6 | 409 | ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate, |
a074ae38 PU |
410 | &xbar->dmarouter); |
411 | if (ret) { | |
412 | /* Restore the defaults for the crossbar */ | |
0f73f3e8 PU |
413 | for (i = 0; i < xbar->dma_requests; i++) { |
414 | if (!test_bit(i, xbar->dma_inuse)) | |
415 | ti_dra7_xbar_write(xbar->iomem, i, i); | |
416 | } | |
a074ae38 PU |
417 | } |
418 | ||
419 | return ret; | |
420 | } | |
421 | ||
42dbdcc6 PU |
422 | static int ti_dma_xbar_probe(struct platform_device *pdev) |
423 | { | |
424 | const struct of_device_id *match; | |
425 | int ret; | |
426 | ||
427 | match = of_match_node(ti_dma_xbar_match, pdev->dev.of_node); | |
428 | if (unlikely(!match)) | |
429 | return -EINVAL; | |
430 | ||
431 | switch ((u32)match->data) { | |
432 | case TI_XBAR_DRA7: | |
433 | ret = ti_dra7_xbar_probe(pdev); | |
434 | break; | |
435 | case TI_XBAR_AM335X: | |
436 | ret = ti_am335x_xbar_probe(pdev); | |
437 | break; | |
438 | default: | |
439 | dev_err(&pdev->dev, "Unsupported crossbar\n"); | |
440 | ret = -ENODEV; | |
441 | break; | |
442 | } | |
443 | ||
444 | return ret; | |
445 | } | |
a074ae38 PU |
446 | |
/* Platform driver; no .remove — the router persists for system lifetime. */
static struct platform_driver ti_dma_xbar_driver = {
	.driver = {
		.name = "ti-dma-crossbar",
		.of_match_table = of_match_ptr(ti_dma_xbar_match),
	},
	.probe	= ti_dma_xbar_probe,
};
454 | ||
/*
 * Registered at arch_initcall time so the crossbar router is available
 * before the DMA controllers and their consumers probe.
 */
int omap_dmaxbar_init(void)
{
	return platform_driver_register(&ti_dma_xbar_driver);
}
arch_initcall(omap_dmaxbar_init);