Commit | Line | Data |
---|---|---|
6b884a8d | 1 | |
5019f0b1 | 2 | #include <linux/device.h> |
6b884a8d GL |
3 | #include <linux/io.h> |
4 | #include <linux/ioport.h> | |
dbbdee94 | 5 | #include <linux/module.h> |
6b884a8d | 6 | #include <linux/of_address.h> |
dbbdee94 | 7 | #include <linux/pci_regs.h> |
41f8bba7 LD |
8 | #include <linux/sizes.h> |
9 | #include <linux/slab.h> | |
dbbdee94 | 10 | #include <linux/string.h> |
6b884a8d | 11 | |
dbbdee94 GL |
12 | /* Max address size we deal with */ |
13 | #define OF_MAX_ADDR_CELLS 4 | |
5d61b165 SW |
14 | #define OF_CHECK_ADDR_COUNT(na) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS) |
15 | #define OF_CHECK_COUNTS(na, ns) (OF_CHECK_ADDR_COUNT(na) && (ns) > 0) | |
dbbdee94 GL |
16 | |
17 | static struct of_bus *of_match_bus(struct device_node *np); | |
0131d897 SAS |
18 | static int __of_address_to_resource(struct device_node *dev, |
19 | const __be32 *addrp, u64 size, unsigned int flags, | |
35f3da32 | 20 | const char *name, struct resource *r); |
dbbdee94 GL |
21 | |
22 | /* Debug utility */ | |
23 | #ifdef DEBUG | |
0131d897 | 24 | static void of_dump_addr(const char *s, const __be32 *addr, int na) |
dbbdee94 GL |
25 | { |
26 | printk(KERN_DEBUG "%s", s); | |
27 | while (na--) | |
154063a9 | 28 | printk(" %08x", be32_to_cpu(*(addr++))); |
dbbdee94 GL |
29 | printk("\n"); |
30 | } | |
31 | #else | |
0131d897 | 32 | static void of_dump_addr(const char *s, const __be32 *addr, int na) { } |
dbbdee94 GL |
33 | #endif |
34 | ||
35 | /* Callbacks for bus specific translators */ | |
36 | struct of_bus { | |
37 | const char *name; | |
38 | const char *addresses; | |
39 | int (*match)(struct device_node *parent); | |
40 | void (*count_cells)(struct device_node *child, | |
41 | int *addrc, int *sizec); | |
47b1e689 | 42 | u64 (*map)(__be32 *addr, const __be32 *range, |
dbbdee94 | 43 | int na, int ns, int pna); |
47b1e689 | 44 | int (*translate)(__be32 *addr, u64 offset, int na); |
0131d897 | 45 | unsigned int (*get_flags)(const __be32 *addr); |
dbbdee94 GL |
46 | }; |
47 | ||
/*
 * Default translator (generic bus)
 */

static void of_bus_default_count_cells(struct device_node *dev,
				       int *addrc, int *sizec)
{
	/* Fall back to the standard #address-cells/#size-cells lookup */
	if (addrc)
		*addrc = of_n_addr_cells(dev);
	if (sizec)
		*sizec = of_n_size_cells(dev);
}
60 | ||
47b1e689 | 61 | static u64 of_bus_default_map(__be32 *addr, const __be32 *range, |
dbbdee94 GL |
62 | int na, int ns, int pna) |
63 | { | |
64 | u64 cp, s, da; | |
65 | ||
66 | cp = of_read_number(range, na); | |
67 | s = of_read_number(range + na + pna, ns); | |
68 | da = of_read_number(addr, na); | |
69 | ||
70 | pr_debug("OF: default map, cp=%llx, s=%llx, da=%llx\n", | |
71 | (unsigned long long)cp, (unsigned long long)s, | |
72 | (unsigned long long)da); | |
73 | ||
74 | if (da < cp || da >= (cp + s)) | |
75 | return OF_BAD_ADDR; | |
76 | return da - cp; | |
77 | } | |
78 | ||
47b1e689 | 79 | static int of_bus_default_translate(__be32 *addr, u64 offset, int na) |
dbbdee94 GL |
80 | { |
81 | u64 a = of_read_number(addr, na); | |
82 | memset(addr, 0, na * 4); | |
83 | a += offset; | |
84 | if (na > 1) | |
154063a9 GL |
85 | addr[na - 2] = cpu_to_be32(a >> 32); |
86 | addr[na - 1] = cpu_to_be32(a & 0xffffffffu); | |
dbbdee94 GL |
87 | |
88 | return 0; | |
89 | } | |
90 | ||
0131d897 | 91 | static unsigned int of_bus_default_get_flags(const __be32 *addr) |
dbbdee94 GL |
92 | { |
93 | return IORESOURCE_MEM; | |
94 | } | |
95 | ||
25a31579 | 96 | #ifdef CONFIG_OF_ADDRESS_PCI |
dbbdee94 GL |
97 | /* |
98 | * PCI bus specific translator | |
99 | */ | |
100 | ||
101 | static int of_bus_pci_match(struct device_node *np) | |
102 | { | |
6dd18e46 | 103 | /* |
14e2abb7 | 104 | * "pciex" is PCI Express |
6dd18e46 BH |
105 | * "vci" is for the /chaos bridge on 1st-gen PCI powermacs |
106 | * "ht" is hypertransport | |
107 | */ | |
14e2abb7 KSS |
108 | return !strcmp(np->type, "pci") || !strcmp(np->type, "pciex") || |
109 | !strcmp(np->type, "vci") || !strcmp(np->type, "ht"); | |
dbbdee94 GL |
110 | } |
111 | ||
static void of_bus_pci_count_cells(struct device_node *np,
				   int *addrc, int *sizec)
{
	/* PCI addresses are always phys.hi/phys.mid/phys.lo + 2 size cells */
	if (addrc)
		*addrc = 3;
	if (sizec)
		*sizec = 2;
}
120 | ||
0131d897 | 121 | static unsigned int of_bus_pci_get_flags(const __be32 *addr) |
dbbdee94 GL |
122 | { |
123 | unsigned int flags = 0; | |
0131d897 | 124 | u32 w = be32_to_cpup(addr); |
dbbdee94 GL |
125 | |
126 | switch((w >> 24) & 0x03) { | |
127 | case 0x01: | |
128 | flags |= IORESOURCE_IO; | |
129 | break; | |
130 | case 0x02: /* 32 bits */ | |
131 | case 0x03: /* 64 bits */ | |
132 | flags |= IORESOURCE_MEM; | |
133 | break; | |
134 | } | |
135 | if (w & 0x40000000) | |
136 | flags |= IORESOURCE_PREFETCH; | |
137 | return flags; | |
138 | } | |
139 | ||
47b1e689 | 140 | static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns, |
0131d897 | 141 | int pna) |
dbbdee94 GL |
142 | { |
143 | u64 cp, s, da; | |
144 | unsigned int af, rf; | |
145 | ||
146 | af = of_bus_pci_get_flags(addr); | |
147 | rf = of_bus_pci_get_flags(range); | |
148 | ||
149 | /* Check address type match */ | |
150 | if ((af ^ rf) & (IORESOURCE_MEM | IORESOURCE_IO)) | |
151 | return OF_BAD_ADDR; | |
152 | ||
153 | /* Read address values, skipping high cell */ | |
154 | cp = of_read_number(range + 1, na - 1); | |
155 | s = of_read_number(range + na + pna, ns); | |
156 | da = of_read_number(addr + 1, na - 1); | |
157 | ||
158 | pr_debug("OF: PCI map, cp=%llx, s=%llx, da=%llx\n", | |
159 | (unsigned long long)cp, (unsigned long long)s, | |
160 | (unsigned long long)da); | |
161 | ||
162 | if (da < cp || da >= (cp + s)) | |
163 | return OF_BAD_ADDR; | |
164 | return da - cp; | |
165 | } | |
166 | ||
47b1e689 | 167 | static int of_bus_pci_translate(__be32 *addr, u64 offset, int na) |
dbbdee94 GL |
168 | { |
169 | return of_bus_default_translate(addr + 1, offset, na - 1); | |
170 | } | |
25a31579 | 171 | #endif /* CONFIG_OF_ADDRESS_PCI */ |
dbbdee94 | 172 | |
25a31579 | 173 | #ifdef CONFIG_PCI |
0131d897 | 174 | const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size, |
dbbdee94 GL |
175 | unsigned int *flags) |
176 | { | |
a9fadeef | 177 | const __be32 *prop; |
dbbdee94 GL |
178 | unsigned int psize; |
179 | struct device_node *parent; | |
180 | struct of_bus *bus; | |
181 | int onesize, i, na, ns; | |
182 | ||
183 | /* Get parent & match bus type */ | |
184 | parent = of_get_parent(dev); | |
185 | if (parent == NULL) | |
186 | return NULL; | |
187 | bus = of_match_bus(parent); | |
188 | if (strcmp(bus->name, "pci")) { | |
189 | of_node_put(parent); | |
190 | return NULL; | |
191 | } | |
192 | bus->count_cells(dev, &na, &ns); | |
193 | of_node_put(parent); | |
5d61b165 | 194 | if (!OF_CHECK_ADDR_COUNT(na)) |
dbbdee94 GL |
195 | return NULL; |
196 | ||
197 | /* Get "reg" or "assigned-addresses" property */ | |
198 | prop = of_get_property(dev, bus->addresses, &psize); | |
199 | if (prop == NULL) | |
200 | return NULL; | |
201 | psize /= 4; | |
202 | ||
203 | onesize = na + ns; | |
154063a9 GL |
204 | for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) { |
205 | u32 val = be32_to_cpu(prop[0]); | |
206 | if ((val & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) { | |
dbbdee94 GL |
207 | if (size) |
208 | *size = of_read_number(prop + na, ns); | |
209 | if (flags) | |
210 | *flags = bus->get_flags(prop); | |
211 | return prop; | |
212 | } | |
154063a9 | 213 | } |
dbbdee94 GL |
214 | return NULL; |
215 | } | |
216 | EXPORT_SYMBOL(of_get_pci_address); | |
217 | ||
218 | int of_pci_address_to_resource(struct device_node *dev, int bar, | |
219 | struct resource *r) | |
220 | { | |
0131d897 | 221 | const __be32 *addrp; |
dbbdee94 GL |
222 | u64 size; |
223 | unsigned int flags; | |
224 | ||
225 | addrp = of_get_pci_address(dev, bar, &size, &flags); | |
226 | if (addrp == NULL) | |
227 | return -EINVAL; | |
35f3da32 | 228 | return __of_address_to_resource(dev, addrp, size, flags, NULL, r); |
dbbdee94 GL |
229 | } |
230 | EXPORT_SYMBOL_GPL(of_pci_address_to_resource); | |
29b635c0 AM |
231 | |
232 | int of_pci_range_parser_init(struct of_pci_range_parser *parser, | |
233 | struct device_node *node) | |
234 | { | |
235 | const int na = 3, ns = 2; | |
236 | int rlen; | |
237 | ||
238 | parser->node = node; | |
239 | parser->pna = of_n_addr_cells(node); | |
240 | parser->np = parser->pna + na + ns; | |
241 | ||
242 | parser->range = of_get_property(node, "ranges", &rlen); | |
243 | if (parser->range == NULL) | |
244 | return -ENOENT; | |
245 | ||
246 | parser->end = parser->range + rlen / sizeof(__be32); | |
247 | ||
248 | return 0; | |
249 | } | |
250 | EXPORT_SYMBOL_GPL(of_pci_range_parser_init); | |
251 | ||
252 | struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser, | |
253 | struct of_pci_range *range) | |
254 | { | |
255 | const int na = 3, ns = 2; | |
256 | ||
257 | if (!range) | |
258 | return NULL; | |
259 | ||
260 | if (!parser->range || parser->range + parser->np > parser->end) | |
261 | return NULL; | |
262 | ||
263 | range->pci_space = parser->range[0]; | |
264 | range->flags = of_bus_pci_get_flags(parser->range); | |
265 | range->pci_addr = of_read_number(parser->range + 1, ns); | |
266 | range->cpu_addr = of_translate_address(parser->node, | |
267 | parser->range + na); | |
268 | range->size = of_read_number(parser->range + parser->pna + na, ns); | |
269 | ||
270 | parser->range += parser->np; | |
271 | ||
272 | /* Now consume following elements while they are contiguous */ | |
273 | while (parser->range + parser->np <= parser->end) { | |
274 | u32 flags, pci_space; | |
275 | u64 pci_addr, cpu_addr, size; | |
276 | ||
277 | pci_space = be32_to_cpup(parser->range); | |
278 | flags = of_bus_pci_get_flags(parser->range); | |
279 | pci_addr = of_read_number(parser->range + 1, ns); | |
280 | cpu_addr = of_translate_address(parser->node, | |
281 | parser->range + na); | |
282 | size = of_read_number(parser->range + parser->pna + na, ns); | |
283 | ||
284 | if (flags != range->flags) | |
285 | break; | |
286 | if (pci_addr != range->pci_addr + range->size || | |
287 | cpu_addr != range->cpu_addr + range->size) | |
288 | break; | |
289 | ||
290 | range->size += size; | |
291 | parser->range += parser->np; | |
292 | } | |
293 | ||
294 | return range; | |
295 | } | |
296 | EXPORT_SYMBOL_GPL(of_pci_range_parser_one); | |
297 | ||
0b0b0893 LD |
298 | /* |
299 | * of_pci_range_to_resource - Create a resource from an of_pci_range | |
300 | * @range: the PCI range that describes the resource | |
301 | * @np: device node where the range belongs to | |
302 | * @res: pointer to a valid resource that will be updated to | |
303 | * reflect the values contained in the range. | |
304 | * | |
305 | * Returns EINVAL if the range cannot be converted to resource. | |
306 | * | |
307 | * Note that if the range is an IO range, the resource will be converted | |
308 | * using pci_address_to_pio() which can fail if it is called too early or | |
309 | * if the range cannot be matched to any host bridge IO space (our case here). | |
310 | * To guard against that we try to register the IO range first. | |
311 | * If that fails we know that pci_address_to_pio() will do too. | |
312 | */ | |
313 | int of_pci_range_to_resource(struct of_pci_range *range, | |
314 | struct device_node *np, struct resource *res) | |
83bbde1c | 315 | { |
0b0b0893 | 316 | int err; |
83bbde1c | 317 | res->flags = range->flags; |
83bbde1c LD |
318 | res->parent = res->child = res->sibling = NULL; |
319 | res->name = np->full_name; | |
0b0b0893 LD |
320 | |
321 | if (res->flags & IORESOURCE_IO) { | |
322 | unsigned long port; | |
323 | err = pci_register_io_range(range->cpu_addr, range->size); | |
324 | if (err) | |
325 | goto invalid_range; | |
326 | port = pci_address_to_pio(range->cpu_addr); | |
327 | if (port == (unsigned long)-1) { | |
328 | err = -EINVAL; | |
329 | goto invalid_range; | |
330 | } | |
331 | res->start = port; | |
332 | } else { | |
333 | res->start = range->cpu_addr; | |
334 | } | |
335 | res->end = res->start + range->size - 1; | |
336 | return 0; | |
337 | ||
338 | invalid_range: | |
339 | res->start = (resource_size_t)OF_BAD_ADDR; | |
340 | res->end = (resource_size_t)OF_BAD_ADDR; | |
341 | return err; | |
83bbde1c | 342 | } |
dbbdee94 GL |
343 | #endif /* CONFIG_PCI */ |
344 | ||
345 | /* | |
346 | * ISA bus specific translator | |
347 | */ | |
348 | ||
349 | static int of_bus_isa_match(struct device_node *np) | |
350 | { | |
351 | return !strcmp(np->name, "isa"); | |
352 | } | |
353 | ||
static void of_bus_isa_count_cells(struct device_node *child,
				   int *addrc, int *sizec)
{
	/* ISA reg entries are <space-selector addr> <size> */
	if (addrc)
		*addrc = 2;
	if (sizec)
		*sizec = 1;
}
362 | ||
47b1e689 | 363 | static u64 of_bus_isa_map(__be32 *addr, const __be32 *range, int na, int ns, |
0131d897 | 364 | int pna) |
dbbdee94 GL |
365 | { |
366 | u64 cp, s, da; | |
367 | ||
368 | /* Check address type match */ | |
0131d897 | 369 | if ((addr[0] ^ range[0]) & cpu_to_be32(1)) |
dbbdee94 GL |
370 | return OF_BAD_ADDR; |
371 | ||
372 | /* Read address values, skipping high cell */ | |
373 | cp = of_read_number(range + 1, na - 1); | |
374 | s = of_read_number(range + na + pna, ns); | |
375 | da = of_read_number(addr + 1, na - 1); | |
376 | ||
377 | pr_debug("OF: ISA map, cp=%llx, s=%llx, da=%llx\n", | |
378 | (unsigned long long)cp, (unsigned long long)s, | |
379 | (unsigned long long)da); | |
380 | ||
381 | if (da < cp || da >= (cp + s)) | |
382 | return OF_BAD_ADDR; | |
383 | return da - cp; | |
384 | } | |
385 | ||
47b1e689 | 386 | static int of_bus_isa_translate(__be32 *addr, u64 offset, int na) |
dbbdee94 GL |
387 | { |
388 | return of_bus_default_translate(addr + 1, offset, na - 1); | |
389 | } | |
390 | ||
0131d897 | 391 | static unsigned int of_bus_isa_get_flags(const __be32 *addr) |
dbbdee94 GL |
392 | { |
393 | unsigned int flags = 0; | |
0131d897 | 394 | u32 w = be32_to_cpup(addr); |
dbbdee94 GL |
395 | |
396 | if (w & 1) | |
397 | flags |= IORESOURCE_IO; | |
398 | else | |
399 | flags |= IORESOURCE_MEM; | |
400 | return flags; | |
401 | } | |
402 | ||
403 | /* | |
404 | * Array of bus specific translators | |
405 | */ | |
406 | ||
407 | static struct of_bus of_busses[] = { | |
25a31579 | 408 | #ifdef CONFIG_OF_ADDRESS_PCI |
dbbdee94 GL |
409 | /* PCI */ |
410 | { | |
411 | .name = "pci", | |
412 | .addresses = "assigned-addresses", | |
413 | .match = of_bus_pci_match, | |
414 | .count_cells = of_bus_pci_count_cells, | |
415 | .map = of_bus_pci_map, | |
416 | .translate = of_bus_pci_translate, | |
417 | .get_flags = of_bus_pci_get_flags, | |
418 | }, | |
25a31579 | 419 | #endif /* CONFIG_OF_ADDRESS_PCI */ |
dbbdee94 GL |
420 | /* ISA */ |
421 | { | |
422 | .name = "isa", | |
423 | .addresses = "reg", | |
424 | .match = of_bus_isa_match, | |
425 | .count_cells = of_bus_isa_count_cells, | |
426 | .map = of_bus_isa_map, | |
427 | .translate = of_bus_isa_translate, | |
428 | .get_flags = of_bus_isa_get_flags, | |
429 | }, | |
430 | /* Default */ | |
431 | { | |
432 | .name = "default", | |
433 | .addresses = "reg", | |
434 | .match = NULL, | |
435 | .count_cells = of_bus_default_count_cells, | |
436 | .map = of_bus_default_map, | |
437 | .translate = of_bus_default_translate, | |
438 | .get_flags = of_bus_default_get_flags, | |
439 | }, | |
440 | }; | |
441 | ||
442 | static struct of_bus *of_match_bus(struct device_node *np) | |
443 | { | |
444 | int i; | |
445 | ||
446 | for (i = 0; i < ARRAY_SIZE(of_busses); i++) | |
447 | if (!of_busses[i].match || of_busses[i].match(np)) | |
448 | return &of_busses[i]; | |
449 | BUG(); | |
450 | return NULL; | |
451 | } | |
452 | ||
41d94893 | 453 | static int of_empty_ranges_quirk(struct device_node *np) |
746c9e9f BH |
454 | { |
455 | if (IS_ENABLED(CONFIG_PPC)) { | |
41d94893 | 456 | /* To save cycles, we cache the result for global "Mac" setting */ |
746c9e9f BH |
457 | static int quirk_state = -1; |
458 | ||
41d94893 BH |
459 | /* PA-SEMI sdc DT bug */ |
460 | if (of_device_is_compatible(np, "1682m-sdc")) | |
461 | return true; | |
462 | ||
463 | /* Make quirk cached */ | |
746c9e9f BH |
464 | if (quirk_state < 0) |
465 | quirk_state = | |
466 | of_machine_is_compatible("Power Macintosh") || | |
467 | of_machine_is_compatible("MacRISC"); | |
468 | return quirk_state; | |
469 | } | |
470 | return false; | |
471 | } | |
472 | ||
dbbdee94 | 473 | static int of_translate_one(struct device_node *parent, struct of_bus *bus, |
47b1e689 | 474 | struct of_bus *pbus, __be32 *addr, |
dbbdee94 GL |
475 | int na, int ns, int pna, const char *rprop) |
476 | { | |
0131d897 | 477 | const __be32 *ranges; |
dbbdee94 GL |
478 | unsigned int rlen; |
479 | int rone; | |
480 | u64 offset = OF_BAD_ADDR; | |
481 | ||
482 | /* Normally, an absence of a "ranges" property means we are | |
483 | * crossing a non-translatable boundary, and thus the addresses | |
484 | * below the current not cannot be converted to CPU physical ones. | |
485 | * Unfortunately, while this is very clear in the spec, it's not | |
486 | * what Apple understood, and they do have things like /uni-n or | |
487 | * /ht nodes with no "ranges" property and a lot of perfectly | |
488 | * useable mapped devices below them. Thus we treat the absence of | |
489 | * "ranges" as equivalent to an empty "ranges" property which means | |
490 | * a 1:1 translation at that level. It's up to the caller not to try | |
491 | * to translate addresses that aren't supposed to be translated in | |
492 | * the first place. --BenH. | |
3930f294 GL |
493 | * |
494 | * As far as we know, this damage only exists on Apple machines, so | |
495 | * This code is only enabled on powerpc. --gcl | |
dbbdee94 GL |
496 | */ |
497 | ranges = of_get_property(parent, rprop, &rlen); | |
41d94893 | 498 | if (ranges == NULL && !of_empty_ranges_quirk(parent)) { |
a0212ae0 | 499 | pr_debug("OF: no ranges; cannot translate\n"); |
3930f294 GL |
500 | return 1; |
501 | } | |
dbbdee94 GL |
502 | if (ranges == NULL || rlen == 0) { |
503 | offset = of_read_number(addr, na); | |
504 | memset(addr, 0, pna * 4); | |
3930f294 | 505 | pr_debug("OF: empty ranges; 1:1 translation\n"); |
dbbdee94 GL |
506 | goto finish; |
507 | } | |
508 | ||
509 | pr_debug("OF: walking ranges...\n"); | |
510 | ||
511 | /* Now walk through the ranges */ | |
512 | rlen /= 4; | |
513 | rone = na + pna + ns; | |
514 | for (; rlen >= rone; rlen -= rone, ranges += rone) { | |
515 | offset = bus->map(addr, ranges, na, ns, pna); | |
516 | if (offset != OF_BAD_ADDR) | |
517 | break; | |
518 | } | |
519 | if (offset == OF_BAD_ADDR) { | |
520 | pr_debug("OF: not found !\n"); | |
521 | return 1; | |
522 | } | |
523 | memcpy(addr, ranges + na, 4 * pna); | |
524 | ||
525 | finish: | |
526 | of_dump_addr("OF: parent translation for:", addr, pna); | |
527 | pr_debug("OF: with offset: %llx\n", (unsigned long long)offset); | |
528 | ||
529 | /* Translate it into parent bus space */ | |
530 | return pbus->translate(addr, offset, pna); | |
531 | } | |
532 | ||
533 | /* | |
534 | * Translate an address from the device-tree into a CPU physical address, | |
535 | * this walks up the tree and applies the various bus mappings on the | |
536 | * way. | |
537 | * | |
538 | * Note: We consider that crossing any level with #size-cells == 0 to mean | |
539 | * that translation is impossible (that is we are not dealing with a value | |
540 | * that can be mapped to a cpu physical address). This is not really specified | |
541 | * that way, but this is traditionally the way IBM at least do things | |
542 | */ | |
47b1e689 KP |
543 | static u64 __of_translate_address(struct device_node *dev, |
544 | const __be32 *in_addr, const char *rprop) | |
dbbdee94 GL |
545 | { |
546 | struct device_node *parent = NULL; | |
547 | struct of_bus *bus, *pbus; | |
47b1e689 | 548 | __be32 addr[OF_MAX_ADDR_CELLS]; |
dbbdee94 GL |
549 | int na, ns, pna, pns; |
550 | u64 result = OF_BAD_ADDR; | |
551 | ||
8804827b | 552 | pr_debug("OF: ** translation for device %s **\n", of_node_full_name(dev)); |
dbbdee94 GL |
553 | |
554 | /* Increase refcount at current level */ | |
555 | of_node_get(dev); | |
556 | ||
557 | /* Get parent & match bus type */ | |
558 | parent = of_get_parent(dev); | |
559 | if (parent == NULL) | |
560 | goto bail; | |
561 | bus = of_match_bus(parent); | |
562 | ||
59f5ca48 | 563 | /* Count address cells & copy address locally */ |
dbbdee94 GL |
564 | bus->count_cells(dev, &na, &ns); |
565 | if (!OF_CHECK_COUNTS(na, ns)) { | |
d9c6866b | 566 | pr_debug("OF: Bad cell count for %s\n", of_node_full_name(dev)); |
dbbdee94 GL |
567 | goto bail; |
568 | } | |
569 | memcpy(addr, in_addr, na * 4); | |
570 | ||
571 | pr_debug("OF: bus is %s (na=%d, ns=%d) on %s\n", | |
8804827b | 572 | bus->name, na, ns, of_node_full_name(parent)); |
dbbdee94 GL |
573 | of_dump_addr("OF: translating address:", addr, na); |
574 | ||
575 | /* Translate */ | |
576 | for (;;) { | |
577 | /* Switch to parent bus */ | |
578 | of_node_put(dev); | |
579 | dev = parent; | |
580 | parent = of_get_parent(dev); | |
581 | ||
582 | /* If root, we have finished */ | |
583 | if (parent == NULL) { | |
584 | pr_debug("OF: reached root node\n"); | |
585 | result = of_read_number(addr, na); | |
586 | break; | |
587 | } | |
588 | ||
589 | /* Get new parent bus and counts */ | |
590 | pbus = of_match_bus(parent); | |
591 | pbus->count_cells(dev, &pna, &pns); | |
592 | if (!OF_CHECK_COUNTS(pna, pns)) { | |
593 | printk(KERN_ERR "prom_parse: Bad cell count for %s\n", | |
0c02c800 | 594 | of_node_full_name(dev)); |
dbbdee94 GL |
595 | break; |
596 | } | |
597 | ||
598 | pr_debug("OF: parent bus is %s (na=%d, ns=%d) on %s\n", | |
0c02c800 | 599 | pbus->name, pna, pns, of_node_full_name(parent)); |
dbbdee94 GL |
600 | |
601 | /* Apply bus translation */ | |
602 | if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop)) | |
603 | break; | |
604 | ||
605 | /* Complete the move up one level */ | |
606 | na = pna; | |
607 | ns = pns; | |
608 | bus = pbus; | |
609 | ||
610 | of_dump_addr("OF: one level translation:", addr, na); | |
611 | } | |
612 | bail: | |
613 | of_node_put(parent); | |
614 | of_node_put(dev); | |
615 | ||
616 | return result; | |
617 | } | |
618 | ||
0131d897 | 619 | u64 of_translate_address(struct device_node *dev, const __be32 *in_addr) |
dbbdee94 GL |
620 | { |
621 | return __of_translate_address(dev, in_addr, "ranges"); | |
622 | } | |
623 | EXPORT_SYMBOL(of_translate_address); | |
624 | ||
0131d897 | 625 | u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr) |
dbbdee94 GL |
626 | { |
627 | return __of_translate_address(dev, in_addr, "dma-ranges"); | |
628 | } | |
629 | EXPORT_SYMBOL(of_translate_dma_address); | |
630 | ||
0131d897 | 631 | const __be32 *of_get_address(struct device_node *dev, int index, u64 *size, |
dbbdee94 GL |
632 | unsigned int *flags) |
633 | { | |
0131d897 | 634 | const __be32 *prop; |
dbbdee94 GL |
635 | unsigned int psize; |
636 | struct device_node *parent; | |
637 | struct of_bus *bus; | |
638 | int onesize, i, na, ns; | |
639 | ||
640 | /* Get parent & match bus type */ | |
641 | parent = of_get_parent(dev); | |
642 | if (parent == NULL) | |
643 | return NULL; | |
644 | bus = of_match_bus(parent); | |
645 | bus->count_cells(dev, &na, &ns); | |
646 | of_node_put(parent); | |
5d61b165 | 647 | if (!OF_CHECK_ADDR_COUNT(na)) |
dbbdee94 GL |
648 | return NULL; |
649 | ||
650 | /* Get "reg" or "assigned-addresses" property */ | |
651 | prop = of_get_property(dev, bus->addresses, &psize); | |
652 | if (prop == NULL) | |
653 | return NULL; | |
654 | psize /= 4; | |
655 | ||
656 | onesize = na + ns; | |
657 | for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) | |
658 | if (i == index) { | |
659 | if (size) | |
660 | *size = of_read_number(prop + na, ns); | |
661 | if (flags) | |
662 | *flags = bus->get_flags(prop); | |
663 | return prop; | |
664 | } | |
665 | return NULL; | |
666 | } | |
667 | EXPORT_SYMBOL(of_get_address); | |
668 | ||
#ifdef PCI_IOBASE
/* One registered chunk of CPU-addressable PCI IO space */
struct io_range {
	struct list_head list;
	phys_addr_t start;
	resource_size_t size;
};

/* All registered IO ranges, protected by io_range_lock */
static LIST_HEAD(io_range_list);
static DEFINE_SPINLOCK(io_range_lock);
#endif
679 | ||
680 | /* | |
681 | * Record the PCI IO range (expressed as CPU physical address + size). | |
682 | * Return a negative value if an error has occured, zero otherwise | |
683 | */ | |
684 | int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size) | |
685 | { | |
686 | int err = 0; | |
687 | ||
688 | #ifdef PCI_IOBASE | |
689 | struct io_range *range; | |
690 | resource_size_t allocated_size = 0; | |
691 | ||
692 | /* check if the range hasn't been previously recorded */ | |
693 | spin_lock(&io_range_lock); | |
694 | list_for_each_entry(range, &io_range_list, list) { | |
695 | if (addr >= range->start && addr + size <= range->start + size) { | |
696 | /* range already registered, bail out */ | |
697 | goto end_register; | |
698 | } | |
699 | allocated_size += range->size; | |
700 | } | |
701 | ||
702 | /* range not registed yet, check for available space */ | |
703 | if (allocated_size + size - 1 > IO_SPACE_LIMIT) { | |
704 | /* if it's too big check if 64K space can be reserved */ | |
705 | if (allocated_size + SZ_64K - 1 > IO_SPACE_LIMIT) { | |
706 | err = -E2BIG; | |
707 | goto end_register; | |
708 | } | |
709 | ||
710 | size = SZ_64K; | |
711 | pr_warn("Requested IO range too big, new size set to 64K\n"); | |
712 | } | |
713 | ||
714 | /* add the range to the list */ | |
294240ff | 715 | range = kzalloc(sizeof(*range), GFP_ATOMIC); |
41f8bba7 LD |
716 | if (!range) { |
717 | err = -ENOMEM; | |
718 | goto end_register; | |
719 | } | |
720 | ||
721 | range->start = addr; | |
722 | range->size = size; | |
723 | ||
724 | list_add_tail(&range->list, &io_range_list); | |
725 | ||
726 | end_register: | |
727 | spin_unlock(&io_range_lock); | |
728 | #endif | |
729 | ||
730 | return err; | |
731 | } | |
732 | ||
733 | phys_addr_t pci_pio_to_address(unsigned long pio) | |
734 | { | |
735 | phys_addr_t address = (phys_addr_t)OF_BAD_ADDR; | |
736 | ||
737 | #ifdef PCI_IOBASE | |
738 | struct io_range *range; | |
739 | resource_size_t allocated_size = 0; | |
740 | ||
741 | if (pio > IO_SPACE_LIMIT) | |
742 | return address; | |
743 | ||
744 | spin_lock(&io_range_lock); | |
745 | list_for_each_entry(range, &io_range_list, list) { | |
746 | if (pio >= allocated_size && pio < allocated_size + range->size) { | |
747 | address = range->start + pio - allocated_size; | |
748 | break; | |
749 | } | |
750 | allocated_size += range->size; | |
751 | } | |
752 | spin_unlock(&io_range_lock); | |
753 | #endif | |
754 | ||
755 | return address; | |
756 | } | |
757 | ||
25ff7944 RH |
758 | unsigned long __weak pci_address_to_pio(phys_addr_t address) |
759 | { | |
41f8bba7 LD |
760 | #ifdef PCI_IOBASE |
761 | struct io_range *res; | |
762 | resource_size_t offset = 0; | |
763 | unsigned long addr = -1; | |
764 | ||
765 | spin_lock(&io_range_lock); | |
766 | list_for_each_entry(res, &io_range_list, list) { | |
767 | if (address >= res->start && address < res->start + res->size) { | |
5dbb4c61 | 768 | addr = address - res->start + offset; |
41f8bba7 LD |
769 | break; |
770 | } | |
771 | offset += res->size; | |
772 | } | |
773 | spin_unlock(&io_range_lock); | |
774 | ||
775 | return addr; | |
776 | #else | |
25ff7944 RH |
777 | if (address > IO_SPACE_LIMIT) |
778 | return (unsigned long)-1; | |
779 | ||
780 | return (unsigned long) address; | |
41f8bba7 | 781 | #endif |
25ff7944 RH |
782 | } |
783 | ||
0131d897 SAS |
784 | static int __of_address_to_resource(struct device_node *dev, |
785 | const __be32 *addrp, u64 size, unsigned int flags, | |
35f3da32 | 786 | const char *name, struct resource *r) |
1f5bef30 GL |
787 | { |
788 | u64 taddr; | |
789 | ||
790 | if ((flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0) | |
791 | return -EINVAL; | |
792 | taddr = of_translate_address(dev, addrp); | |
793 | if (taddr == OF_BAD_ADDR) | |
794 | return -EINVAL; | |
795 | memset(r, 0, sizeof(struct resource)); | |
796 | if (flags & IORESOURCE_IO) { | |
797 | unsigned long port; | |
798 | port = pci_address_to_pio(taddr); | |
799 | if (port == (unsigned long)-1) | |
800 | return -EINVAL; | |
801 | r->start = port; | |
802 | r->end = port + size - 1; | |
803 | } else { | |
804 | r->start = taddr; | |
805 | r->end = taddr + size - 1; | |
806 | } | |
807 | r->flags = flags; | |
35f3da32 BC |
808 | r->name = name ? name : dev->full_name; |
809 | ||
1f5bef30 GL |
810 | return 0; |
811 | } | |
812 | ||
813 | /** | |
814 | * of_address_to_resource - Translate device tree address and return as resource | |
815 | * | |
816 | * Note that if your address is a PIO address, the conversion will fail if | |
817 | * the physical address can't be internally converted to an IO token with | |
818 | * pci_address_to_pio(), that is because it's either called to early or it | |
819 | * can't be matched to any host bridge IO space | |
820 | */ | |
821 | int of_address_to_resource(struct device_node *dev, int index, | |
822 | struct resource *r) | |
823 | { | |
0131d897 | 824 | const __be32 *addrp; |
1f5bef30 GL |
825 | u64 size; |
826 | unsigned int flags; | |
35f3da32 | 827 | const char *name = NULL; |
1f5bef30 GL |
828 | |
829 | addrp = of_get_address(dev, index, &size, &flags); | |
830 | if (addrp == NULL) | |
831 | return -EINVAL; | |
35f3da32 BC |
832 | |
833 | /* Get optional "reg-names" property to add a name to a resource */ | |
834 | of_property_read_string_index(dev, "reg-names", index, &name); | |
835 | ||
836 | return __of_address_to_resource(dev, addrp, size, flags, name, r); | |
1f5bef30 GL |
837 | } |
838 | EXPORT_SYMBOL_GPL(of_address_to_resource); | |
839 | ||
90e33f62 GL |
840 | struct device_node *of_find_matching_node_by_address(struct device_node *from, |
841 | const struct of_device_id *matches, | |
842 | u64 base_address) | |
843 | { | |
844 | struct device_node *dn = of_find_matching_node(from, matches); | |
845 | struct resource res; | |
846 | ||
847 | while (dn) { | |
848 | if (of_address_to_resource(dn, 0, &res)) | |
849 | continue; | |
850 | if (res.start == base_address) | |
851 | return dn; | |
852 | dn = of_find_matching_node(dn, matches); | |
853 | } | |
854 | ||
855 | return NULL; | |
856 | } | |
857 | ||
1f5bef30 | 858 | |
6b884a8d GL |
859 | /** |
860 | * of_iomap - Maps the memory mapped IO for a given device_node | |
861 | * @device: the device whose io range will be mapped | |
862 | * @index: index of the io range | |
863 | * | |
864 | * Returns a pointer to the mapped memory | |
865 | */ | |
866 | void __iomem *of_iomap(struct device_node *np, int index) | |
867 | { | |
868 | struct resource res; | |
869 | ||
870 | if (of_address_to_resource(np, index, &res)) | |
871 | return NULL; | |
872 | ||
28c1b6d6 | 873 | return ioremap(res.start, resource_size(&res)); |
6b884a8d GL |
874 | } |
875 | EXPORT_SYMBOL(of_iomap); | |
18308c94 | 876 | |
efd342fb MB |
/**
 * of_io_request_and_map - Requests a resource and maps the memory mapped IO
 *			   for a given device_node
 * @np:		the device whose io range will be mapped
 * @index:	index of the io range
 * @name:	name of the resource
 *
 * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
 * error code on failure. Usage example:
 *
 *	base = of_io_request_and_map(node, 0, "foo");
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *of_io_request_and_map(struct device_node *np, int index,
					const char *name)
{
	struct resource res;
	void __iomem *mem;

	if (of_address_to_resource(np, index, &res))
		return IOMEM_ERR_PTR(-EINVAL);

	if (!request_mem_region(res.start, resource_size(&res), name))
		return IOMEM_ERR_PTR(-EBUSY);

	mem = ioremap(res.start, resource_size(&res));
	if (!mem) {
		/* Undo the region reservation so the caller can retry later. */
		release_mem_region(res.start, resource_size(&res));
		return IOMEM_ERR_PTR(-ENOMEM);
	}

	return mem;
}
911 | EXPORT_SYMBOL(of_io_request_and_map); | |
912 | ||
18308c94 GS |
913 | /** |
914 | * of_dma_get_range - Get DMA range info | |
915 | * @np: device node to get DMA range info | |
916 | * @dma_addr: pointer to store initial DMA address of DMA range | |
917 | * @paddr: pointer to store initial CPU address of DMA range | |
918 | * @size: pointer to store size of DMA range | |
919 | * | |
920 | * Look in bottom up direction for the first "dma-ranges" property | |
921 | * and parse it. | |
922 | * dma-ranges format: | |
923 | * DMA addr (dma_addr) : naddr cells | |
924 | * CPU addr (phys_addr_t) : pna cells | |
925 | * size : nsize cells | |
926 | * | |
927 | * It returns -ENODEV if "dma-ranges" property was not found | |
928 | * for this device in DT. | |
929 | */ | |
930 | int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size) | |
931 | { | |
932 | struct device_node *node = of_node_get(np); | |
933 | const __be32 *ranges = NULL; | |
934 | int len, naddr, nsize, pna; | |
935 | int ret = 0; | |
936 | u64 dmaaddr; | |
937 | ||
938 | if (!node) | |
939 | return -EINVAL; | |
940 | ||
941 | while (1) { | |
942 | naddr = of_n_addr_cells(node); | |
943 | nsize = of_n_size_cells(node); | |
944 | node = of_get_next_parent(node); | |
945 | if (!node) | |
946 | break; | |
947 | ||
948 | ranges = of_get_property(node, "dma-ranges", &len); | |
949 | ||
950 | /* Ignore empty ranges, they imply no translation required */ | |
951 | if (ranges && len > 0) | |
952 | break; | |
953 | ||
954 | /* | |
955 | * At least empty ranges has to be defined for parent node if | |
956 | * DMA is supported | |
957 | */ | |
958 | if (!ranges) | |
959 | break; | |
960 | } | |
961 | ||
962 | if (!ranges) { | |
963 | pr_debug("%s: no dma-ranges found for node(%s)\n", | |
964 | __func__, np->full_name); | |
965 | ret = -ENODEV; | |
966 | goto out; | |
967 | } | |
968 | ||
969 | len /= sizeof(u32); | |
970 | ||
971 | pna = of_n_addr_cells(node); | |
972 | ||
973 | /* dma-ranges format: | |
974 | * DMA addr : naddr cells | |
975 | * CPU addr : pna cells | |
976 | * size : nsize cells | |
977 | */ | |
978 | dmaaddr = of_read_number(ranges, naddr); | |
979 | *paddr = of_translate_dma_address(np, ranges); | |
980 | if (*paddr == OF_BAD_ADDR) { | |
981 | pr_err("%s: translation of DMA address(%pad) to CPU address failed node(%s)\n", | |
982 | __func__, dma_addr, np->full_name); | |
983 | ret = -EINVAL; | |
984 | goto out; | |
985 | } | |
986 | *dma_addr = dmaaddr; | |
987 | ||
988 | *size = of_read_number(ranges + naddr + pna, nsize); | |
989 | ||
990 | pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n", | |
991 | *dma_addr, *paddr, *size); | |
992 | ||
993 | out: | |
994 | of_node_put(node); | |
995 | ||
996 | return ret; | |
997 | } | |
998 | EXPORT_SYMBOL_GPL(of_dma_get_range); | |
92ea637e SS |
999 | |
1000 | /** | |
1001 | * of_dma_is_coherent - Check if device is coherent | |
1002 | * @np: device node | |
1003 | * | |
1004 | * It returns true if "dma-coherent" property was found | |
1005 | * for this device in DT. | |
1006 | */ | |
1007 | bool of_dma_is_coherent(struct device_node *np) | |
1008 | { | |
1009 | struct device_node *node = of_node_get(np); | |
1010 | ||
1011 | while (node) { | |
1012 | if (of_property_read_bool(node, "dma-coherent")) { | |
1013 | of_node_put(node); | |
1014 | return true; | |
1015 | } | |
1016 | node = of_get_next_parent(node); | |
1017 | } | |
1018 | of_node_put(node); | |
1019 | return false; | |
1020 | } | |
eb3d3ec5 | 1021 | EXPORT_SYMBOL_GPL(of_dma_is_coherent); |