[PATCH] powerpc: Fix platinumfb for some modes
[deliverable/linux.git] / arch / powerpc / kernel / prom.c
CommitLineData
9b6b563c
PM
1/*
2 * Procedures for creating, accessing and interpreting the device tree.
3 *
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#undef DEBUG
17
18#include <stdarg.h>
19#include <linux/config.h>
20#include <linux/kernel.h>
21#include <linux/string.h>
22#include <linux/init.h>
23#include <linux/threads.h>
24#include <linux/spinlock.h>
25#include <linux/types.h>
26#include <linux/pci.h>
27#include <linux/stringify.h>
28#include <linux/delay.h>
29#include <linux/initrd.h>
30#include <linux/bitops.h>
31#include <linux/module.h>
dcee3036 32#include <linux/kexec.h>
9b6b563c
PM
33
34#include <asm/prom.h>
35#include <asm/rtas.h>
36#include <asm/lmb.h>
37#include <asm/page.h>
38#include <asm/processor.h>
39#include <asm/irq.h>
40#include <asm/io.h>
0cc4746c 41#include <asm/kdump.h>
9b6b563c
PM
42#include <asm/smp.h>
43#include <asm/system.h>
44#include <asm/mmu.h>
45#include <asm/pgtable.h>
46#include <asm/pci.h>
47#include <asm/iommu.h>
48#include <asm/btext.h>
49#include <asm/sections.h>
50#include <asm/machdep.h>
51#include <asm/pSeries_reconfig.h>
40ef8cbc 52#include <asm/pci-bridge.h>
9b6b563c
PM
/* Debug printing helper: compiled out entirely unless DEBUG is defined. */
#ifdef DEBUG
#define DBG(fmt...) printk(KERN_ERR fmt)
#else
#define DBG(fmt...)
#endif

/* Layout of one entry of a PCI node's "assigned-addresses" property:
 * a 3-cell PCI address followed by a 2-cell (64-bit) size. */
struct pci_reg_property {
	struct pci_address addr;
	u32 size_hi;
	u32 size_lo;
};

/* Layout of one entry of an ISA node's "reg" property. */
struct isa_reg_property {
	u32 space;
	u32 address;
	u32 size;
};


/* Per-bus-type property interpreter: (node, mem_start, naddrc, nsizec,
 * measure_only) -> 0 or -errno.  See finish_node() for dispatch. */
typedef int interpret_func(struct device_node *, unsigned long *,
			   int, int, int);

/* #address-cells / #size-cells of the device-tree root, cached at boot. */
static int __initdata dt_root_addr_cells;
static int __initdata dt_root_size_cells;

#ifdef CONFIG_PPC64
static int __initdata iommu_is_off;
int __initdata iommu_force_on;
/* TCE table allocation window, parsed from /chosen by
 * early_init_dt_scan_chosen(). */
unsigned long tce_alloc_start, tce_alloc_end;
#endif

/* One cell of a flattened device-tree property value. */
typedef u32 cell_t;

#if 0
static struct boot_param_header *initial_boot_params __initdata;
#else
/* Header of the flattened device-tree blob passed in by the boot loader. */
struct boot_param_header *initial_boot_params;
#endif

/* Head of the singly linked list of all unflattened nodes (allnext chain). */
static struct device_node *allnodes = NULL;

/* use when traversing tree through the allnext, child, sibling,
 * or parent members of struct device_node.
 */
static DEFINE_RWLOCK(devtree_lock);

/* export that to outside world */
struct device_node *of_chosen;

/* Fallback interrupt controller for trees without phandles (e.g. BootX),
 * plus the number of controllers found by scan_interrupt_controllers(). */
struct device_node *dflt_interrupt_controller;
int num_interrupt_controllers;
105
9b6b563c
PM
106/*
107 * Wrapper for allocating memory for various data that needs to be
108 * attached to device nodes as they are processed at boot or when
109 * added to the device tree later (e.g. DLPAR). At boot there is
110 * already a region reserved so we just increment *mem_start by size;
111 * otherwise we call kmalloc.
112 */
113static void * prom_alloc(unsigned long size, unsigned long *mem_start)
114{
115 unsigned long tmp;
116
117 if (!mem_start)
118 return kmalloc(size, GFP_KERNEL);
119
120 tmp = *mem_start;
121 *mem_start += size;
122 return (void *)tmp;
123}
124
125/*
126 * Find the device_node with a given phandle.
127 */
128static struct device_node * find_phandle(phandle ph)
129{
130 struct device_node *np;
131
132 for (np = allnodes; np != 0; np = np->allnext)
133 if (np->linux_phandle == ph)
134 return np;
135 return NULL;
136}
137
138/*
139 * Find the interrupt parent of a node.
140 */
141static struct device_node * __devinit intr_parent(struct device_node *p)
142{
143 phandle *parp;
144
145 parp = (phandle *) get_property(p, "interrupt-parent", NULL);
146 if (parp == NULL)
147 return p->parent;
148 p = find_phandle(*parp);
149 if (p != NULL)
150 return p;
151 /*
152 * On a powermac booted with BootX, we don't get to know the
153 * phandles for any nodes, so find_phandle will return NULL.
154 * Fortunately these machines only have one interrupt controller
155 * so there isn't in fact any ambiguity. -- paulus
156 */
157 if (num_interrupt_controllers == 1)
158 p = dflt_interrupt_controller;
159 return p;
160}
161
/*
 * Find out the size (in u32 cells) of each entry of the "interrupts"
 * property for a node, by walking up the interrupt tree until a
 * "#interrupt-cells" property is found.  Falls back to 1 if an
 * interrupt controller (or map) without "#interrupt-cells" is hit,
 * or if the walk runs off the top of the tree.
 */
int __devinit prom_n_intr_cells(struct device_node *np)
{
	struct device_node *p;
	unsigned int *icp;

	/* note: assignment in the loop condition — advance to the
	 * interrupt parent before each iteration, stop at NULL */
	for (p = np; (p = intr_parent(p)) != NULL; ) {
		icp = (unsigned int *)
			get_property(p, "#interrupt-cells", NULL);
		if (icp != NULL)
			return *icp;
		/* a controller or mapper node should have declared its
		 * cell count; warn and assume 1 cell */
		if (get_property(p, "interrupt-controller", NULL) != NULL
		    || get_property(p, "interrupt-map", NULL) != NULL) {
			printk("oops, node %s doesn't have #interrupt-cells\n",
			       p->full_name);
			return 1;
		}
	}
#ifdef DEBUG_IRQ
	printk("prom_n_intr_cells failed for %s\n", np->full_name);
#endif
	return 1;
}
188
/*
 * Map an interrupt from a device up to the platform interrupt
 * descriptor, implementing the Open Firmware interrupt-mapping
 * recommended practice: walk up the interrupt tree from @np,
 * translating (unit address, interrupt specifier) through each
 * node's "interrupt-map" / "interrupt-map-mask" until a node with
 * an "interrupt-controller" property is reached.
 *
 * On success, *@irq points at the final interrupt specifier (inside
 * the last matched map entry), *@ictrler is the controller node, and
 * the return value is the number of cells in the specifier.  Returns
 * 0 on any failure.
 */
static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
				   struct device_node *np, unsigned int *ints,
				   int nintrc)
{
	struct device_node *p, *ipar;
	unsigned int *imap, *imask, *ip;
	int i, imaplen, match;
	int newintrc = 0, newaddrc = 0;
	unsigned int *reg;
	int naddrc;

	reg = (unsigned int *) get_property(np, "reg", NULL);
	naddrc = prom_n_addr_cells(np);
	p = intr_parent(np);
	while (p != NULL) {
		if (get_property(p, "interrupt-controller", NULL) != NULL)
			/* this node is an interrupt controller, stop here */
			break;
		imap = (unsigned int *)
			get_property(p, "interrupt-map", &imaplen);
		if (imap == NULL) {
			/* no map at this level: pass through unchanged */
			p = intr_parent(p);
			continue;
		}
		imask = (unsigned int *)
			get_property(p, "interrupt-map-mask", NULL);
		if (imask == NULL) {
			printk("oops, %s has interrupt-map but no mask\n",
			       p->full_name);
			return 0;
		}
		imaplen /= sizeof(unsigned int);
		match = 0;
		ipar = NULL;
		/* scan map entries: each is <child addr, child intr,
		 * parent phandle, parent addr, parent intr> */
		while (imaplen > 0 && !match) {
			/* check the child-interrupt field */
			match = 1;
			for (i = 0; i < naddrc && match; ++i)
				match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
			for (; i < naddrc + nintrc && match; ++i)
				match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
			imap += naddrc + nintrc;
			imaplen -= naddrc + nintrc;
			/* grab the interrupt parent */
			ipar = find_phandle((phandle) *imap++);
			--imaplen;
			if (ipar == NULL && num_interrupt_controllers == 1)
				/* cope with BootX not giving us phandles */
				ipar = dflt_interrupt_controller;
			if (ipar == NULL) {
				printk("oops, no int parent %x in map of %s\n",
				       imap[-1], p->full_name);
				return 0;
			}
			/* find the parent's # addr and intr cells */
			ip = (unsigned int *)
				get_property(ipar, "#interrupt-cells", NULL);
			if (ip == NULL) {
				printk("oops, no #interrupt-cells on %s\n",
				       ipar->full_name);
				return 0;
			}
			newintrc = *ip;
			ip = (unsigned int *)
				get_property(ipar, "#address-cells", NULL);
			newaddrc = (ip == NULL)? 0: *ip;
			/* step past the parent unit-address + specifier;
			 * if this entry matched, imap now points just past
			 * the translated specifier */
			imap += newaddrc + newintrc;
			imaplen -= newaddrc + newintrc;
		}
		if (imaplen < 0) {
			printk("oops, error decoding int-map on %s, len=%d\n",
			       p->full_name, imaplen);
			return 0;
		}
		if (!match) {
#ifdef DEBUG_IRQ
			printk("oops, no match in %s int-map for %s\n",
			       p->full_name, np->full_name);
#endif
			return 0;
		}
		/* continue the walk from the map's parent, with the
		 * translated address/specifier taken from the matched entry */
		p = ipar;
		naddrc = newaddrc;
		nintrc = newintrc;
		ints = imap - nintrc;
		reg = ints - naddrc;
	}
	if (p == NULL) {
#ifdef DEBUG_IRQ
		printk("hmmm, int tree for %s doesn't have ctrler\n",
		       np->full_name);
#endif
		return 0;
	}
	*irq = ints;
	*ictrler = p;
	return nintrc;
}
291
/* Translation from the 2-bit sense value in an ISA (8259-cascade)
 * interrupt specifier to the kernel's IRQ sense/polarity flags;
 * indexed by (specifier_cell & 3). */
static unsigned char map_isa_senses[4] = {
	IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
	IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
	IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE,
	IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE
};

/* Same translation for MPIC-style interrupt specifiers. */
static unsigned char map_mpic_senses[4] = {
	IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE,
	IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
	/* 2 seems to be used for the 8259 cascade... */
	IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
	IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE,
};
306
/*
 * Fill in np->intrs / np->n_intrs for a node by parsing its
 * "interrupts" (or legacy "AAPL,interrupts") property and mapping
 * each entry through the interrupt tree.
 *
 * Called twice by finish_node(): once with @measure_only set, which
 * only advances *@mem_start by the space needed (via prom_alloc),
 * then again to actually fill in the data.  Returns 0 or -ENOMEM.
 */
static int __devinit finish_node_interrupts(struct device_node *np,
					    unsigned long *mem_start,
					    int measure_only)
{
	unsigned int *ints;
	int intlen, intrcells, intrcount;
	int i, j, n, sense;
	unsigned int *irq, virq;
	struct device_node *ic;

	if (num_interrupt_controllers == 0) {
		/*
		 * Old machines just have a list of interrupt numbers
		 * and no interrupt-controller nodes.
		 */
		ints = (unsigned int *) get_property(np, "AAPL,interrupts",
						     &intlen);
		/* XXX old interpret_pci_props looked in parent too */
		/* XXX old interpret_macio_props looked for interrupts
		   before AAPL,interrupts */
		if (ints == NULL)
			ints = (unsigned int *) get_property(np, "interrupts",
							     &intlen);
		if (ints == NULL)
			return 0;

		np->n_intrs = intlen / sizeof(unsigned int);
		np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]),
				       mem_start);
		if (!np->intrs)
			return -ENOMEM;
		if (measure_only)
			return 0;

		for (i = 0; i < np->n_intrs; ++i) {
			np->intrs[i].line = *ints++;
			/* no sense info available on these machines;
			 * assume level-low */
			np->intrs[i].sense = IRQ_SENSE_LEVEL
				| IRQ_POLARITY_NEGATIVE;
		}
		return 0;
	}

	ints = (unsigned int *) get_property(np, "interrupts", &intlen);
	if (ints == NULL)
		return 0;
	intrcells = prom_n_intr_cells(np);
	/* intlen becomes the number of specifiers, not bytes */
	intlen /= intrcells * sizeof(unsigned int);

	np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
	if (!np->intrs)
		return -ENOMEM;

	if (measure_only)
		return 0;

	intrcount = 0;
	for (i = 0; i < intlen; ++i, ints += intrcells) {
		n = map_interrupt(&irq, &ic, np, ints, intrcells);
		if (n <= 0)
			continue;

		/* don't map IRQ numbers under a cascaded 8259 controller */
		if (ic && device_is_compatible(ic, "chrp,iic")) {
			np->intrs[intrcount].line = irq[0];
			/* default sense 3 = level-high for ISA */
			sense = (n > 1)? (irq[1] & 3): 3;
			np->intrs[intrcount].sense = map_isa_senses[sense];
		} else {
			virq = virt_irq_create_mapping(irq[0]);
#ifdef CONFIG_PPC64
			if (virq == NO_IRQ) {
				printk(KERN_CRIT "Could not allocate interrupt"
				       " number for %s\n", np->full_name);
				continue;
			}
#endif
			np->intrs[intrcount].line = irq_offset_up(virq);
			/* default sense 1 = level-low for MPIC */
			sense = (n > 1)? (irq[1] & 3): 1;
			np->intrs[intrcount].sense = map_mpic_senses[sense];
		}

#ifdef CONFIG_PPC64
		/* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
		if (_machine == PLATFORM_POWERMAC && ic && ic->parent) {
			char *name = get_property(ic->parent, "name", NULL);
			if (name && !strcmp(name, "u3"))
				np->intrs[intrcount].line += 128;
			else if (!(name && !strcmp(name, "mac-io")))
				/* ignore other cascaded controllers, such as
				   the k2-sata-root */
				break;
		}
#endif
		if (n > 2) {
			printk("hmmm, got %d intr cells for %s:", n,
			       np->full_name);
			for (j = 0; j < n; ++j)
				printk(" %d", irq[j]);
			printk("\n");
		}
		++intrcount;
	}
	np->n_intrs = intrcount;

	return 0;
}
412
/*
 * Interpreter for PCI ("pci"/"vci") nodes: translate the
 * "assigned-addresses" property into np->addrs / np->n_addrs.
 * In the measure_only pass, only account for the space via
 * prom_alloc.  Returns 0 or -ENOMEM.
 */
static int __devinit interpret_pci_props(struct device_node *np,
					 unsigned long *mem_start,
					 int naddrc, int nsizec,
					 int measure_only)
{
	struct address_range *adr;
	struct pci_reg_property *pci_addrs;
	int i, l, n_addrs;

	pci_addrs = (struct pci_reg_property *)
		get_property(np, "assigned-addresses", &l);
	if (!pci_addrs)
		return 0;

	n_addrs = l / sizeof(*pci_addrs);

	adr = prom_alloc(n_addrs * sizeof(*adr), mem_start);
	if (!adr)
		return -ENOMEM;

	if (measure_only)
		return 0;

	np->addrs = adr;
	np->n_addrs = n_addrs;

	for (i = 0; i < n_addrs; i++) {
		adr[i].space = pci_addrs[i].addr.a_hi;
		/* reassemble the 64-bit PCI address from mid/lo cells */
		adr[i].address = pci_addrs[i].addr.a_lo |
			((u64)pci_addrs[i].addr.a_mid << 32);
		/* note: only the low size cell is kept (size_hi dropped) */
		adr[i].size = pci_addrs[i].size_lo;
	}

	return 0;
}
448
/*
 * Interpreter for children of a "dbdma" node: parse 32-bit "reg"
 * entries, offsetting each address by the enclosing dbdma node's
 * base address.  Addresses are written directly into the bump
 * region at *mem_start (not via prom_alloc).  Always returns 0.
 */
static int __init interpret_dbdma_props(struct device_node *np,
					unsigned long *mem_start,
					int naddrc, int nsizec,
					int measure_only)
{
	struct reg_property32 *rp;
	struct address_range *adr;
	unsigned long base_address;
	int i, l;
	struct device_node *db;

	base_address = 0;
	if (!measure_only) {
		/* find the nearest dbdma ancestor that has addresses */
		for (db = np->parent; db != NULL; db = db->parent) {
			if (!strcmp(db->type, "dbdma") && db->n_addrs != 0) {
				base_address = db->addrs[0].address;
				break;
			}
		}
	}

	rp = (struct reg_property32 *) get_property(np, "reg", &l);
	if (rp != 0 && l >= sizeof(struct reg_property32)) {
		i = 0;
		adr = (struct address_range *) (*mem_start);
		while ((l -= sizeof(struct reg_property32)) >= 0) {
			if (!measure_only) {
				adr[i].space = 2;
				adr[i].address = rp[i].address + base_address;
				adr[i].size = rp[i].size;
			}
			++i;
		}
		np->addrs = adr;
		np->n_addrs = i;
		(*mem_start) += i * sizeof(struct address_range);
	}

	return 0;
}
489
/*
 * Interpreter for children of a "mac-io" node; identical in structure
 * to interpret_dbdma_props() but keyed on the "mac-io" ancestor's base
 * address.  Always returns 0.
 */
static int __init interpret_macio_props(struct device_node *np,
					unsigned long *mem_start,
					int naddrc, int nsizec,
					int measure_only)
{
	struct reg_property32 *rp;
	struct address_range *adr;
	unsigned long base_address;
	int i, l;
	struct device_node *db;

	base_address = 0;
	if (!measure_only) {
		/* find the nearest mac-io ancestor that has addresses */
		for (db = np->parent; db != NULL; db = db->parent) {
			if (!strcmp(db->type, "mac-io") && db->n_addrs != 0) {
				base_address = db->addrs[0].address;
				break;
			}
		}
	}

	rp = (struct reg_property32 *) get_property(np, "reg", &l);
	if (rp != 0 && l >= sizeof(struct reg_property32)) {
		i = 0;
		adr = (struct address_range *) (*mem_start);
		while ((l -= sizeof(struct reg_property32)) >= 0) {
			if (!measure_only) {
				adr[i].space = 2;
				adr[i].address = rp[i].address + base_address;
				adr[i].size = rp[i].size;
			}
			++i;
		}
		np->addrs = adr;
		np->n_addrs = i;
		(*mem_start) += i * sizeof(struct address_range);
	}

	return 0;
}
530
/*
 * Interpreter for children of an "isa" node: copy <space, address,
 * size> triples from the "reg" property into np->addrs.  Writes
 * directly into the bump region at *mem_start.  Always returns 0.
 */
static int __init interpret_isa_props(struct device_node *np,
				      unsigned long *mem_start,
				      int naddrc, int nsizec,
				      int measure_only)
{
	struct isa_reg_property *rp;
	struct address_range *adr;
	int i, l;

	rp = (struct isa_reg_property *) get_property(np, "reg", &l);
	if (rp != 0 && l >= sizeof(struct isa_reg_property)) {
		i = 0;
		adr = (struct address_range *) (*mem_start);
		while ((l -= sizeof(struct isa_reg_property)) >= 0) {
			if (!measure_only) {
				adr[i].space = rp[i].space;
				adr[i].address = rp[i].address;
				adr[i].size = rp[i].size;
			}
			++i;
		}
		np->addrs = adr;
		np->n_addrs = i;
		(*mem_start) += i * sizeof(struct address_range);
	}

	return 0;
}
559
/*
 * Interpreter for direct children of the root (and uni-n/u3 bridges):
 * parse "reg" entries of naddrc+nsizec cells each, keeping only the
 * least significant address and size cells.  For memory nodes,
 * "linux,usable-memory" overrides "reg" when present (kdump).
 * Always returns 0.
 */
static int __init interpret_root_props(struct device_node *np,
				       unsigned long *mem_start,
				       int naddrc, int nsizec,
				       int measure_only)
{
	struct address_range *adr;
	int i, l;
	unsigned int *rp;
	int rpsize = (naddrc + nsizec) * sizeof(unsigned int);

	rp = (unsigned int *) get_property(np, "linux,usable-memory", &l);
	if (rp == NULL)
		rp = (unsigned int *) get_property(np, "reg", &l);

	if (rp != 0 && l >= rpsize) {
		i = 0;
		adr = (struct address_range *) (*mem_start);
		while ((l -= rpsize) >= 0) {
			if (!measure_only) {
				adr[i].space = 0;
				/* keep only the low cell of multi-cell
				 * addresses/sizes */
				adr[i].address = rp[naddrc - 1];
				adr[i].size = rp[naddrc + nsizec - 1];
			}
			++i;
			rp += naddrc + nsizec;
		}
		np->addrs = adr;
		np->n_addrs = i;
		(*mem_start) += i * sizeof(struct address_range);
	}

	return 0;
}
593
/*
 * Recursively finish a node and its children: run the bus-specific
 * address interpreter @ifunc, parse interrupts, then pick the
 * interpreter for the children based on this node's name/type.
 *
 * Part of the two-pass protocol: with @measure_only set, only
 * *@mem_start is advanced; otherwise the data is filled in.
 * Returns 0 or the first error from an interpreter.
 */
static int __devinit finish_node(struct device_node *np,
				 unsigned long *mem_start,
				 interpret_func *ifunc,
				 int naddrc, int nsizec,
				 int measure_only)
{
	struct device_node *child;
	int *ip, rc = 0;

	/* get the device addresses and interrupts */
	if (ifunc != NULL)
		rc = ifunc(np, mem_start, naddrc, nsizec, measure_only);
	if (rc)
		goto out;

	rc = finish_node_interrupts(np, mem_start, measure_only);
	if (rc)
		goto out;

	/* Look for #address-cells and #size-cells properties. */
	ip = (int *) get_property(np, "#address-cells", NULL);
	if (ip != NULL)
		naddrc = *ip;
	ip = (int *) get_property(np, "#size-cells", NULL);
	if (ip != NULL)
		nsizec = *ip;

	/* choose the interpreter for this node's children */
	if (!strcmp(np->name, "device-tree") || np->parent == NULL)
		ifunc = interpret_root_props;
	else if (np->type == 0)
		ifunc = NULL;
	else if (!strcmp(np->type, "pci") || !strcmp(np->type, "vci"))
		ifunc = interpret_pci_props;
	else if (!strcmp(np->type, "dbdma"))
		ifunc = interpret_dbdma_props;
	else if (!strcmp(np->type, "mac-io") || ifunc == interpret_macio_props)
		ifunc = interpret_macio_props;
	else if (!strcmp(np->type, "isa"))
		ifunc = interpret_isa_props;
	else if (!strcmp(np->name, "uni-n") || !strcmp(np->name, "u3"))
		ifunc = interpret_root_props;
	else if (!((ifunc == interpret_dbdma_props
		    || ifunc == interpret_macio_props)
		   && (!strcmp(np->type, "escc")
		       || !strcmp(np->type, "media-bay"))))
		/* anything else under dbdma/mac-io except escc and
		 * media-bay gets no interpreter */
		ifunc = NULL;

	for (child = np->child; child != NULL; child = child->sibling) {
		rc = finish_node(child, mem_start, ifunc,
				 naddrc, nsizec, measure_only);
		if (rc)
			goto out;
	}
out:
	return rc;
}
650
651static void __init scan_interrupt_controllers(void)
652{
653 struct device_node *np;
654 int n = 0;
655 char *name, *ic;
656 int iclen;
657
658 for (np = allnodes; np != NULL; np = np->allnext) {
659 ic = get_property(np, "interrupt-controller", &iclen);
660 name = get_property(np, "name", NULL);
661 /* checking iclen makes sure we don't get a false
662 match on /chosen.interrupt_controller */
663 if ((name != NULL
664 && strcmp(name, "interrupt-controller") == 0)
665 || (ic != NULL && iclen == 0
666 && strcmp(name, "AppleKiwi"))) {
667 if (n == 0)
668 dflt_interrupt_controller = np;
669 ++n;
670 }
671 }
672 num_interrupt_controllers = n;
673}
674
/**
 * finish_device_tree is called once things are running normally
 * (i.e. with text and data mapped to the address they were linked at).
 * It traverses the device tree and fills in some of the additional,
 * fields in each node like {n_}addrs and {n_}intrs, the virt interrupt
 * mapping is also initialized at this point.
 */
void __init finish_device_tree(void)
{
	unsigned long start, end, size = 0;

	DBG(" -> finish_device_tree\n");

#ifdef CONFIG_PPC64
	/* Initialize virtual IRQ map */
	virt_irq_init();
#endif
	scan_interrupt_controllers();

	/*
	 * Finish device-tree (pre-parsing some properties etc...)
	 * We do this in 2 passes. One with "measure_only" set, which
	 * will only measure the amount of memory needed, then we can
	 * allocate that memory, and call finish_node again. However,
	 * we must be careful as most routines will fail nowadays when
	 * prom_alloc() returns 0, so we must make sure our first pass
	 * doesn't start at 0. We pre-initialize size to 16 for that
	 * reason and then remove those additional 16 bytes
	 */
	size = 16;
	finish_node(allnodes, &size, NULL, 0, 0, 1);
	size -= 16;
	/* lmb_alloc returns a physical address; convert to virtual */
	end = start = (unsigned long) __va(lmb_alloc(size, 128));
	finish_node(allnodes, &end, NULL, 0, 0, 0);
	/* second pass must consume exactly what the first measured */
	BUG_ON(end != start + size);

	DBG(" <- finish_device_tree\n");
}
713
714static inline char *find_flat_dt_string(u32 offset)
715{
716 return ((char *)initial_boot_params) +
717 initial_boot_params->off_dt_strings + offset;
718}
719
/**
 * This function is used to scan the flattened device-tree, it is
 * used to extract the memory informations at boot before we can
 * unflatten the tree.
 *
 * Calls @it for every OF_DT_BEGIN_NODE with (property pointer, last
 * path component, depth, @data); stops early if @it returns non-zero
 * and propagates that value.  Returns 0, the callback's value, or
 * -EINVAL on a malformed blob.
 */
int __init of_scan_flat_dt(int (*it)(unsigned long node,
				     const char *uname, int depth,
				     void *data),
			   void *data)
{
	unsigned long p = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;
	int rc = 0;
	int depth = -1;

	do {
		u32 tag = *((u32 *)p);
		char *pathp;

		p += 4;
		if (tag == OF_DT_END_NODE) {
			depth --;
			continue;
		}
		if (tag == OF_DT_NOP)
			continue;
		if (tag == OF_DT_END)
			break;
		if (tag == OF_DT_PROP) {
			/* skip over property: size cell, nameoff cell,
			 * then the value itself */
			u32 sz = *((u32 *)p);
			p += 8;
			/* old (pre-0x10) blobs 8-byte align large values */
			if (initial_boot_params->version < 0x10)
				p = _ALIGN(p, sz >= 8 ? 8 : 4);
			p += sz;
			p = _ALIGN(p, 4);
			continue;
		}
		if (tag != OF_DT_BEGIN_NODE) {
			printk(KERN_WARNING "Invalid tag %x scanning flattened"
			       " device tree !\n", tag);
			return -EINVAL;
		}
		depth++;
		pathp = (char *)p;
		p = _ALIGN(p + strlen(pathp) + 1, 4);
		/* strip the path down to its last component */
		if ((*pathp) == '/') {
			char *lp, *np;
			for (lp = NULL, np = pathp; *np; np++)
				if ((*np) == '/')
					lp = np+1;
			if (lp != NULL)
				pathp = lp;
		}
		rc = it(p, pathp, depth, data);
		if (rc != 0)
			break;
	} while(1);

	return rc;
}
780
/**
 * This function can be used within scan_flattened_dt callback to get
 * access to properties.
 *
 * @node points at the first property tag of a node (as passed to the
 * of_scan_flat_dt callback).  Returns a pointer to the value of the
 * property called @name, storing its length in *@size if @size is
 * non-NULL; returns NULL if the node has no such property.
 */
void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
				 unsigned long *size)
{
	unsigned long p = node;

	do {
		u32 tag = *((u32 *)p);
		u32 sz, noff;
		const char *nstr;

		p += 4;
		if (tag == OF_DT_NOP)
			continue;
		/* properties all come before subnodes; anything else
		 * means we've run past this node's property list */
		if (tag != OF_DT_PROP)
			return NULL;

		sz = *((u32 *)p);
		noff = *((u32 *)(p + 4));
		p += 8;
		/* old (pre-0x10) blobs 8-byte align large values */
		if (initial_boot_params->version < 0x10)
			p = _ALIGN(p, sz >= 8 ? 8 : 4);

		nstr = find_flat_dt_string(noff);
		if (nstr == NULL) {
			printk(KERN_WARNING "Can't find property index"
			       " name !\n");
			return NULL;
		}
		if (strcmp(name, nstr) == 0) {
			if (size)
				*size = sz;
			return (void *)p;
		}
		p += sz;
		p = _ALIGN(p, 4);
	} while(1);
}
822
823static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
824 unsigned long align)
825{
826 void *res;
827
828 *mem = _ALIGN(*mem, align);
829 res = (void *)*mem;
830 *mem += size;
831
832 return res;
833}
834
/*
 * Unflatten one node of the flattened device tree (and, via the tail
 * loop, all of its children recursively).
 *
 * @mem: bump allocator cursor for the unflattened structures
 * @p: in/out cursor into the flat blob; on entry points at
 *     OF_DT_BEGIN_NODE, on exit just past the node's OF_DT_END_NODE
 * @dad: parent device_node, or NULL for the root
 * @allnextpp: tail pointer of the allnodes list; when NULL this is
 *     the sizing pass and nothing is written, only @mem is advanced
 * @fpsize: accumulated full-path length for version >= 0x10 blobs,
 *     which store only unit names instead of full paths
 *
 * Returns the updated value of @mem.
 */
static unsigned long __init unflatten_dt_node(unsigned long mem,
					      unsigned long *p,
					      struct device_node *dad,
					      struct device_node ***allnextpp,
					      unsigned long fpsize)
{
	struct device_node *np;
	struct property *pp, **prev_pp = NULL;
	char *pathp;
	u32 tag;
	unsigned int l, allocl;
	int has_name = 0;
	int new_format = 0;

	tag = *((u32 *)(*p));
	if (tag != OF_DT_BEGIN_NODE) {
		printk("Weird tag at start of node: %x\n", tag);
		return mem;
	}
	*p += 4;
	pathp = (char *)*p;
	l = allocl = strlen(pathp) + 1;
	*p = _ALIGN(*p + l, 4);

	/* version 0x10 has a more compact unit name here instead of the full
	 * path. we accumulate the full path size using "fpsize", we'll rebuild
	 * it later. We detect this because the first character of the name is
	 * not '/'.
	 */
	if ((*pathp) != '/') {
		new_format = 1;
		if (fpsize == 0) {
			/* root node: special case. fpsize accounts for path
			 * plus terminating zero. root node only has '/', so
			 * fpsize should be 2, but we want to avoid the first
			 * level nodes to have two '/' so we use fpsize 1 here
			 */
			fpsize = 1;
			allocl = 2;
		} else {
			/* account for '/' and path size minus terminal 0
			 * already in 'l'
			 */
			fpsize += l;
			allocl = fpsize;
		}
	}


	/* node struct and its full_name string share one allocation */
	np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
				__alignof__(struct device_node));
	if (allnextpp) {
		memset(np, 0, sizeof(*np));
		np->full_name = ((char*)np) + sizeof(struct device_node);
		if (new_format) {
			char *p = np->full_name;
			/* rebuild full path for new format */
			if (dad && dad->parent) {
				strcpy(p, dad->full_name);
#ifdef DEBUG
				if ((strlen(p) + l + 1) != allocl) {
					DBG("%s: p: %d, l: %d, a: %d\n",
					    pathp, strlen(p), l, allocl);
				}
#endif
				p += strlen(p);
			}
			*(p++) = '/';
			memcpy(p, pathp, l);
		} else
			memcpy(np->full_name, pathp, l);
		prev_pp = &np->properties;
		**allnextpp = np;
		*allnextpp = &np->allnext;
		if (dad != NULL) {
			np->parent = dad;
			/* we temporarily use the next field as `last_child'*/
			if (dad->next == 0)
				dad->child = np;
			else
				dad->next->sibling = np;
			dad->next = np;
		}
		kref_init(&np->kref);
	}
	/* parse this node's properties */
	while(1) {
		u32 sz, noff;
		char *pname;

		tag = *((u32 *)(*p));
		if (tag == OF_DT_NOP) {
			*p += 4;
			continue;
		}
		if (tag != OF_DT_PROP)
			break;
		*p += 4;
		sz = *((u32 *)(*p));
		noff = *((u32 *)((*p) + 4));
		*p += 8;
		/* old (pre-0x10) blobs 8-byte align large values */
		if (initial_boot_params->version < 0x10)
			*p = _ALIGN(*p, sz >= 8 ? 8 : 4);

		pname = find_flat_dt_string(noff);
		if (pname == NULL) {
			printk("Can't find property name in list !\n");
			break;
		}
		if (strcmp(pname, "name") == 0)
			has_name = 1;
		l = strlen(pname) + 1;
		pp = unflatten_dt_alloc(&mem, sizeof(struct property),
					__alignof__(struct property));
		if (allnextpp) {
			/* "linux,phandle" provides the default phandle,
			 * "ibm,phandle" overrides it when present */
			if (strcmp(pname, "linux,phandle") == 0) {
				np->node = *((u32 *)*p);
				if (np->linux_phandle == 0)
					np->linux_phandle = np->node;
			}
			if (strcmp(pname, "ibm,phandle") == 0)
				np->linux_phandle = *((u32 *)*p);
			pp->name = pname;
			pp->length = sz;
			/* value points into the flat blob, not a copy */
			pp->value = (void *)*p;
			*prev_pp = pp;
			prev_pp = &pp->next;
		}
		*p = _ALIGN((*p) + sz, 4);
	}
	/* with version 0x10 we may not have the name property, recreate
	 * it here from the unit name if absent
	 */
	if (!has_name) {
		char *p = pathp, *ps = pathp, *pa = NULL;
		int sz;

		/* ps: start of last path component; pa: '@' before the
		 * unit address, or end of string if none */
		while (*p) {
			if ((*p) == '@')
				pa = p;
			if ((*p) == '/')
				ps = p + 1;
			p++;
		}
		if (pa < ps)
			pa = p;
		sz = (pa - ps) + 1;
		pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
					__alignof__(struct property));
		if (allnextpp) {
			pp->name = "name";
			pp->length = sz;
			pp->value = (unsigned char *)(pp + 1);
			*prev_pp = pp;
			prev_pp = &pp->next;
			memcpy(pp->value, ps, sz - 1);
			((char *)pp->value)[sz - 1] = 0;
			DBG("fixed up name for %s -> %s\n", pathp, pp->value);
		}
	}
	if (allnextpp) {
		*prev_pp = NULL;
		np->name = get_property(np, "name", NULL);
		np->type = get_property(np, "device_type", NULL);

		if (!np->name)
			np->name = "<NULL>";
		if (!np->type)
			np->type = "<NULL>";
	}
	/* recurse into child nodes until this node's END_NODE tag */
	while (tag == OF_DT_BEGIN_NODE) {
		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
		tag = *((u32 *)(*p));
	}
	if (tag != OF_DT_END_NODE) {
		printk("Weird tag at end of node: %x\n", tag);
		return mem;
	}
	*p += 4;
	return mem;
}
1015
1016
/**
 * unflattens the device-tree passed by the firmware, creating the
 * tree of struct device_node. It also fills the "name" and "type"
 * pointers of the nodes so the normal device-tree walking functions
 * can be used (this used to be done by finish_device_tree)
 */
void __init unflatten_device_tree(void)
{
	unsigned long start, mem, size;
	struct device_node **allnextp = &allnodes;
	char *p = NULL;
	int l = 0;

	DBG(" -> unflatten_device_tree()\n");

	/* First pass, scan for size */
	start = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;
	size = unflatten_dt_node(0, &start, NULL, NULL, 0);
	/* round up to a multiple of 4 for the overrun marker below */
	size = (size | 3) + 1;

	DBG("  size is %lx, allocating...\n", size);

	/* Allocate memory for the expanded device tree */
	mem = lmb_alloc(size + 4, __alignof__(struct device_node));
	if (!mem) {
		DBG("Couldn't allocate memory with lmb_alloc()!\n");
		panic("Couldn't allocate memory with lmb_alloc()!\n");
	}
	mem = (unsigned long) __va(mem);

	/* sentinel to detect the second pass overrunning the sizing pass */
	((u32 *)mem)[size / 4] = 0xdeadbeef;

	DBG("  unflattening %lx...\n", mem);

	/* Second pass, do actual unflattening */
	start = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;
	unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
	if (*((u32 *)start) != OF_DT_END)
		printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
	if (((u32 *)mem)[size / 4] != 0xdeadbeef)
		printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
		       ((u32 *)mem)[size / 4] );
	*allnextp = NULL;

	/* Get pointer to OF "/chosen" node for use everywhere */
	of_chosen = of_find_node_by_path("/chosen");
	if (of_chosen == NULL)
		of_chosen = of_find_node_by_path("/chosen@0");

	/* Retreive command line */
	if (of_chosen != NULL) {
		p = (char *)get_property(of_chosen, "bootargs", &l);
		if (p != NULL && l > 0)
			strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE));
	}
#ifdef CONFIG_CMDLINE
	/* fall back to the built-in command line if firmware gave none */
	if (l == 0 || (l == 1 && (*p) == 0))
		strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif /* CONFIG_CMDLINE */

	DBG("Command line is: %s\n", cmd_line);

	DBG(" <- unflatten_device_tree()\n");
}
1083
/*
 * of_scan_flat_dt() callback run once per "cpu" node: determine the
 * boot CPU's physical id and probe CPU features (Altivec/VMX, SMT)
 * from the flattened tree, before it has been unflattened.
 * Always returns 0 so the scan visits every cpu node.
 */
static int __init early_init_dt_scan_cpus(unsigned long node,
					  const char *uname, int depth, void *data)
{
	u32 *prop;
	unsigned long size;
	char *type = of_get_flat_dt_prop(node, "device_type", &size);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	boot_cpuid = 0;
	boot_cpuid_phys = 0;
	if (initial_boot_params && initial_boot_params->version >= 2) {
		/* version 2 of the kexec param format adds the phys cpuid
		 * of booted proc.
		 */
		boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
	} else {
		/* Check if it's the boot-cpu, set it's hw index now */
		if (of_get_flat_dt_prop(node,
					"linux,boot-cpu", NULL) != NULL) {
			prop = of_get_flat_dt_prop(node, "reg", NULL);
			if (prop != NULL)
				boot_cpuid_phys = *prop;
		}
	}
	set_hard_smp_processor_id(0, boot_cpuid_phys);

#ifdef CONFIG_ALTIVEC
	/* Check if we have a VMX and eventually update CPU features */
	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
	if (prop && (*prop) > 0) {
		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
	}

	/* Same goes for Apple's "altivec" property */
	prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL);
	if (prop) {
		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
	}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_PPC_PSERIES
	/*
	 * Check for an SMT capable CPU and set the CPU feature. We do
	 * this by looking at the size of the ibm,ppc-interrupt-server#s
	 * property
	 */
	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
					  &size);
	cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
	if (prop && ((size / sizeof(u32)) > 1))
		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
#endif

	return 0;
}
1145
/* Flat-device-tree scan callback for the /chosen node.  Records boot
 * parameters handed over by the firmware / boot wrapper into kernel
 * globals: platform type, iommu on/off overrides, memory limit, TCE
 * allocation window, RTAS location and the kexec crash-kernel region.
 * Returns 1 once /chosen has been processed so of_scan_flat_dt() stops.
 */
static int __init early_init_dt_scan_chosen(unsigned long node,
					    const char *uname, int depth, void *data)
{
	u32 *prop;
	unsigned long *lprop;

	DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);

	/* /chosen is a direct child of the root; some firmwares name it
	 * "chosen@0" */
	if (depth != 1 ||
	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
		return 0;

	/* get platform type */
	prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
	if (prop == NULL)
		return 0;
#ifdef CONFIG_PPC_MULTIPLATFORM
	_machine = *prop;
#endif

#ifdef CONFIG_PPC64
	/* check if iommu is forced on or off */
	if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
		iommu_is_off = 1;
	if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
		iommu_force_on = 1;
#endif

	/* mem=... style limit, pre-digested into this property */
	lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
	if (lprop)
		memory_limit = *lprop;

#ifdef CONFIG_PPC64
	/* TCE table allocation window for the iommu code */
	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
	if (lprop)
		tce_alloc_start = *lprop;
	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
	if (lprop)
		tce_alloc_end = *lprop;
#endif

#ifdef CONFIG_PPC_RTAS
	/* To help early debugging via the front panel, we retrieve a minimal
	 * set of RTAS infos now if available
	 */
	{
		u64 *basep, *entryp;

		basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
		entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
		prop = of_get_flat_dt_prop(node, "linux,rtas-size", NULL);
		/* only trust the values if all three properties are present */
		if (basep && entryp && prop) {
			rtas.base = *basep;
			rtas.entry = *entryp;
			rtas.size = *prop;
		}
	}
#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_KEXEC
	/* crash kernel reservation inherited from the previous kernel */
	lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
	if (lprop)
		crashk_res.start = *lprop;

	lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
	if (lprop)
		crashk_res.end = crashk_res.start + *lprop - 1;
#endif

	/* break now */
	return 1;
}
1218
1219static int __init early_init_dt_scan_root(unsigned long node,
1220 const char *uname, int depth, void *data)
1221{
1222 u32 *prop;
1223
1224 if (depth != 0)
1225 return 0;
1226
3c726f8d 1227 prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
9b6b563c
PM
1228 dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1229 DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1230
3c726f8d 1231 prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
9b6b563c
PM
1232 dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1233 DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1234
1235 /* break now */
1236 return 1;
1237}
1238
/* Pull the next address/size value out of a flat-tree cell stream.
 * @s is the number of 32-bit cells making up the value; high-order
 * cells beyond what fits in an unsigned long are skipped, then the
 * remaining (up to two, on 64-bit) cells are combined big-endian
 * style.  Advances *cellp past all @s consumed cells.
 */
static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
{
	cell_t *p = *cellp;
	unsigned long r;

	/* Ignore more than 2 cells */
	while (s > sizeof(unsigned long) / 4) {
		p++;
		s--;
	}
	r = *p++;
#ifdef CONFIG_PPC64
	/* on 64-bit a second cell supplies the low 32 bits */
	if (s > 1) {
		r <<= 32;
		r |= *(p++);
		s--;
	}
#endif

	*cellp = p;
	return r;
}
1261
1262
1263static int __init early_init_dt_scan_memory(unsigned long node,
1264 const char *uname, int depth, void *data)
1265{
3c726f8d 1266 char *type = of_get_flat_dt_prop(node, "device_type", NULL);
9b6b563c
PM
1267 cell_t *reg, *endp;
1268 unsigned long l;
1269
1270 /* We are scanning "memory" nodes only */
a23414be
PM
1271 if (type == NULL) {
1272 /*
1273 * The longtrail doesn't have a device_type on the
1274 * /memory node, so look for the node called /memory@0.
1275 */
1276 if (depth != 1 || strcmp(uname, "memory@0") != 0)
1277 return 0;
1278 } else if (strcmp(type, "memory") != 0)
9b6b563c
PM
1279 return 0;
1280
ba759485
ME
1281 reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
1282 if (reg == NULL)
1283 reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
9b6b563c
PM
1284 if (reg == NULL)
1285 return 0;
1286
1287 endp = reg + (l / sizeof(cell_t));
1288
358c86fd 1289 DBG("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
9b6b563c
PM
1290 uname, l, reg[0], reg[1], reg[2], reg[3]);
1291
1292 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1293 unsigned long base, size;
1294
1295 base = dt_mem_next_cell(dt_root_addr_cells, &reg);
1296 size = dt_mem_next_cell(dt_root_size_cells, &reg);
1297
1298 if (size == 0)
1299 continue;
1300 DBG(" - %lx , %lx\n", base, size);
1301#ifdef CONFIG_PPC64
1302 if (iommu_is_off) {
1303 if (base >= 0x80000000ul)
1304 continue;
1305 if ((base + size) > 0x80000000ul)
1306 size = 0x80000000ul - base;
1307 }
1308#endif
1309 lmb_add(base, size);
1310 }
1311 return 0;
1312}
1313
1314static void __init early_reserve_mem(void)
1315{
1316 unsigned long base, size;
1317 unsigned long *reserve_map;
1318
1319 reserve_map = (unsigned long *)(((unsigned long)initial_boot_params) +
1320 initial_boot_params->off_mem_rsvmap);
1321 while (1) {
1322 base = *(reserve_map++);
1323 size = *(reserve_map++);
1324 if (size == 0)
1325 break;
1326 DBG("reserving: %lx -> %lx\n", base, size);
1327 lmb_reserve(base, size);
1328 }
1329
1330#if 0
1331 DBG("memory reserved, lmbs :\n");
1332 lmb_dump_all();
1333#endif
1334}
1335
/* Top-level early device-tree setup: takes the flat device tree blob
 * handed over by the boot code and extracts everything the kernel
 * needs before the MMU/allocators are fully up.  Ordering matters:
 * /chosen first (sets memory_limit and iommu flags used below), then
 * the memory map into LMB, then reservations, then CPU features.
 *
 * @params: physical/virtual pointer to the flattened device tree.
 */
void __init early_init_devtree(void *params)
{
	DBG(" -> early_init_devtree()\n");

	/* Setup flat device-tree pointer */
	initial_boot_params = params;

	/* Retrieve various informations from the /chosen node of the
	 * device-tree, including the platform type, initrd location and
	 * size, TCE reserve, and more ...
	 */
	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);

	/* Scan memory nodes and rebuild LMBs */
	lmb_init();
	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
	lmb_enforce_memory_limit(memory_limit);
	lmb_analyze();

	DBG("Phys. mem: %lx\n", lmb_phys_mem_size());

	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
	lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
#ifdef CONFIG_CRASH_DUMP
	/* kdump kernel: keep the low region holding the crashed kernel */
	lmb_reserve(0, KDUMP_RESERVE_LIMIT);
#endif
	early_reserve_mem();

	DBG("Scanning CPUs ...\n");

	/* Retrieve CPU related informations from the flat tree
	 * (altivec support, boot CPU ID, ...)
	 */
	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);

	DBG(" <- early_init_devtree()\n");
}
1374
1375#undef printk
1376
1377int
1378prom_n_addr_cells(struct device_node* np)
1379{
1380 int* ip;
1381 do {
1382 if (np->parent)
1383 np = np->parent;
1384 ip = (int *) get_property(np, "#address-cells", NULL);
1385 if (ip != NULL)
1386 return *ip;
1387 } while (np->parent);
1388 /* No #address-cells property for the root node, default to 1 */
1389 return 1;
1390}
1dfc6772 1391EXPORT_SYMBOL(prom_n_addr_cells);
9b6b563c
PM
1392
1393int
1394prom_n_size_cells(struct device_node* np)
1395{
1396 int* ip;
1397 do {
1398 if (np->parent)
1399 np = np->parent;
1400 ip = (int *) get_property(np, "#size-cells", NULL);
1401 if (ip != NULL)
1402 return *ip;
1403 } while (np->parent);
1404 /* No #size-cells property for the root node, default to 1 */
1405 return 1;
1406}
1dfc6772 1407EXPORT_SYMBOL(prom_n_size_cells);
9b6b563c
PM
1408
/**
 * Work out the sense (active-low level / active-high edge)
 * of each interrupt from the device tree.
 *
 * @senses: output array of (max - off) sense bytes, indexed by irq - off
 * @off:    first interrupt line of interest
 * @max:    one past the last interrupt line of interest
 */
void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
{
	struct device_node *np;
	int i, j;

	/* default to level-triggered */
	memset(senses, IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, max - off);

	/* override with the sense recorded on each node's interrupts */
	for (np = allnodes; np != 0; np = np->allnext) {
		for (j = 0; j < np->n_intrs; j++) {
			i = np->intrs[j].line;
			if (i >= off && i < max)
				senses[i-off] = np->intrs[j].sense;
		}
	}
}
1429
1430/**
1431 * Construct and return a list of the device_nodes with a given name.
1432 */
1433struct device_node *find_devices(const char *name)
1434{
1435 struct device_node *head, **prevp, *np;
1436
1437 prevp = &head;
1438 for (np = allnodes; np != 0; np = np->allnext) {
1439 if (np->name != 0 && strcasecmp(np->name, name) == 0) {
1440 *prevp = np;
1441 prevp = &np->next;
1442 }
1443 }
1444 *prevp = NULL;
1445 return head;
1446}
1447EXPORT_SYMBOL(find_devices);
1448
1449/**
1450 * Construct and return a list of the device_nodes with a given type.
1451 */
1452struct device_node *find_type_devices(const char *type)
1453{
1454 struct device_node *head, **prevp, *np;
1455
1456 prevp = &head;
1457 for (np = allnodes; np != 0; np = np->allnext) {
1458 if (np->type != 0 && strcasecmp(np->type, type) == 0) {
1459 *prevp = np;
1460 prevp = &np->next;
1461 }
1462 }
1463 *prevp = NULL;
1464 return head;
1465}
1466EXPORT_SYMBOL(find_type_devices);
1467
1468/**
1469 * Returns all nodes linked together
1470 */
1471struct device_node *find_all_nodes(void)
1472{
1473 struct device_node *head, **prevp, *np;
1474
1475 prevp = &head;
1476 for (np = allnodes; np != 0; np = np->allnext) {
1477 *prevp = np;
1478 prevp = &np->next;
1479 }
1480 *prevp = NULL;
1481 return head;
1482}
1483EXPORT_SYMBOL(find_all_nodes);
1484
/** Checks if the given "compat" string matches one of the strings in
 * the device's "compatible" property.  Returns 1 on a match, 0 when
 * there is no match or no "compatible" property.
 */
int device_is_compatible(struct device_node *device, const char *compat)
{
	const char* cp;
	int cplen, l;

	cp = (char *) get_property(device, "compatible", &cplen);
	if (cp == NULL)
		return 0;
	/* walk the NUL-separated string list.
	 * NOTE(review): this is a case-insensitive *prefix* match --
	 * "foo" also matches an entry "foobar"; historical behaviour,
	 * kept as-is.
	 */
	while (cplen > 0) {
		if (strncasecmp(cp, compat, strlen(compat)) == 0)
			return 1;
		l = strlen(cp) + 1;	/* step past this entry and its NUL */
		cp += l;
		cplen -= l;
	}

	return 0;
}
EXPORT_SYMBOL(device_is_compatible);
1507
1508
1509/**
1510 * Indicates whether the root node has a given value in its
1511 * compatible property.
1512 */
1513int machine_is_compatible(const char *compat)
1514{
1515 struct device_node *root;
1516 int rc = 0;
1517
1518 root = of_find_node_by_path("/");
1519 if (root) {
1520 rc = device_is_compatible(root, compat);
1521 of_node_put(root);
1522 }
1523 return rc;
1524}
1525EXPORT_SYMBOL(machine_is_compatible);
1526
1527/**
1528 * Construct and return a list of the device_nodes with a given type
1529 * and compatible property.
1530 */
1531struct device_node *find_compatible_devices(const char *type,
1532 const char *compat)
1533{
1534 struct device_node *head, **prevp, *np;
1535
1536 prevp = &head;
1537 for (np = allnodes; np != 0; np = np->allnext) {
1538 if (type != NULL
1539 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1540 continue;
1541 if (device_is_compatible(np, compat)) {
1542 *prevp = np;
1543 prevp = &np->next;
1544 }
1545 }
1546 *prevp = NULL;
1547 return head;
1548}
1549EXPORT_SYMBOL(find_compatible_devices);
1550
1551/**
1552 * Find the device_node with a given full_name.
1553 */
1554struct device_node *find_path_device(const char *path)
1555{
1556 struct device_node *np;
1557
1558 for (np = allnodes; np != 0; np = np->allnext)
1559 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
1560 return np;
1561 return NULL;
1562}
1563EXPORT_SYMBOL(find_path_device);
1564
1565/*******
1566 *
1567 * New implementation of the OF "find" APIs, return a refcounted
1568 * object, call of_node_put() when done. The device tree and list
1569 * are protected by a rw_lock.
1570 *
1571 * Note that property management will need some locking as well,
1572 * this isn't dealt with yet.
1573 *
1574 *******/
1575
1576/**
1577 * of_find_node_by_name - Find a node by its "name" property
1578 * @from: The node to start searching from or NULL, the node
1579 * you pass will not be searched, only the next one
1580 * will; typically, you pass what the previous call
1581 * returned. of_node_put() will be called on it
1582 * @name: The name string to match against
1583 *
1584 * Returns a node pointer with refcount incremented, use
1585 * of_node_put() on it when done.
1586 */
1587struct device_node *of_find_node_by_name(struct device_node *from,
1588 const char *name)
1589{
1590 struct device_node *np;
1591
1592 read_lock(&devtree_lock);
1593 np = from ? from->allnext : allnodes;
1594 for (; np != 0; np = np->allnext)
1595 if (np->name != 0 && strcasecmp(np->name, name) == 0
1596 && of_node_get(np))
1597 break;
1598 if (from)
1599 of_node_put(from);
1600 read_unlock(&devtree_lock);
1601 return np;
1602}
1603EXPORT_SYMBOL(of_find_node_by_name);
1604
1605/**
1606 * of_find_node_by_type - Find a node by its "device_type" property
1607 * @from: The node to start searching from or NULL, the node
1608 * you pass will not be searched, only the next one
1609 * will; typically, you pass what the previous call
1610 * returned. of_node_put() will be called on it
1611 * @name: The type string to match against
1612 *
1613 * Returns a node pointer with refcount incremented, use
1614 * of_node_put() on it when done.
1615 */
1616struct device_node *of_find_node_by_type(struct device_node *from,
1617 const char *type)
1618{
1619 struct device_node *np;
1620
1621 read_lock(&devtree_lock);
1622 np = from ? from->allnext : allnodes;
1623 for (; np != 0; np = np->allnext)
1624 if (np->type != 0 && strcasecmp(np->type, type) == 0
1625 && of_node_get(np))
1626 break;
1627 if (from)
1628 of_node_put(from);
1629 read_unlock(&devtree_lock);
1630 return np;
1631}
1632EXPORT_SYMBOL(of_find_node_by_type);
1633
1634/**
1635 * of_find_compatible_node - Find a node based on type and one of the
1636 * tokens in its "compatible" property
1637 * @from: The node to start searching from or NULL, the node
1638 * you pass will not be searched, only the next one
1639 * will; typically, you pass what the previous call
1640 * returned. of_node_put() will be called on it
1641 * @type: The type string to match "device_type" or NULL to ignore
1642 * @compatible: The string to match to one of the tokens in the device
1643 * "compatible" list.
1644 *
1645 * Returns a node pointer with refcount incremented, use
1646 * of_node_put() on it when done.
1647 */
1648struct device_node *of_find_compatible_node(struct device_node *from,
1649 const char *type, const char *compatible)
1650{
1651 struct device_node *np;
1652
1653 read_lock(&devtree_lock);
1654 np = from ? from->allnext : allnodes;
1655 for (; np != 0; np = np->allnext) {
1656 if (type != NULL
1657 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1658 continue;
1659 if (device_is_compatible(np, compatible) && of_node_get(np))
1660 break;
1661 }
1662 if (from)
1663 of_node_put(from);
1664 read_unlock(&devtree_lock);
1665 return np;
1666}
1667EXPORT_SYMBOL(of_find_compatible_node);
1668
1669/**
1670 * of_find_node_by_path - Find a node matching a full OF path
1671 * @path: The full path to match
1672 *
1673 * Returns a node pointer with refcount incremented, use
1674 * of_node_put() on it when done.
1675 */
1676struct device_node *of_find_node_by_path(const char *path)
1677{
1678 struct device_node *np = allnodes;
1679
1680 read_lock(&devtree_lock);
1681 for (; np != 0; np = np->allnext) {
1682 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1683 && of_node_get(np))
1684 break;
1685 }
1686 read_unlock(&devtree_lock);
1687 return np;
1688}
1689EXPORT_SYMBOL(of_find_node_by_path);
1690
1691/**
1692 * of_find_node_by_phandle - Find a node given a phandle
1693 * @handle: phandle of the node to find
1694 *
1695 * Returns a node pointer with refcount incremented, use
1696 * of_node_put() on it when done.
1697 */
1698struct device_node *of_find_node_by_phandle(phandle handle)
1699{
1700 struct device_node *np;
1701
1702 read_lock(&devtree_lock);
1703 for (np = allnodes; np != 0; np = np->allnext)
1704 if (np->linux_phandle == handle)
1705 break;
1706 if (np)
1707 of_node_get(np);
1708 read_unlock(&devtree_lock);
1709 return np;
1710}
1711EXPORT_SYMBOL(of_find_node_by_phandle);
1712
1713/**
1714 * of_find_all_nodes - Get next node in global list
1715 * @prev: Previous node or NULL to start iteration
1716 * of_node_put() will be called on it
1717 *
1718 * Returns a node pointer with refcount incremented, use
1719 * of_node_put() on it when done.
1720 */
1721struct device_node *of_find_all_nodes(struct device_node *prev)
1722{
1723 struct device_node *np;
1724
1725 read_lock(&devtree_lock);
1726 np = prev ? prev->allnext : allnodes;
1727 for (; np != 0; np = np->allnext)
1728 if (of_node_get(np))
1729 break;
1730 if (prev)
1731 of_node_put(prev);
1732 read_unlock(&devtree_lock);
1733 return np;
1734}
1735EXPORT_SYMBOL(of_find_all_nodes);
1736
1737/**
1738 * of_get_parent - Get a node's parent if any
1739 * @node: Node to get parent
1740 *
1741 * Returns a node pointer with refcount incremented, use
1742 * of_node_put() on it when done.
1743 */
1744struct device_node *of_get_parent(const struct device_node *node)
1745{
1746 struct device_node *np;
1747
1748 if (!node)
1749 return NULL;
1750
1751 read_lock(&devtree_lock);
1752 np = of_node_get(node->parent);
1753 read_unlock(&devtree_lock);
1754 return np;
1755}
1756EXPORT_SYMBOL(of_get_parent);
1757
1758/**
1759 * of_get_next_child - Iterate a node childs
1760 * @node: parent node
1761 * @prev: previous child of the parent node, or NULL to get first
1762 *
1763 * Returns a node pointer with refcount incremented, use
1764 * of_node_put() on it when done.
1765 */
1766struct device_node *of_get_next_child(const struct device_node *node,
1767 struct device_node *prev)
1768{
1769 struct device_node *next;
1770
1771 read_lock(&devtree_lock);
1772 next = prev ? prev->sibling : node->child;
1773 for (; next != 0; next = next->sibling)
1774 if (of_node_get(next))
1775 break;
1776 if (prev)
1777 of_node_put(prev);
1778 read_unlock(&devtree_lock);
1779 return next;
1780}
1781EXPORT_SYMBOL(of_get_next_child);
1782
1783/**
1784 * of_node_get - Increment refcount of a node
1785 * @node: Node to inc refcount, NULL is supported to
1786 * simplify writing of callers
1787 *
1788 * Returns node.
1789 */
1790struct device_node *of_node_get(struct device_node *node)
1791{
1792 if (node)
1793 kref_get(&node->kref);
1794 return node;
1795}
1796EXPORT_SYMBOL(of_node_get);
1797
/* Map a kref embedded in a device_node back to the node itself. */
static inline struct device_node * kref_to_device_node(struct kref *kref)
{
	return container_of(kref, struct device_node, kref);
}

/**
 * of_node_release - release a dynamically allocated node
 * @kref: kref element of the node to be released
 *
 * In of_node_put() this function is passed to kref_put()
 * as the destructor.  Frees the node's property list and all
 * per-node allocations; nodes built at boot time (not marked
 * OF_IS_DYNAMIC) are never freed.
 */
static void of_node_release(struct kref *kref)
{
	struct device_node *node = kref_to_device_node(kref);
	struct property *prop = node->properties;

	if (!OF_IS_DYNAMIC(node))
		return;
	/* free every property, saving the next pointer before kfree */
	while (prop) {
		struct property *next = prop->next;
		kfree(prop->name);
		kfree(prop->value);
		kfree(prop);
		prop = next;
	}
	kfree(node->intrs);
	kfree(node->addrs);
	kfree(node->full_name);
	kfree(node->data);
	kfree(node);
}
1830
/**
 * of_node_put - Decrement refcount of a node
 * @node: Node to dec refcount, NULL is supported to
 * simplify writing of callers
 *
 * Frees the node via of_node_release() when the count hits zero.
 */
void of_node_put(struct device_node *node)
{
	if (node)
		kref_put(&node->kref, of_node_release);
}
EXPORT_SYMBOL(of_node_put);
1843
1844/*
1845 * Plug a device node into the tree and global list.
1846 */
1847void of_attach_node(struct device_node *np)
1848{
1849 write_lock(&devtree_lock);
1850 np->sibling = np->parent->child;
1851 np->allnext = allnodes;
1852 np->parent->child = np;
1853 allnodes = np;
1854 write_unlock(&devtree_lock);
1855}
1856
/*
 * "Unplug" a node from the device tree. The caller must hold
 * a reference to the node. The memory associated with the node
 * is not freed until its refcount goes to zero.
 *
 * NOTE(review): both unlink loops assume @np really is attached;
 * if it is not on the lists they walk off a NULL pointer / loop --
 * callers must guarantee attachment.
 */
void of_detach_node(const struct device_node *np)
{
	struct device_node *parent;

	write_lock(&devtree_lock);

	parent = np->parent;

	/* unlink from the global allnodes chain */
	if (allnodes == np)
		allnodes = np->allnext;
	else {
		struct device_node *prev;
		for (prev = allnodes;
		     prev->allnext != np;
		     prev = prev->allnext)
			;
		prev->allnext = np->allnext;
	}

	/* unlink from the parent's child chain */
	if (parent->child == np)
		parent->child = np->sibling;
	else {
		struct device_node *prevsib;
		for (prevsib = np->parent->child;
		     prevsib->sibling != np;
		     prevsib = prevsib->sibling)
			;
		prevsib->sibling = np->sibling;
	}

	write_unlock(&devtree_lock);
}
1894
1895#ifdef CONFIG_PPC_PSERIES
1896/*
1897 * Fix up the uninitialized fields in a new device node:
1898 * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
1899 *
1900 * A lot of boot-time code is duplicated here, because functions such
1901 * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
1902 * slab allocator.
1903 *
1904 * This should probably be split up into smaller chunks.
1905 */
1906
1907static int of_finish_dynamic_node(struct device_node *node,
1908 unsigned long *unused1, int unused2,
1909 int unused3, int unused4)
1910{
1911 struct device_node *parent = of_get_parent(node);
1912 int err = 0;
1913 phandle *ibm_phandle;
1914
1915 node->name = get_property(node, "name", NULL);
1916 node->type = get_property(node, "device_type", NULL);
1917
1918 if (!parent) {
1919 err = -ENODEV;
1920 goto out;
1921 }
1922
1923 /* We don't support that function on PowerMac, at least
1924 * not yet
1925 */
799d6046 1926 if (_machine == PLATFORM_POWERMAC)
9b6b563c
PM
1927 return -ENODEV;
1928
1929 /* fix up new node's linux_phandle field */
1930 if ((ibm_phandle = (unsigned int *)get_property(node, "ibm,phandle", NULL)))
1931 node->linux_phandle = *ibm_phandle;
1932
1933out:
1934 of_node_put(parent);
1935 return err;
1936}
1937
1938static int prom_reconfig_notifier(struct notifier_block *nb,
1939 unsigned long action, void *node)
1940{
1941 int err;
1942
1943 switch (action) {
1944 case PSERIES_RECONFIG_ADD:
1945 err = finish_node(node, NULL, of_finish_dynamic_node, 0, 0, 0);
1946 if (err < 0) {
1947 printk(KERN_ERR "finish_node returned %d\n", err);
1948 err = NOTIFY_BAD;
1949 }
1950 break;
1951 default:
1952 err = NOTIFY_DONE;
1953 break;
1954 }
1955 return err;
1956}
1957
/* Notifier block hooking prom_reconfig_notifier into the pSeries
 * dynamic-reconfiguration chain. */
static struct notifier_block prom_reconfig_nb = {
	.notifier_call = prom_reconfig_notifier,
	.priority = 10, /* This one needs to run first */
};

/* Register the reconfig notifier at boot so dynamically added
 * device nodes get their linux-side fields fixed up. */
static int __init prom_reconfig_setup(void)
{
	return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
}
__initcall(prom_reconfig_setup);
1968#endif
1969
1970/*
1971 * Find a property with a given name for a given node
1972 * and return the value.
1973 */
1974unsigned char *get_property(struct device_node *np, const char *name,
1975 int *lenp)
1976{
1977 struct property *pp;
1978
1979 for (pp = np->properties; pp != 0; pp = pp->next)
1980 if (strcmp(pp->name, name) == 0) {
1981 if (lenp != 0)
1982 *lenp = pp->length;
1983 return pp->value;
1984 }
1985 return NULL;
1986}
1987EXPORT_SYMBOL(get_property);
1988
/*
 * Add a property to a node.  Returns 0 on success, -1 when a
 * property with the same name already exists (the new property is
 * then not inserted).  The list walk and insertion happen under the
 * devtree write lock; prop must stay valid for the node's lifetime.
 */
int prom_add_property(struct device_node* np, struct property* prop)
{
	struct property **next;

	prop->next = NULL;
	write_lock(&devtree_lock);
	next = &np->properties;
	while (*next) {
		if (strcmp(prop->name, (*next)->name) == 0) {
			/* duplicate ! don't insert it */
			write_unlock(&devtree_lock);
			return -1;
		}
		next = &(*next)->next;
	}
	*next = prop;	/* append at the tail */
	write_unlock(&devtree_lock);

#ifdef CONFIG_PROC_DEVICETREE
	/* try to add to proc as well if it was initialized */
	if (np->pde)
		proc_device_tree_add_prop(np->pde, prop);
#endif /* CONFIG_PROC_DEVICETREE */

	return 0;
}
2018
2019/* I quickly hacked that one, check against spec ! */
2020static inline unsigned long
2021bus_space_to_resource_flags(unsigned int bus_space)
2022{
2023 u8 space = (bus_space >> 24) & 0xf;
2024 if (space == 0)
2025 space = 0x02;
2026 if (space == 0x02)
2027 return IORESOURCE_MEM;
2028 else if (space == 0x01)
2029 return IORESOURCE_IO;
2030 else {
2031 printk(KERN_WARNING "prom.c: bus_space_to_resource_flags(), space: %x\n",
2032 bus_space);
2033 return 0;
2034 }
2035}
2036
60dda256 2037#ifdef CONFIG_PCI
9b6b563c
PM
/* Locate the PCI device resource that contains the OF address range
 * @range of @pdev.  Returns NULL when no resource covers the range's
 * start, or when the range spills past the end of the covering
 * resource (PCI/OF mismatch).
 */
static struct resource *find_parent_pci_resource(struct pci_dev* pdev,
						 struct address_range *range)
{
	unsigned long mask;
	int i;

	/* Check this one */
	mask = bus_space_to_resource_flags(range->space);
	for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
		/* NOTE(review): `end > range->address` (not >=) excludes a
		 * range starting exactly at the resource's last byte --
		 * presumably deliberate, but worth confirming */
		if ((pdev->resource[i].flags & mask) == mask &&
		    pdev->resource[i].start <= range->address &&
		    pdev->resource[i].end > range->address) {
			if ((range->address + range->size - 1) > pdev->resource[i].end) {
				/* Add better message */
				printk(KERN_WARNING "PCI/OF resource overlap !\n");
				return NULL;
			}
			break;
		}
	}
	if (i == DEVICE_COUNT_RESOURCE)
		return NULL;
	return &pdev->resource[i];
}
2062
/*
 * Request an OF device resource. Currently handles child of PCI devices,
 * or other nodes attached to the root node. Ultimately, put some
 * link to resources in the OF node.
 *
 * @node: device node owning the address range
 * @index: which of the node's addrs[] entries to request
 * @name_postfix: optional suffix appended to the node name for the
 *                resource name (may be NULL)
 *
 * Returns the requested resource, or NULL on any failure.  The
 * resource name is kmalloc'd; release_OF_resource() frees it.
 */
struct resource *request_OF_resource(struct device_node* node, int index,
				     const char* name_postfix)
{
	struct pci_dev* pcidev;
	u8 pci_bus, pci_devfn;
	unsigned long iomask;
	struct device_node* nd;
	struct resource* parent;
	struct resource *res = NULL;
	int nlen, plen;

	if (index >= node->n_addrs)
		goto fail;

	/* Sanity check on bus space */
	iomask = bus_space_to_resource_flags(node->addrs[index].space);
	if (iomask & IORESOURCE_MEM)
		parent = &iomem_resource;
	else if (iomask & IORESOURCE_IO)
		parent = &ioport_resource;
	else
		goto fail;

	/* Find a PCI parent if any -- walk up until an ancestor maps to
	 * a PCI device */
	nd = node;
	pcidev = NULL;
	while (nd) {
		if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
			pcidev = pci_find_slot(pci_bus, pci_devfn);
		if (pcidev) break;
		nd = nd->parent;
	}
	if (pcidev)
		parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
	if (!parent) {
		printk(KERN_WARNING "request_OF_resource(%s), parent not found\n",
		       node->name);
		goto fail;
	}

	res = __request_region(parent, node->addrs[index].address,
			       node->addrs[index].size, NULL);
	if (!res)
		goto fail;
	/* build "<node name><postfix>" as the resource name; a failed
	 * kmalloc just leaves the name NULL */
	nlen = strlen(node->name);
	plen = name_postfix ? strlen(name_postfix) : 0;
	res->name = (const char *)kmalloc(nlen+plen+1, GFP_KERNEL);
	if (res->name) {
		strcpy((char *)res->name, node->name);
		if (plen)
			strcpy((char *)res->name+nlen, name_postfix);
	}
	return res;
fail:
	return NULL;
}
EXPORT_SYMBOL(request_OF_resource);
2125
/* Undo a request_OF_resource(): locate the busy region previously
 * requested for @node's addrs[@index] range, release it and free the
 * kmalloc'd name and the resource itself.  Returns 0 on success,
 * -EINVAL for a bad index/space, -ENODEV when the parent or region
 * cannot be found.
 */
int release_OF_resource(struct device_node *node, int index)
{
	struct pci_dev* pcidev;
	u8 pci_bus, pci_devfn;
	unsigned long iomask, start, end;
	struct device_node* nd;
	struct resource* parent;
	struct resource *res = NULL;

	if (index >= node->n_addrs)
		return -EINVAL;

	/* Sanity check on bus space */
	iomask = bus_space_to_resource_flags(node->addrs[index].space);
	if (iomask & IORESOURCE_MEM)
		parent = &iomem_resource;
	else if (iomask & IORESOURCE_IO)
		parent = &ioport_resource;
	else
		return -EINVAL;

	/* Find a PCI parent if any -- same walk as request_OF_resource() */
	nd = node;
	pcidev = NULL;
	while(nd) {
		if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
			pcidev = pci_find_slot(pci_bus, pci_devfn);
		if (pcidev) break;
		nd = nd->parent;
	}
	if (pcidev)
		parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
	if (!parent) {
		printk(KERN_WARNING "release_OF_resource(%s), parent not found\n",
		       node->name);
		return -ENODEV;
	}

	/* Find us in the parent and its childs -- descend into enclosing
	 * regions until the exact busy region is found */
	res = parent->child;
	start = node->addrs[index].address;
	end = start + node->addrs[index].size - 1;
	while (res) {
		if (res->start == start && res->end == end &&
		    (res->flags & IORESOURCE_BUSY))
			break;
		if (res->start <= start && res->end >= end)
			res = res->child;
		else
			res = res->sibling;
	}
	if (!res)
		return -ENODEV;

	/* free the name allocated by request_OF_resource() */
	if (res->name) {
		kfree(res->name);
		res->name = NULL;
	}
	release_resource(res);
	kfree(res);

	return 0;
}
EXPORT_SYMBOL(release_OF_resource);
60dda256 2190#endif /* CONFIG_PCI */
This page took 0.138954 seconds and 5 git commands to generate.