powerpc: Pull out MPC106 (grackle) initialization code into its own file
arch/powerpc/kernel/prom.c
1/*
2 * Procedures for creating, accessing and interpreting the device tree.
3 *
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#undef DEBUG
17
18#include <stdarg.h>
19#include <linux/config.h>
20#include <linux/kernel.h>
21#include <linux/string.h>
22#include <linux/init.h>
23#include <linux/threads.h>
24#include <linux/spinlock.h>
25#include <linux/types.h>
26#include <linux/pci.h>
27#include <linux/stringify.h>
28#include <linux/delay.h>
29#include <linux/initrd.h>
30#include <linux/bitops.h>
31#include <linux/module.h>
32
33#include <asm/prom.h>
34#include <asm/rtas.h>
35#include <asm/lmb.h>
36#include <asm/page.h>
37#include <asm/processor.h>
38#include <asm/irq.h>
39#include <asm/io.h>
40#include <asm/smp.h>
41#include <asm/system.h>
42#include <asm/mmu.h>
43#include <asm/pgtable.h>
44#include <asm/pci.h>
45#include <asm/iommu.h>
46#include <asm/btext.h>
47#include <asm/sections.h>
48#include <asm/machdep.h>
49#include <asm/pSeries_reconfig.h>
50#include <asm/pci-bridge.h>
51#ifdef CONFIG_PPC64
52#include <asm/systemcfg.h>
53#endif
54
55#ifdef DEBUG
56#define DBG(fmt...) printk(KERN_ERR fmt)
57#else
58#define DBG(fmt...)
59#endif
60
61struct pci_reg_property {
62 struct pci_address addr;
63 u32 size_hi;
64 u32 size_lo;
65};
66
67struct isa_reg_property {
68 u32 space;
69 u32 address;
70 u32 size;
71};
72
73
74typedef int interpret_func(struct device_node *, unsigned long *,
75 int, int, int);
76
77extern struct rtas_t rtas;
78extern struct lmb lmb;
79extern unsigned long klimit;
80
81static unsigned long memory_limit;
82
83static int __initdata dt_root_addr_cells;
84static int __initdata dt_root_size_cells;
85
86#ifdef CONFIG_PPC64
87static int __initdata iommu_is_off;
88int __initdata iommu_force_on;
89extern unsigned long tce_alloc_start, tce_alloc_end;
90#endif
91
92typedef u32 cell_t;
93
94#if 0
95static struct boot_param_header *initial_boot_params __initdata;
96#else
97struct boot_param_header *initial_boot_params;
98#endif
99
100static struct device_node *allnodes = NULL;
101
102/* Use this lock when traversing the tree through the allnext, child,
103 * sibling, or parent members of struct device_node.
104 */
105static DEFINE_RWLOCK(devtree_lock);
106
107/* export that to outside world */
108struct device_node *of_chosen;
109
110struct device_node *dflt_interrupt_controller;
111int num_interrupt_controllers;
112
113/*
114 * Wrapper for allocating memory for various data that needs to be
115 * attached to device nodes as they are processed at boot or when
116 * added to the device tree later (e.g. DLPAR). At boot there is
117 * already a region reserved so we just increment *mem_start by size;
118 * otherwise we call kmalloc.
119 */
120static void * prom_alloc(unsigned long size, unsigned long *mem_start)
121{
122 unsigned long tmp;
123
124 if (!mem_start)
125 return kmalloc(size, GFP_KERNEL);
126
127 tmp = *mem_start;
128 *mem_start += size;
129 return (void *)tmp;
130}
131
132/*
133 * Find the device_node with a given phandle.
134 */
135static struct device_node * find_phandle(phandle ph)
136{
137 struct device_node *np;
138
139 for (np = allnodes; np != 0; np = np->allnext)
140 if (np->linux_phandle == ph)
141 return np;
142 return NULL;
143}
144
145/*
146 * Find the interrupt parent of a node.
147 */
148static struct device_node * __devinit intr_parent(struct device_node *p)
149{
150 phandle *parp;
151
152 parp = (phandle *) get_property(p, "interrupt-parent", NULL);
153 if (parp == NULL)
154 return p->parent;
155 p = find_phandle(*parp);
156 if (p != NULL)
157 return p;
158 /*
159 * On a powermac booted with BootX, we don't get to know the
160 * phandles for any nodes, so find_phandle will return NULL.
161 * Fortunately these machines only have one interrupt controller
162 * so there isn't in fact any ambiguity. -- paulus
163 */
164 if (num_interrupt_controllers == 1)
165 p = dflt_interrupt_controller;
166 return p;
167}
168
169/*
170 * Find out the size of each entry of the interrupts property
171 * for a node.
172 */
173int __devinit prom_n_intr_cells(struct device_node *np)
174{
175 struct device_node *p;
176 unsigned int *icp;
177
178 for (p = np; (p = intr_parent(p)) != NULL; ) {
179 icp = (unsigned int *)
180 get_property(p, "#interrupt-cells", NULL);
181 if (icp != NULL)
182 return *icp;
183 if (get_property(p, "interrupt-controller", NULL) != NULL
184 || get_property(p, "interrupt-map", NULL) != NULL) {
185 printk("oops, node %s doesn't have #interrupt-cells\n",
186 p->full_name);
187 return 1;
188 }
189 }
190#ifdef DEBUG_IRQ
191 printk("prom_n_intr_cells failed for %s\n", np->full_name);
192#endif
193 return 1;
194}
195
196/*
197 * Map an interrupt from a device up to the platform interrupt
198 * descriptor.
199 */
200static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
201 struct device_node *np, unsigned int *ints,
202 int nintrc)
203{
204 struct device_node *p, *ipar;
205 unsigned int *imap, *imask, *ip;
206 int i, imaplen, match;
207 int newintrc = 0, newaddrc = 0;
208 unsigned int *reg;
209 int naddrc;
210
211 reg = (unsigned int *) get_property(np, "reg", NULL);
212 naddrc = prom_n_addr_cells(np);
213 p = intr_parent(np);
214 while (p != NULL) {
215 if (get_property(p, "interrupt-controller", NULL) != NULL)
216 /* this node is an interrupt controller, stop here */
217 break;
218 imap = (unsigned int *)
219 get_property(p, "interrupt-map", &imaplen);
220 if (imap == NULL) {
221 p = intr_parent(p);
222 continue;
223 }
224 imask = (unsigned int *)
225 get_property(p, "interrupt-map-mask", NULL);
226 if (imask == NULL) {
227 printk("oops, %s has interrupt-map but no mask\n",
228 p->full_name);
229 return 0;
230 }
231 imaplen /= sizeof(unsigned int);
232 match = 0;
233 ipar = NULL;
234 while (imaplen > 0 && !match) {
235 /* check the child-interrupt field */
236 match = 1;
237 for (i = 0; i < naddrc && match; ++i)
238 match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
239 for (; i < naddrc + nintrc && match; ++i)
240 match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
241 imap += naddrc + nintrc;
242 imaplen -= naddrc + nintrc;
243 /* grab the interrupt parent */
244 ipar = find_phandle((phandle) *imap++);
245 --imaplen;
246 if (ipar == NULL && num_interrupt_controllers == 1)
247 /* cope with BootX not giving us phandles */
248 ipar = dflt_interrupt_controller;
249 if (ipar == NULL) {
250 printk("oops, no int parent %x in map of %s\n",
251 imap[-1], p->full_name);
252 return 0;
253 }
254 /* find the parent's # addr and intr cells */
255 ip = (unsigned int *)
256 get_property(ipar, "#interrupt-cells", NULL);
257 if (ip == NULL) {
258 printk("oops, no #interrupt-cells on %s\n",
259 ipar->full_name);
260 return 0;
261 }
262 newintrc = *ip;
263 ip = (unsigned int *)
264 get_property(ipar, "#address-cells", NULL);
265 newaddrc = (ip == NULL)? 0: *ip;
266 imap += newaddrc + newintrc;
267 imaplen -= newaddrc + newintrc;
268 }
269 if (imaplen < 0) {
270 printk("oops, error decoding int-map on %s, len=%d\n",
271 p->full_name, imaplen);
272 return 0;
273 }
274 if (!match) {
275#ifdef DEBUG_IRQ
276 printk("oops, no match in %s int-map for %s\n",
277 p->full_name, np->full_name);
278#endif
279 return 0;
280 }
281 p = ipar;
282 naddrc = newaddrc;
283 nintrc = newintrc;
284 ints = imap - nintrc;
285 reg = ints - naddrc;
286 }
287 if (p == NULL) {
288#ifdef DEBUG_IRQ
289 printk("hmmm, int tree for %s doesn't have ctrler\n",
290 np->full_name);
291#endif
292 return 0;
293 }
294 *irq = ints;
295 *ictrler = p;
296 return nintrc;
297}
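/*
 * Editorial note (summary of the standard OF interrupt-map layout walked
 * by the loop above, not taken from the original file): each
 * interrupt-map entry consists of
 *
 *	child unit address	(#address-cells cells of the child bus)
 *	child interrupt spec	(#interrupt-cells cells of the child bus)
 *	interrupt parent	(1 cell, a phandle)
 *	parent unit address	(#address-cells cells of the parent)
 *	parent interrupt spec	(#interrupt-cells cells of the parent)
 *
 * which matches the per-entry stride consumed above: naddrc + nintrc
 * cells, one phandle cell, then newaddrc + newintrc cells.
 */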
298
299static int __devinit finish_node_interrupts(struct device_node *np,
300 unsigned long *mem_start,
301 int measure_only)
302{
303 unsigned int *ints;
304 int intlen, intrcells, intrcount;
305 int i, j, n;
306 unsigned int *irq, virq;
307 struct device_node *ic;
308
309 if (num_interrupt_controllers == 0) {
310 /*
311 * Old machines just have a list of interrupt numbers
312 * and no interrupt-controller nodes.
313 */
314 ints = (unsigned int *) get_property(np, "AAPL,interrupts",
315 &intlen);
316 /* XXX old interpret_pci_props looked in parent too */
317 /* XXX old interpret_macio_props looked for interrupts
318 before AAPL,interrupts */
319 if (ints == NULL)
320 ints = (unsigned int *) get_property(np, "interrupts",
321 &intlen);
322 if (ints == NULL)
323 return 0;
324
325 np->n_intrs = intlen / sizeof(unsigned int);
326 np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]),
327 mem_start);
328 if (!np->intrs)
329 return -ENOMEM;
330 if (measure_only)
331 return 0;
332
333 for (i = 0; i < np->n_intrs; ++i) {
334 np->intrs[i].line = *ints++;
335 np->intrs[i].sense = 1;
336 }
337 return 0;
338 }
339
340 ints = (unsigned int *) get_property(np, "interrupts", &intlen);
341 if (ints == NULL)
342 return 0;
343 intrcells = prom_n_intr_cells(np);
344 intlen /= intrcells * sizeof(unsigned int);
345
346 np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
347 if (!np->intrs)
348 return -ENOMEM;
349
350 if (measure_only)
351 return 0;
352
353 intrcount = 0;
354 for (i = 0; i < intlen; ++i, ints += intrcells) {
355 n = map_interrupt(&irq, &ic, np, ints, intrcells);
356 if (n <= 0)
357 continue;
358
359 /* don't map IRQ numbers under a cascaded 8259 controller */
360 if (ic && device_is_compatible(ic, "chrp,iic")) {
361 np->intrs[intrcount].line = irq[0];
362 } else {
363#ifdef CONFIG_PPC64
364 virq = virt_irq_create_mapping(irq[0]);
365 if (virq == NO_IRQ) {
366 printk(KERN_CRIT "Could not allocate interrupt"
367 " number for %s\n", np->full_name);
368 continue;
369 }
370 virq = irq_offset_up(virq);
371#else
372 virq = irq[0];
373#endif
374 np->intrs[intrcount].line = virq;
375 }
376
377#ifdef CONFIG_PPC64
378 /* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
379 if (systemcfg->platform == PLATFORM_POWERMAC && ic && ic->parent) {
380 char *name = get_property(ic->parent, "name", NULL);
381 if (name && !strcmp(name, "u3"))
382 np->intrs[intrcount].line += 128;
383 else if (!(name && !strcmp(name, "mac-io")))
384 /* ignore other cascaded controllers, such as
385 the k2-sata-root */
386 break;
387 }
388#endif
389 np->intrs[intrcount].sense = 1;
390 if (n > 1)
391 np->intrs[intrcount].sense = irq[1];
392 if (n > 2) {
393 printk("hmmm, got %d intr cells for %s:", n,
394 np->full_name);
395 for (j = 0; j < n; ++j)
396 printk(" %d", irq[j]);
397 printk("\n");
398 }
399 ++intrcount;
400 }
401 np->n_intrs = intrcount;
402
403 return 0;
404}
405
406static int __devinit interpret_pci_props(struct device_node *np,
407 unsigned long *mem_start,
408 int naddrc, int nsizec,
409 int measure_only)
410{
411 struct address_range *adr;
412 struct pci_reg_property *pci_addrs;
413 int i, l, n_addrs;
414
415 pci_addrs = (struct pci_reg_property *)
416 get_property(np, "assigned-addresses", &l);
417 if (!pci_addrs)
418 return 0;
419
420 n_addrs = l / sizeof(*pci_addrs);
421
422 adr = prom_alloc(n_addrs * sizeof(*adr), mem_start);
423 if (!adr)
424 return -ENOMEM;
425
426 if (measure_only)
427 return 0;
428
429 np->addrs = adr;
430 np->n_addrs = n_addrs;
431
432 for (i = 0; i < n_addrs; i++) {
433 adr[i].space = pci_addrs[i].addr.a_hi;
434 adr[i].address = pci_addrs[i].addr.a_lo |
435 ((u64)pci_addrs[i].addr.a_mid << 32);
436 adr[i].size = pci_addrs[i].size_lo;
437 }
438
439 return 0;
440}
441
442static int __init interpret_dbdma_props(struct device_node *np,
443 unsigned long *mem_start,
444 int naddrc, int nsizec,
445 int measure_only)
446{
447 struct reg_property32 *rp;
448 struct address_range *adr;
449 unsigned long base_address;
450 int i, l;
451 struct device_node *db;
452
453 base_address = 0;
454 if (!measure_only) {
455 for (db = np->parent; db != NULL; db = db->parent) {
456 if (!strcmp(db->type, "dbdma") && db->n_addrs != 0) {
457 base_address = db->addrs[0].address;
458 break;
459 }
460 }
461 }
462
463 rp = (struct reg_property32 *) get_property(np, "reg", &l);
464 if (rp != 0 && l >= sizeof(struct reg_property32)) {
465 i = 0;
466 adr = (struct address_range *) (*mem_start);
467 while ((l -= sizeof(struct reg_property32)) >= 0) {
468 if (!measure_only) {
469 adr[i].space = 2;
470 adr[i].address = rp[i].address + base_address;
471 adr[i].size = rp[i].size;
472 }
473 ++i;
474 }
475 np->addrs = adr;
476 np->n_addrs = i;
477 (*mem_start) += i * sizeof(struct address_range);
478 }
479
480 return 0;
481}
482
483static int __init interpret_macio_props(struct device_node *np,
484 unsigned long *mem_start,
485 int naddrc, int nsizec,
486 int measure_only)
487{
488 struct reg_property32 *rp;
489 struct address_range *adr;
490 unsigned long base_address;
491 int i, l;
492 struct device_node *db;
493
494 base_address = 0;
495 if (!measure_only) {
496 for (db = np->parent; db != NULL; db = db->parent) {
497 if (!strcmp(db->type, "mac-io") && db->n_addrs != 0) {
498 base_address = db->addrs[0].address;
499 break;
500 }
501 }
502 }
503
504 rp = (struct reg_property32 *) get_property(np, "reg", &l);
505 if (rp != 0 && l >= sizeof(struct reg_property32)) {
506 i = 0;
507 adr = (struct address_range *) (*mem_start);
508 while ((l -= sizeof(struct reg_property32)) >= 0) {
509 if (!measure_only) {
510 adr[i].space = 2;
511 adr[i].address = rp[i].address + base_address;
512 adr[i].size = rp[i].size;
513 }
514 ++i;
515 }
516 np->addrs = adr;
517 np->n_addrs = i;
518 (*mem_start) += i * sizeof(struct address_range);
519 }
520
521 return 0;
522}
523
524static int __init interpret_isa_props(struct device_node *np,
525 unsigned long *mem_start,
526 int naddrc, int nsizec,
527 int measure_only)
528{
529 struct isa_reg_property *rp;
530 struct address_range *adr;
531 int i, l;
532
533 rp = (struct isa_reg_property *) get_property(np, "reg", &l);
534 if (rp != 0 && l >= sizeof(struct isa_reg_property)) {
535 i = 0;
536 adr = (struct address_range *) (*mem_start);
537 while ((l -= sizeof(struct isa_reg_property)) >= 0) {
538 if (!measure_only) {
539 adr[i].space = rp[i].space;
540 adr[i].address = rp[i].address;
541 adr[i].size = rp[i].size;
542 }
543 ++i;
544 }
545 np->addrs = adr;
546 np->n_addrs = i;
547 (*mem_start) += i * sizeof(struct address_range);
548 }
549
550 return 0;
551}
552
553static int __init interpret_root_props(struct device_node *np,
554 unsigned long *mem_start,
555 int naddrc, int nsizec,
556 int measure_only)
557{
558 struct address_range *adr;
559 int i, l;
560 unsigned int *rp;
561 int rpsize = (naddrc + nsizec) * sizeof(unsigned int);
562
563 rp = (unsigned int *) get_property(np, "reg", &l);
564 if (rp != 0 && l >= rpsize) {
565 i = 0;
566 adr = (struct address_range *) (*mem_start);
567 while ((l -= rpsize) >= 0) {
568 if (!measure_only) {
569 adr[i].space = 0;
570 adr[i].address = rp[naddrc - 1];
571 adr[i].size = rp[naddrc + nsizec - 1];
572 }
573 ++i;
574 rp += naddrc + nsizec;
575 }
576 np->addrs = adr;
577 np->n_addrs = i;
578 (*mem_start) += i * sizeof(struct address_range);
579 }
580
581 return 0;
582}
583
584static int __devinit finish_node(struct device_node *np,
585 unsigned long *mem_start,
586 interpret_func *ifunc,
587 int naddrc, int nsizec,
588 int measure_only)
589{
590 struct device_node *child;
591 int *ip, rc = 0;
592
593 /* get the device addresses and interrupts */
594 if (ifunc != NULL)
595 rc = ifunc(np, mem_start, naddrc, nsizec, measure_only);
596 if (rc)
597 goto out;
598
599 rc = finish_node_interrupts(np, mem_start, measure_only);
600 if (rc)
601 goto out;
602
603 /* Look for #address-cells and #size-cells properties. */
604 ip = (int *) get_property(np, "#address-cells", NULL);
605 if (ip != NULL)
606 naddrc = *ip;
607 ip = (int *) get_property(np, "#size-cells", NULL);
608 if (ip != NULL)
609 nsizec = *ip;
610
611 if (!strcmp(np->name, "device-tree") || np->parent == NULL)
612 ifunc = interpret_root_props;
613 else if (np->type == 0)
614 ifunc = NULL;
615 else if (!strcmp(np->type, "pci") || !strcmp(np->type, "vci"))
616 ifunc = interpret_pci_props;
617 else if (!strcmp(np->type, "dbdma"))
618 ifunc = interpret_dbdma_props;
619 else if (!strcmp(np->type, "mac-io") || ifunc == interpret_macio_props)
620 ifunc = interpret_macio_props;
621 else if (!strcmp(np->type, "isa"))
622 ifunc = interpret_isa_props;
623 else if (!strcmp(np->name, "uni-n") || !strcmp(np->name, "u3"))
624 ifunc = interpret_root_props;
625 else if (!((ifunc == interpret_dbdma_props
626 || ifunc == interpret_macio_props)
627 && (!strcmp(np->type, "escc")
628 || !strcmp(np->type, "media-bay"))))
629 ifunc = NULL;
630
631 for (child = np->child; child != NULL; child = child->sibling) {
632 rc = finish_node(child, mem_start, ifunc,
633 naddrc, nsizec, measure_only);
634 if (rc)
635 goto out;
636 }
637out:
638 return rc;
639}
640
641static void __init scan_interrupt_controllers(void)
642{
643 struct device_node *np;
644 int n = 0;
645 char *name, *ic;
646 int iclen;
647
648 for (np = allnodes; np != NULL; np = np->allnext) {
649 ic = get_property(np, "interrupt-controller", &iclen);
650 name = get_property(np, "name", NULL);
651 /* checking iclen makes sure we don't get a false
652 match on /chosen.interrupt_controller */
653 if ((name != NULL
654 && strcmp(name, "interrupt-controller") == 0)
655 || (ic != NULL && iclen == 0
656 && strcmp(name, "AppleKiwi"))) {
657 if (n == 0)
658 dflt_interrupt_controller = np;
659 ++n;
660 }
661 }
662 num_interrupt_controllers = n;
663}
664
665/**
666 * finish_device_tree is called once things are running normally
667 * (i.e. with text and data mapped to the address they were linked at).
 668 * It traverses the device tree and fills in some of the additional
 669 * fields in each node, such as {n_}addrs and {n_}intrs; the virtual
 670 * interrupt mapping is also initialized at this point.
671 */
672void __init finish_device_tree(void)
673{
674 unsigned long start, end, size = 0;
675
676 DBG(" -> finish_device_tree\n");
677
678#ifdef CONFIG_PPC64
679 /* Initialize virtual IRQ map */
680 virt_irq_init();
681#endif
682 scan_interrupt_controllers();
683
684 /*
 685 * Finish the device-tree (pre-parsing some properties, etc...).
 686 * We do this in 2 passes: one with "measure_only" set, which
 687 * only measures the amount of memory needed; we then
 688 * allocate that memory and call finish_node again. However,
 689 * we must be careful, as most routines will fail nowadays when
 690 * prom_alloc() returns 0, so we must make sure our first pass
 691 * doesn't start at 0. We pre-initialize size to 16 for that
 692 * reason and then remove those additional 16 bytes afterwards.
693 */
694 size = 16;
695 finish_node(allnodes, &size, NULL, 0, 0, 1);
696 size -= 16;
697 end = start = (unsigned long) __va(lmb_alloc(size, 128));
698 finish_node(allnodes, &end, NULL, 0, 0, 0);
699 BUG_ON(end != start + size);
700
701 DBG(" <- finish_device_tree\n");
702}
703
704static inline char *find_flat_dt_string(u32 offset)
705{
706 return ((char *)initial_boot_params) +
707 initial_boot_params->off_dt_strings + offset;
708}
709
710/**
 711 * This function is used to scan the flattened device-tree. It is
 712 * used to extract memory information at boot, before we can
 713 * unflatten the tree.
714 */
715static int __init scan_flat_dt(int (*it)(unsigned long node,
716 const char *uname, int depth,
717 void *data),
718 void *data)
719{
720 unsigned long p = ((unsigned long)initial_boot_params) +
721 initial_boot_params->off_dt_struct;
722 int rc = 0;
723 int depth = -1;
724
725 do {
726 u32 tag = *((u32 *)p);
727 char *pathp;
728
729 p += 4;
730 if (tag == OF_DT_END_NODE) {
731 depth --;
732 continue;
733 }
734 if (tag == OF_DT_NOP)
735 continue;
736 if (tag == OF_DT_END)
737 break;
738 if (tag == OF_DT_PROP) {
739 u32 sz = *((u32 *)p);
740 p += 8;
741 if (initial_boot_params->version < 0x10)
742 p = _ALIGN(p, sz >= 8 ? 8 : 4);
743 p += sz;
744 p = _ALIGN(p, 4);
745 continue;
746 }
747 if (tag != OF_DT_BEGIN_NODE) {
748 printk(KERN_WARNING "Invalid tag %x scanning flattened"
749 " device tree !\n", tag);
750 return -EINVAL;
751 }
752 depth++;
753 pathp = (char *)p;
754 p = _ALIGN(p + strlen(pathp) + 1, 4);
755 if ((*pathp) == '/') {
756 char *lp, *np;
757 for (lp = NULL, np = pathp; *np; np++)
758 if ((*np) == '/')
759 lp = np+1;
760 if (lp != NULL)
761 pathp = lp;
762 }
763 rc = it(p, pathp, depth, data);
764 if (rc != 0)
765 break;
766 } while(1);
767
768 return rc;
769}
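/*
 * Illustrative sketch of a scan_flat_dt() callback (editorial example,
 * not part of the original file; names are hypothetical).  A callback
 * typically filters on depth/uname or on a property and returns non-zero
 * to stop the walk:
 *
 *	static int __init find_chosen(unsigned long node, const char *uname,
 *				      int depth, void *data)
 *	{
 *		if (depth != 1 || strcmp(uname, "chosen") != 0)
 *			return 0;
 *		*(unsigned long *)data = node;
 *		return 1;
 *	}
 *
 *	unsigned long chosen_node = 0;
 *	scan_flat_dt(find_chosen, &chosen_node);
 */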
770
771/**
 772 * This function can be used within a scan_flat_dt() callback to get
 773 * access to properties.
774 */
775static void* __init get_flat_dt_prop(unsigned long node, const char *name,
776 unsigned long *size)
777{
778 unsigned long p = node;
779
780 do {
781 u32 tag = *((u32 *)p);
782 u32 sz, noff;
783 const char *nstr;
784
785 p += 4;
786 if (tag == OF_DT_NOP)
787 continue;
788 if (tag != OF_DT_PROP)
789 return NULL;
790
791 sz = *((u32 *)p);
792 noff = *((u32 *)(p + 4));
793 p += 8;
794 if (initial_boot_params->version < 0x10)
795 p = _ALIGN(p, sz >= 8 ? 8 : 4);
796
797 nstr = find_flat_dt_string(noff);
798 if (nstr == NULL) {
799 printk(KERN_WARNING "Can't find property index"
800 " name !\n");
801 return NULL;
802 }
803 if (strcmp(name, nstr) == 0) {
804 if (size)
805 *size = sz;
806 return (void *)p;
807 }
808 p += sz;
809 p = _ALIGN(p, 4);
810 } while(1);
811}
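/*
 * For reference (layout inferred from the parser above, added
 * editorially): an OF_DT_PROP record in the flat tree is
 *
 *	u32 tag;          OF_DT_PROP
 *	u32 size;         property value length in bytes
 *	u32 name_offset;  offset into the strings block
 *	u8  value[size];  padded to a 4-byte boundary; on trees older than
 *	                  version 0x10 the value starts 8-byte aligned when
 *	                  size >= 8
 */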
812
813static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
814 unsigned long align)
815{
816 void *res;
817
818 *mem = _ALIGN(*mem, align);
819 res = (void *)*mem;
820 *mem += size;
821
822 return res;
823}
824
825static unsigned long __init unflatten_dt_node(unsigned long mem,
826 unsigned long *p,
827 struct device_node *dad,
828 struct device_node ***allnextpp,
829 unsigned long fpsize)
830{
831 struct device_node *np;
832 struct property *pp, **prev_pp = NULL;
833 char *pathp;
834 u32 tag;
835 unsigned int l, allocl;
836 int has_name = 0;
837 int new_format = 0;
838
839 tag = *((u32 *)(*p));
840 if (tag != OF_DT_BEGIN_NODE) {
841 printk("Weird tag at start of node: %x\n", tag);
842 return mem;
843 }
844 *p += 4;
845 pathp = (char *)*p;
846 l = allocl = strlen(pathp) + 1;
847 *p = _ALIGN(*p + l, 4);
848
849 /* version 0x10 has a more compact unit name here instead of the full
 850 * path. We accumulate the full path size using "fpsize"; we'll rebuild
851 * it later. We detect this because the first character of the name is
852 * not '/'.
853 */
854 if ((*pathp) != '/') {
855 new_format = 1;
856 if (fpsize == 0) {
857 /* root node: special case. fpsize accounts for path
858 * plus terminating zero. root node only has '/', so
 859 * fpsize should be 2, but we want to avoid first-level
 860 * nodes starting with two '/', so we use fpsize 1 here
861 */
862 fpsize = 1;
863 allocl = 2;
864 } else {
865 /* account for '/' and path size minus terminal 0
866 * already in 'l'
867 */
868 fpsize += l;
869 allocl = fpsize;
870 }
871 }
872
873
874 np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
875 __alignof__(struct device_node));
876 if (allnextpp) {
877 memset(np, 0, sizeof(*np));
878 np->full_name = ((char*)np) + sizeof(struct device_node);
879 if (new_format) {
880 char *p = np->full_name;
881 /* rebuild full path for new format */
882 if (dad && dad->parent) {
883 strcpy(p, dad->full_name);
884#ifdef DEBUG
885 if ((strlen(p) + l + 1) != allocl) {
886 DBG("%s: p: %d, l: %d, a: %d\n",
887 pathp, strlen(p), l, allocl);
888 }
889#endif
890 p += strlen(p);
891 }
892 *(p++) = '/';
893 memcpy(p, pathp, l);
894 } else
895 memcpy(np->full_name, pathp, l);
896 prev_pp = &np->properties;
897 **allnextpp = np;
898 *allnextpp = &np->allnext;
899 if (dad != NULL) {
900 np->parent = dad;
901 /* we temporarily use the next field as `last_child'*/
902 if (dad->next == 0)
903 dad->child = np;
904 else
905 dad->next->sibling = np;
906 dad->next = np;
907 }
908 kref_init(&np->kref);
909 }
910 while(1) {
911 u32 sz, noff;
912 char *pname;
913
914 tag = *((u32 *)(*p));
915 if (tag == OF_DT_NOP) {
916 *p += 4;
917 continue;
918 }
919 if (tag != OF_DT_PROP)
920 break;
921 *p += 4;
922 sz = *((u32 *)(*p));
923 noff = *((u32 *)((*p) + 4));
924 *p += 8;
925 if (initial_boot_params->version < 0x10)
926 *p = _ALIGN(*p, sz >= 8 ? 8 : 4);
927
928 pname = find_flat_dt_string(noff);
929 if (pname == NULL) {
930 printk("Can't find property name in list !\n");
931 break;
932 }
933 if (strcmp(pname, "name") == 0)
934 has_name = 1;
935 l = strlen(pname) + 1;
936 pp = unflatten_dt_alloc(&mem, sizeof(struct property),
937 __alignof__(struct property));
938 if (allnextpp) {
939 if (strcmp(pname, "linux,phandle") == 0) {
940 np->node = *((u32 *)*p);
941 if (np->linux_phandle == 0)
942 np->linux_phandle = np->node;
943 }
944 if (strcmp(pname, "ibm,phandle") == 0)
945 np->linux_phandle = *((u32 *)*p);
946 pp->name = pname;
947 pp->length = sz;
948 pp->value = (void *)*p;
949 *prev_pp = pp;
950 prev_pp = &pp->next;
951 }
952 *p = _ALIGN((*p) + sz, 4);
953 }
 954 /* with version 0x10 we may not have the name property; recreate
955 * it here from the unit name if absent
956 */
957 if (!has_name) {
958 char *p = pathp, *ps = pathp, *pa = NULL;
959 int sz;
960
961 while (*p) {
962 if ((*p) == '@')
963 pa = p;
964 if ((*p) == '/')
965 ps = p + 1;
966 p++;
967 }
968 if (pa < ps)
969 pa = p;
970 sz = (pa - ps) + 1;
971 pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
972 __alignof__(struct property));
973 if (allnextpp) {
974 pp->name = "name";
975 pp->length = sz;
976 pp->value = (unsigned char *)(pp + 1);
977 *prev_pp = pp;
978 prev_pp = &pp->next;
979 memcpy(pp->value, ps, sz - 1);
980 ((char *)pp->value)[sz - 1] = 0;
981 DBG("fixed up name for %s -> %s\n", pathp, pp->value);
982 }
983 }
984 if (allnextpp) {
985 *prev_pp = NULL;
986 np->name = get_property(np, "name", NULL);
987 np->type = get_property(np, "device_type", NULL);
988
989 if (!np->name)
990 np->name = "<NULL>";
991 if (!np->type)
992 np->type = "<NULL>";
993 }
994 while (tag == OF_DT_BEGIN_NODE) {
995 mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
996 tag = *((u32 *)(*p));
997 }
998 if (tag != OF_DT_END_NODE) {
999 printk("Weird tag at end of node: %x\n", tag);
1000 return mem;
1001 }
1002 *p += 4;
1003 return mem;
1004}
1005
1006
1007/**
1008 * unflattens the device-tree passed by the firmware, creating the
 1009 * tree of struct device_node. It also fills in the "name" and "type"
 1010 * pointers of the nodes so the normal device-tree walking functions
 1011 * can be used (this used to be done by finish_device_tree).
1012 */
1013void __init unflatten_device_tree(void)
1014{
1015 unsigned long start, mem, size;
1016 struct device_node **allnextp = &allnodes;
1017 char *p = NULL;
1018 int l = 0;
1019
1020 DBG(" -> unflatten_device_tree()\n");
1021
1022 /* First pass, scan for size */
1023 start = ((unsigned long)initial_boot_params) +
1024 initial_boot_params->off_dt_struct;
1025 size = unflatten_dt_node(0, &start, NULL, NULL, 0);
1026 size = (size | 3) + 1;
1027
1028 DBG(" size is %lx, allocating...\n", size);
1029
1030 /* Allocate memory for the expanded device tree */
1031 mem = lmb_alloc(size + 4, __alignof__(struct device_node));
1032 if (!mem) {
1033 DBG("Couldn't allocate memory with lmb_alloc()!\n");
1034 panic("Couldn't allocate memory with lmb_alloc()!\n");
1035 }
1036 mem = (unsigned long) __va(mem);
1037
1038 ((u32 *)mem)[size / 4] = 0xdeadbeef;
1039
1040 DBG(" unflattening %lx...\n", mem);
1041
1042 /* Second pass, do actual unflattening */
1043 start = ((unsigned long)initial_boot_params) +
1044 initial_boot_params->off_dt_struct;
1045 unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
1046 if (*((u32 *)start) != OF_DT_END)
1047 printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
1048 if (((u32 *)mem)[size / 4] != 0xdeadbeef)
1049 printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
1050 ((u32 *)mem)[size / 4] );
1051 *allnextp = NULL;
1052
1053 /* Get pointer to OF "/chosen" node for use everywhere */
1054 of_chosen = of_find_node_by_path("/chosen");
1055 if (of_chosen == NULL)
1056 of_chosen = of_find_node_by_path("/chosen@0");
1057
 1058 /* Retrieve the command line */
1059 if (of_chosen != NULL) {
1060 p = (char *)get_property(of_chosen, "bootargs", &l);
1061 if (p != NULL && l > 0)
1062 strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE));
1063 }
1064#ifdef CONFIG_CMDLINE
1065 if (l == 0 || (l == 1 && (*p) == 0))
1066 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1067#endif /* CONFIG_CMDLINE */
1068
1069 DBG("Command line is: %s\n", cmd_line);
1070
1071 DBG(" <- unflatten_device_tree()\n");
1072}
1073
1074
1075static int __init early_init_dt_scan_cpus(unsigned long node,
1076 const char *uname, int depth, void *data)
1077{
1078 char *type = get_flat_dt_prop(node, "device_type", NULL);
1079 u32 *prop;
1080 unsigned long size = 0;
1081
1082 /* We are scanning "cpu" nodes only */
1083 if (type == NULL || strcmp(type, "cpu") != 0)
1084 return 0;
1085
1086#ifdef CONFIG_PPC_PSERIES
1087 /* On LPAR, look for the first ibm,pft-size property for the hash table size
1088 */
1089 if (systemcfg->platform == PLATFORM_PSERIES_LPAR && ppc64_pft_size == 0) {
1090 u32 *pft_size;
1091 pft_size = get_flat_dt_prop(node, "ibm,pft-size", NULL);
1092 if (pft_size != NULL) {
1093 /* pft_size[0] is the NUMA CEC cookie */
1094 ppc64_pft_size = pft_size[1];
1095 }
1096 }
1097#endif
1098
1099#ifdef CONFIG_PPC64
1100 if (initial_boot_params && initial_boot_params->version >= 2) {
1101 /* version 2 of the kexec param format adds the phys cpuid
1102 * of booted proc.
1103 */
1104 boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
1105 boot_cpuid = 0;
1106 } else {
 1107 /* Check if it's the boot-cpu; if so, set its hw index in the paca now */
1108 if (get_flat_dt_prop(node, "linux,boot-cpu", NULL) != NULL) {
 1109 prop = get_flat_dt_prop(node, "reg", NULL);
1110 set_hard_smp_processor_id(0, prop == NULL ? 0 : *prop);
1111 boot_cpuid_phys = get_hard_smp_processor_id(0);
1112 }
1113 }
1114#endif
1115
1116#ifdef CONFIG_ALTIVEC
 1117 /* Check if we have a VMX unit and, if so, update the CPU features */
1118 prop = (u32 *)get_flat_dt_prop(node, "ibm,vmx", &size);
1119 if (prop && (*prop) > 0) {
1120 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1121 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1122 }
1123
1124 /* Same goes for Apple's "altivec" property */
1125 prop = (u32 *)get_flat_dt_prop(node, "altivec", NULL);
1126 if (prop) {
1127 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1128 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1129 }
1130#endif /* CONFIG_ALTIVEC */
1131
1132#ifdef CONFIG_PPC_PSERIES
1133 /*
1134 * Check for an SMT capable CPU and set the CPU feature. We do
1135 * this by looking at the size of the ibm,ppc-interrupt-server#s
1136 * property
1137 */
1138 prop = (u32 *)get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
1139 &size);
1140 cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
1141 if (prop && ((size / sizeof(u32)) > 1))
1142 cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
1143#endif
1144
1145 return 0;
1146}
1147
1148static int __init early_init_dt_scan_chosen(unsigned long node,
1149 const char *uname, int depth, void *data)
1150{
1151 u32 *prop;
1152 unsigned long *lprop;
1153
1154 DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
1155
1156 if (depth != 1 ||
1157 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
1158 return 0;
1159
1160 /* get platform type */
1161 prop = (u32 *)get_flat_dt_prop(node, "linux,platform", NULL);
1162 if (prop == NULL)
1163 return 0;
1164#ifdef CONFIG_PPC64
1165 systemcfg->platform = *prop;
1166#else
 1167#ifdef CONFIG_PPC_MULTIPLATFORM
1168 _machine = *prop;
1169#endif
 1170#endif
1171
1172#ifdef CONFIG_PPC64
1173 /* check if iommu is forced on or off */
1174 if (get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
1175 iommu_is_off = 1;
1176 if (get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
1177 iommu_force_on = 1;
1178#endif
1179
1180 lprop = get_flat_dt_prop(node, "linux,memory-limit", NULL);
1181 if (lprop)
1182 memory_limit = *lprop;
1183
1184#ifdef CONFIG_PPC64
1185 lprop = get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
1186 if (lprop)
1187 tce_alloc_start = *lprop;
1188 lprop = get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
1189 if (lprop)
1190 tce_alloc_end = *lprop;
1191#endif
1192
1193#ifdef CONFIG_PPC_RTAS
 1194 /* To help early debugging via the front panel, we retrieve a minimal
 1195 * set of RTAS information now, if available
1196 */
1197 {
1198 u64 *basep, *entryp;
1199
1200 basep = get_flat_dt_prop(node, "linux,rtas-base", NULL);
1201 entryp = get_flat_dt_prop(node, "linux,rtas-entry", NULL);
1202 prop = get_flat_dt_prop(node, "linux,rtas-size", NULL);
1203 if (basep && entryp && prop) {
1204 rtas.base = *basep;
1205 rtas.entry = *entryp;
1206 rtas.size = *prop;
1207 }
1208 }
1209#endif /* CONFIG_PPC_RTAS */
1210
1211 /* break now */
1212 return 1;
1213}
1214
1215static int __init early_init_dt_scan_root(unsigned long node,
1216 const char *uname, int depth, void *data)
1217{
1218 u32 *prop;
1219
1220 if (depth != 0)
1221 return 0;
1222
1223 prop = get_flat_dt_prop(node, "#size-cells", NULL);
1224 dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1225 DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1226
1227 prop = get_flat_dt_prop(node, "#address-cells", NULL);
1228 dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1229 DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1230
1231 /* break now */
1232 return 1;
1233}
1234
1235static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
1236{
1237 cell_t *p = *cellp;
1238 unsigned long r;
1239
1240 /* Ignore more than 2 cells */
1241 while (s > sizeof(unsigned long) / 4) {
1242 p++;
1243 s--;
1244 }
1245 r = *p++;
1246#ifdef CONFIG_PPC64
1247 if (s > 1) {
1248 r <<= 32;
1249 r |= *(p++);
1250 s--;
1251 }
1252#endif
1253
1254 *cellp = p;
1255 return r;
1256}
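/*
 * Worked example (editorial, illustrative values): with
 * dt_root_addr_cells == 2, the cells { 0x00000001, 0x00000000 } combine
 * to a base of 0x100000000 on a 64-bit kernel; a 32-bit kernel skips the
 * extra most-significant cell and keeps only 0x00000000.
 */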
1257
1258
1259static int __init early_init_dt_scan_memory(unsigned long node,
1260 const char *uname, int depth, void *data)
1261{
1262 char *type = get_flat_dt_prop(node, "device_type", NULL);
1263 cell_t *reg, *endp;
1264 unsigned long l;
1265
1266 /* We are scanning "memory" nodes only */
1267 if (type == NULL || strcmp(type, "memory") != 0)
1268 return 0;
1269
1270 reg = (cell_t *)get_flat_dt_prop(node, "reg", &l);
1271 if (reg == NULL)
1272 return 0;
1273
1274 endp = reg + (l / sizeof(cell_t));
1275
1276 DBG("memory scan node %s ..., reg size %ld, data: %x %x %x %x, ...\n",
1277 uname, l, reg[0], reg[1], reg[2], reg[3]);
1278
1279 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1280 unsigned long base, size;
1281
1282 base = dt_mem_next_cell(dt_root_addr_cells, &reg);
1283 size = dt_mem_next_cell(dt_root_size_cells, &reg);
1284
1285 if (size == 0)
1286 continue;
1287 DBG(" - %lx , %lx\n", base, size);
1288#ifdef CONFIG_PPC64
1289 if (iommu_is_off) {
1290 if (base >= 0x80000000ul)
1291 continue;
1292 if ((base + size) > 0x80000000ul)
1293 size = 0x80000000ul - base;
1294 }
1295#endif
1296 lmb_add(base, size);
1297 }
1298 return 0;
1299}
1300
1301static void __init early_reserve_mem(void)
1302{
1303 unsigned long base, size;
1304 unsigned long *reserve_map;
1305
1306 reserve_map = (unsigned long *)(((unsigned long)initial_boot_params) +
1307 initial_boot_params->off_mem_rsvmap);
1308 while (1) {
1309 base = *(reserve_map++);
1310 size = *(reserve_map++);
1311 if (size == 0)
1312 break;
1313 DBG("reserving: %lx -> %lx\n", base, size);
1314 lmb_reserve(base, size);
1315 }
1316
1317#if 0
1318 DBG("memory reserved, lmbs :\n");
1319 lmb_dump_all();
1320#endif
1321}
1322
1323void __init early_init_devtree(void *params)
1324{
1325 DBG(" -> early_init_devtree()\n");
1326
1327 /* Setup flat device-tree pointer */
1328 initial_boot_params = params;
1329
 1330 /* Retrieve various information from the /chosen node of the
1331 * device-tree, including the platform type, initrd location and
1332 * size, TCE reserve, and more ...
1333 */
1334 scan_flat_dt(early_init_dt_scan_chosen, NULL);
1335
1336 /* Scan memory nodes and rebuild LMBs */
1337 lmb_init();
1338 scan_flat_dt(early_init_dt_scan_root, NULL);
1339 scan_flat_dt(early_init_dt_scan_memory, NULL);
1340 lmb_enforce_memory_limit(memory_limit);
1341 lmb_analyze();
1342#ifdef CONFIG_PPC64
1343 systemcfg->physicalMemorySize = lmb_phys_mem_size();
1344#endif
1345 lmb_reserve(0, __pa(klimit));
1346
1347 DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
1348
1349 /* Reserve LMB regions used by kernel, initrd, dt, etc... */
1350 early_reserve_mem();
1351
1352 DBG("Scanning CPUs ...\n");
1353
 1354 /* Retrieve the hash table size from the flattened tree, plus other
 1355 * CPU-related information (AltiVec support, boot CPU ID, ...)
1356 */
1357 scan_flat_dt(early_init_dt_scan_cpus, NULL);
1358
1359 DBG(" <- early_init_devtree()\n");
1360}
1361
1362#undef printk
1363
1364int
1365prom_n_addr_cells(struct device_node* np)
1366{
1367 int* ip;
1368 do {
1369 if (np->parent)
1370 np = np->parent;
1371 ip = (int *) get_property(np, "#address-cells", NULL);
1372 if (ip != NULL)
1373 return *ip;
1374 } while (np->parent);
1375 /* No #address-cells property for the root node, default to 1 */
1376 return 1;
1377}
1378
1379int
1380prom_n_size_cells(struct device_node* np)
1381{
1382 int* ip;
1383 do {
1384 if (np->parent)
1385 np = np->parent;
1386 ip = (int *) get_property(np, "#size-cells", NULL);
1387 if (ip != NULL)
1388 return *ip;
1389 } while (np->parent);
1390 /* No #size-cells property for the root node, default to 1 */
1391 return 1;
1392}
1393
1394/**
1395 * Work out the sense (active-low level / active-high edge)
1396 * of each interrupt from the device tree.
1397 */
1398void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
1399{
1400 struct device_node *np;
1401 int i, j;
1402
1403 /* default to level-triggered */
1404 memset(senses, 1, max - off);
1405
1406 for (np = allnodes; np != 0; np = np->allnext) {
1407 for (j = 0; j < np->n_intrs; j++) {
1408 i = np->intrs[j].line;
1409 if (i >= off && i < max)
1410 senses[i-off] = np->intrs[j].sense ?
1411 IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE :
1412 IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE;
1413 }
1414 }
1415}
1416
1417/**
1418 * Construct and return a list of the device_nodes with a given name.
1419 */
1420struct device_node *find_devices(const char *name)
1421{
1422 struct device_node *head, **prevp, *np;
1423
1424 prevp = &head;
1425 for (np = allnodes; np != 0; np = np->allnext) {
1426 if (np->name != 0 && strcasecmp(np->name, name) == 0) {
1427 *prevp = np;
1428 prevp = &np->next;
1429 }
1430 }
1431 *prevp = NULL;
1432 return head;
1433}
1434EXPORT_SYMBOL(find_devices);
1435
1436/**
1437 * Construct and return a list of the device_nodes with a given type.
1438 */
1439struct device_node *find_type_devices(const char *type)
1440{
1441 struct device_node *head, **prevp, *np;
1442
1443 prevp = &head;
1444 for (np = allnodes; np != 0; np = np->allnext) {
1445 if (np->type != 0 && strcasecmp(np->type, type) == 0) {
1446 *prevp = np;
1447 prevp = &np->next;
1448 }
1449 }
1450 *prevp = NULL;
1451 return head;
1452}
1453EXPORT_SYMBOL(find_type_devices);
1454
1455/**
1456 * Returns all nodes linked together
1457 */
1458struct device_node *find_all_nodes(void)
1459{
1460 struct device_node *head, **prevp, *np;
1461
1462 prevp = &head;
1463 for (np = allnodes; np != 0; np = np->allnext) {
1464 *prevp = np;
1465 prevp = &np->next;
1466 }
1467 *prevp = NULL;
1468 return head;
1469}
1470EXPORT_SYMBOL(find_all_nodes);
1471
1472/** Checks if the given "compat" string matches one of the strings in
1473 * the device's "compatible" property
1474 */
1475int device_is_compatible(struct device_node *device, const char *compat)
1476{
1477 const char* cp;
1478 int cplen, l;
1479
1480 cp = (char *) get_property(device, "compatible", &cplen);
1481 if (cp == NULL)
1482 return 0;
1483 while (cplen > 0) {
1484 if (strncasecmp(cp, compat, strlen(compat)) == 0)
1485 return 1;
1486 l = strlen(cp) + 1;
1487 cp += l;
1488 cplen -= l;
1489 }
1490
1491 return 0;
1492}
1493EXPORT_SYMBOL(device_is_compatible);
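/*
 * Illustrative usage sketch (editorial addition): this mirrors the call in
 * finish_node_interrupts() above, which spots a cascaded 8259:
 *
 *	if (ic && device_is_compatible(ic, "chrp,iic"))
 *		np->intrs[intrcount].line = irq[0];
 */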
1494
1495
1496/**
1497 * Indicates whether the root node has a given value in its
1498 * compatible property.
1499 */
1500int machine_is_compatible(const char *compat)
1501{
1502 struct device_node *root;
1503 int rc = 0;
1504
1505 root = of_find_node_by_path("/");
1506 if (root) {
1507 rc = device_is_compatible(root, compat);
1508 of_node_put(root);
1509 }
1510 return rc;
1511}
1512EXPORT_SYMBOL(machine_is_compatible);
1513
1514/**
1515 * Construct and return a list of the device_nodes with a given type
1516 * and compatible property.
1517 */
1518struct device_node *find_compatible_devices(const char *type,
1519 const char *compat)
1520{
1521 struct device_node *head, **prevp, *np;
1522
1523 prevp = &head;
1524 for (np = allnodes; np != 0; np = np->allnext) {
1525 if (type != NULL
1526 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1527 continue;
1528 if (device_is_compatible(np, compat)) {
1529 *prevp = np;
1530 prevp = &np->next;
1531 }
1532 }
1533 *prevp = NULL;
1534 return head;
1535}
1536EXPORT_SYMBOL(find_compatible_devices);
1537
1538/**
1539 * Find the device_node with a given full_name.
1540 */
1541struct device_node *find_path_device(const char *path)
1542{
1543 struct device_node *np;
1544
1545 for (np = allnodes; np != 0; np = np->allnext)
1546 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
1547 return np;
1548 return NULL;
1549}
1550EXPORT_SYMBOL(find_path_device);
1551
1552/*******
1553 *
1554 * New implementation of the OF "find" APIs, return a refcounted
1555 * object, call of_node_put() when done. The device tree and list
1556 * are protected by a rw_lock.
1557 *
1558 * Note that property management will need some locking as well,
1559 * this isn't dealt with yet.
1560 *
1561 *******/
1562
1563/**
1564 * of_find_node_by_name - Find a node by its "name" property
1565 * @from: The node to start searching from or NULL, the node
1566 * you pass will not be searched, only the next one
1567 * will; typically, you pass what the previous call
1568 * returned. of_node_put() will be called on it
1569 * @name: The name string to match against
1570 *
1571 * Returns a node pointer with refcount incremented, use
1572 * of_node_put() on it when done.
1573 */
1574struct device_node *of_find_node_by_name(struct device_node *from,
1575 const char *name)
1576{
1577 struct device_node *np;
1578
1579 read_lock(&devtree_lock);
1580 np = from ? from->allnext : allnodes;
1581 for (; np != 0; np = np->allnext)
1582 if (np->name != 0 && strcasecmp(np->name, name) == 0
1583 && of_node_get(np))
1584 break;
1585 if (from)
1586 of_node_put(from);
1587 read_unlock(&devtree_lock);
1588 return np;
1589}
1590EXPORT_SYMBOL(of_find_node_by_name);
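/*
 * Illustrative usage sketch (editorial addition; the helper name is
 * hypothetical): walk every node with a given name.  Passing the previous
 * result back in means the loop itself drops that reference via
 * of_node_put(); only a node kept beyond the loop needs an explicit put.
 *
 *	struct device_node *np = NULL;
 *
 *	while ((np = of_find_node_by_name(np, "pci")) != NULL)
 *		setup_pci_host(np);
 */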
1591
1592/**
1593 * of_find_node_by_type - Find a node by its "device_type" property
1594 * @from: The node to start searching from or NULL, the node
1595 * you pass will not be searched, only the next one
1596 * will; typically, you pass what the previous call
1597 * returned. of_node_put() will be called on it
 1598 * @type: The type string to match against
1599 *
1600 * Returns a node pointer with refcount incremented, use
1601 * of_node_put() on it when done.
1602 */
1603struct device_node *of_find_node_by_type(struct device_node *from,
1604 const char *type)
1605{
1606 struct device_node *np;
1607
1608 read_lock(&devtree_lock);
1609 np = from ? from->allnext : allnodes;
1610 for (; np != 0; np = np->allnext)
1611 if (np->type != 0 && strcasecmp(np->type, type) == 0
1612 && of_node_get(np))
1613 break;
1614 if (from)
1615 of_node_put(from);
1616 read_unlock(&devtree_lock);
1617 return np;
1618}
1619EXPORT_SYMBOL(of_find_node_by_type);
1620
1621/**
1622 * of_find_compatible_node - Find a node based on type and one of the
1623 * tokens in its "compatible" property
1624 * @from: The node to start searching from or NULL, the node
1625 * you pass will not be searched, only the next one
1626 * will; typically, you pass what the previous call
1627 * returned. of_node_put() will be called on it
1628 * @type: The type string to match "device_type" or NULL to ignore
1629 * @compatible: The string to match to one of the tokens in the device
1630 * "compatible" list.
1631 *
1632 * Returns a node pointer with refcount incremented, use
1633 * of_node_put() on it when done.
1634 */
1635struct device_node *of_find_compatible_node(struct device_node *from,
1636 const char *type, const char *compatible)
1637{
1638 struct device_node *np;
1639
1640 read_lock(&devtree_lock);
1641 np = from ? from->allnext : allnodes;
1642 for (; np != 0; np = np->allnext) {
1643 if (type != NULL
1644 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1645 continue;
1646 if (device_is_compatible(np, compatible) && of_node_get(np))
1647 break;
1648 }
1649 if (from)
1650 of_node_put(from);
1651 read_unlock(&devtree_lock);
1652 return np;
1653}
1654EXPORT_SYMBOL(of_find_compatible_node);
1655
1656/**
1657 * of_find_node_by_path - Find a node matching a full OF path
1658 * @path: The full path to match
1659 *
1660 * Returns a node pointer with refcount incremented, use
1661 * of_node_put() on it when done.
1662 */
1663struct device_node *of_find_node_by_path(const char *path)
1664{
1665 struct device_node *np = allnodes;
1666
1667 read_lock(&devtree_lock);
1668 for (; np != 0; np = np->allnext) {
1669 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1670 && of_node_get(np))
1671 break;
1672 }
1673 read_unlock(&devtree_lock);
1674 return np;
1675}
1676EXPORT_SYMBOL(of_find_node_by_path);
1677
1678/**
1679 * of_find_node_by_phandle - Find a node given a phandle
1680 * @handle: phandle of the node to find
1681 *
1682 * Returns a node pointer with refcount incremented, use
1683 * of_node_put() on it when done.
1684 */
1685struct device_node *of_find_node_by_phandle(phandle handle)
1686{
1687 struct device_node *np;
1688
1689 read_lock(&devtree_lock);
1690 for (np = allnodes; np != 0; np = np->allnext)
1691 if (np->linux_phandle == handle)
1692 break;
1693 if (np)
1694 of_node_get(np);
1695 read_unlock(&devtree_lock);
1696 return np;
1697}
1698EXPORT_SYMBOL(of_find_node_by_phandle);
1699
1700/**
1701 * of_find_all_nodes - Get next node in global list
1702 * @prev: Previous node or NULL to start iteration
1703 * of_node_put() will be called on it
1704 *
1705 * Returns a node pointer with refcount incremented, use
1706 * of_node_put() on it when done.
1707 */
1708struct device_node *of_find_all_nodes(struct device_node *prev)
1709{
1710 struct device_node *np;
1711
1712 read_lock(&devtree_lock);
1713 np = prev ? prev->allnext : allnodes;
1714 for (; np != 0; np = np->allnext)
1715 if (of_node_get(np))
1716 break;
1717 if (prev)
1718 of_node_put(prev);
1719 read_unlock(&devtree_lock);
1720 return np;
1721}
1722EXPORT_SYMBOL(of_find_all_nodes);
1723
1724/**
1725 * of_get_parent - Get a node's parent if any
1726 * @node: Node to get parent
1727 *
1728 * Returns a node pointer with refcount incremented, use
1729 * of_node_put() on it when done.
1730 */
1731struct device_node *of_get_parent(const struct device_node *node)
1732{
1733 struct device_node *np;
1734
1735 if (!node)
1736 return NULL;
1737
1738 read_lock(&devtree_lock);
1739 np = of_node_get(node->parent);
1740 read_unlock(&devtree_lock);
1741 return np;
1742}
1743EXPORT_SYMBOL(of_get_parent);
1744
1745/**
 1746 * of_get_next_child - Iterate over a node's children
1747 * @node: parent node
1748 * @prev: previous child of the parent node, or NULL to get first
1749 *
1750 * Returns a node pointer with refcount incremented, use
1751 * of_node_put() on it when done.
1752 */
1753struct device_node *of_get_next_child(const struct device_node *node,
1754 struct device_node *prev)
1755{
1756 struct device_node *next;
1757
1758 read_lock(&devtree_lock);
1759 next = prev ? prev->sibling : node->child;
1760 for (; next != 0; next = next->sibling)
1761 if (of_node_get(next))
1762 break;
1763 if (prev)
1764 of_node_put(prev);
1765 read_unlock(&devtree_lock);
1766 return next;
1767}
1768EXPORT_SYMBOL(of_get_next_child);
1769
1770/**
1771 * of_node_get - Increment refcount of a node
1772 * @node: Node to inc refcount, NULL is supported to
1773 * simplify writing of callers
1774 *
1775 * Returns node.
1776 */
1777struct device_node *of_node_get(struct device_node *node)
1778{
1779 if (node)
1780 kref_get(&node->kref);
1781 return node;
1782}
1783EXPORT_SYMBOL(of_node_get);
1784
1785static inline struct device_node * kref_to_device_node(struct kref *kref)
1786{
1787 return container_of(kref, struct device_node, kref);
1788}
1789
1790/**
1791 * of_node_release - release a dynamically allocated node
1792 * @kref: kref element of the node to be released
1793 *
1794 * In of_node_put() this function is passed to kref_put()
1795 * as the destructor.
1796 */
1797static void of_node_release(struct kref *kref)
1798{
1799 struct device_node *node = kref_to_device_node(kref);
1800 struct property *prop = node->properties;
1801
1802 if (!OF_IS_DYNAMIC(node))
1803 return;
1804 while (prop) {
1805 struct property *next = prop->next;
1806 kfree(prop->name);
1807 kfree(prop->value);
1808 kfree(prop);
1809 prop = next;
1810 }
1811 kfree(node->intrs);
1812 kfree(node->addrs);
1813 kfree(node->full_name);
1814 kfree(node->data);
1815 kfree(node);
1816}
1817
1818/**
1819 * of_node_put - Decrement refcount of a node
1820 * @node: Node to dec refcount, NULL is supported to
1821 * simplify writing of callers
1822 *
1823 */
1824void of_node_put(struct device_node *node)
1825{
1826 if (node)
1827 kref_put(&node->kref, of_node_release);
1828}
1829EXPORT_SYMBOL(of_node_put);
1830
1831/*
1832 * Plug a device node into the tree and global list.
1833 */
1834void of_attach_node(struct device_node *np)
1835{
1836 write_lock(&devtree_lock);
1837 np->sibling = np->parent->child;
1838 np->allnext = allnodes;
1839 np->parent->child = np;
1840 allnodes = np;
1841 write_unlock(&devtree_lock);
1842}
1843
1844/*
1845 * "Unplug" a node from the device tree. The caller must hold
1846 * a reference to the node. The memory associated with the node
1847 * is not freed until its refcount goes to zero.
1848 */
1849void of_detach_node(const struct device_node *np)
1850{
1851 struct device_node *parent;
1852
1853 write_lock(&devtree_lock);
1854
1855 parent = np->parent;
1856
1857 if (allnodes == np)
1858 allnodes = np->allnext;
1859 else {
1860 struct device_node *prev;
1861 for (prev = allnodes;
1862 prev->allnext != np;
1863 prev = prev->allnext)
1864 ;
1865 prev->allnext = np->allnext;
1866 }
1867
1868 if (parent->child == np)
1869 parent->child = np->sibling;
1870 else {
1871 struct device_node *prevsib;
1872 for (prevsib = np->parent->child;
1873 prevsib->sibling != np;
1874 prevsib = prevsib->sibling)
1875 ;
1876 prevsib->sibling = np->sibling;
1877 }
1878
1879 write_unlock(&devtree_lock);
1880}
1881
1882#ifdef CONFIG_PPC_PSERIES
1883/*
1884 * Fix up the uninitialized fields in a new device node:
1885 * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
1886 *
1887 * A lot of boot-time code is duplicated here, because functions such
1888 * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
1889 * slab allocator.
1890 *
1891 * This should probably be split up into smaller chunks.
1892 */
1893
1894static int of_finish_dynamic_node(struct device_node *node,
1895 unsigned long *unused1, int unused2,
1896 int unused3, int unused4)
1897{
1898 struct device_node *parent = of_get_parent(node);
1899 int err = 0;
1900 phandle *ibm_phandle;
1901
1902 node->name = get_property(node, "name", NULL);
1903 node->type = get_property(node, "device_type", NULL);
1904
1905 if (!parent) {
1906 err = -ENODEV;
1907 goto out;
1908 }
1909
1910 /* We don't support that function on PowerMac, at least
1911 * not yet
1912 */
1913 if (systemcfg->platform == PLATFORM_POWERMAC)
1914 return -ENODEV;
1915
1916 /* fix up new node's linux_phandle field */
1917 if ((ibm_phandle = (unsigned int *)get_property(node, "ibm,phandle", NULL)))
1918 node->linux_phandle = *ibm_phandle;
1919
1920out:
1921 of_node_put(parent);
1922 return err;
1923}
1924
1925static int prom_reconfig_notifier(struct notifier_block *nb,
1926 unsigned long action, void *node)
1927{
1928 int err;
1929
1930 switch (action) {
1931 case PSERIES_RECONFIG_ADD:
1932 err = finish_node(node, NULL, of_finish_dynamic_node, 0, 0, 0);
1933 if (err < 0) {
1934 printk(KERN_ERR "finish_node returned %d\n", err);
1935 err = NOTIFY_BAD;
1936 }
1937 break;
1938 default:
1939 err = NOTIFY_DONE;
1940 break;
1941 }
1942 return err;
1943}
1944
1945static struct notifier_block prom_reconfig_nb = {
1946 .notifier_call = prom_reconfig_notifier,
1947 .priority = 10, /* This one needs to run first */
1948};
1949
1950static int __init prom_reconfig_setup(void)
1951{
1952 return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
1953}
1954__initcall(prom_reconfig_setup);
1955#endif
1956
1957/*
1958 * Find a property with a given name for a given node
1959 * and return the value.
1960 */
1961unsigned char *get_property(struct device_node *np, const char *name,
1962 int *lenp)
1963{
1964 struct property *pp;
1965
1966 for (pp = np->properties; pp != 0; pp = pp->next)
1967 if (strcmp(pp->name, name) == 0) {
1968 if (lenp != 0)
1969 *lenp = pp->length;
1970 return pp->value;
1971 }
1972 return NULL;
1973}
1974EXPORT_SYMBOL(get_property);
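/*
 * Illustrative usage sketch (editorial addition): this mirrors how callers
 * throughout this file use get_property(), e.g.
 *
 *	int len;
 *	u32 *reg = (u32 *)get_property(np, "reg", &len);
 *
 *	if (reg != NULL && len >= sizeof(u32))
 *		first_cell = reg[0];
 */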
1975
1976/*
1977 * Add a property to a node
1978 */
1979void prom_add_property(struct device_node* np, struct property* prop)
1980{
1981 struct property **next = &np->properties;
1982
1983 prop->next = NULL;
1984 while (*next)
1985 next = &(*next)->next;
1986 *next = prop;
1987}
1988
1989/* I quickly hacked that one, check against spec ! */
1990static inline unsigned long
1991bus_space_to_resource_flags(unsigned int bus_space)
1992{
1993 u8 space = (bus_space >> 24) & 0xf;
1994 if (space == 0)
1995 space = 0x02;
1996 if (space == 0x02)
1997 return IORESOURCE_MEM;
1998 else if (space == 0x01)
1999 return IORESOURCE_IO;
2000 else {
2001 printk(KERN_WARNING "prom.c: bus_space_to_resource_flags(), space: %x\n",
2002 bus_space);
2003 return 0;
2004 }
2005}
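/*
 * Worked example (editorial, values illustrative): a PCI assigned-addresses
 * phys.hi word of 0x82000010 has space code (0x82000010 >> 24) & 0xf == 0x2,
 * i.e. 32-bit memory space, so the routine above returns IORESOURCE_MEM;
 * 0x81000014 has space code 0x1 and maps to IORESOURCE_IO.
 */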
2006
 2007#ifdef CONFIG_PCI
2008static struct resource *find_parent_pci_resource(struct pci_dev* pdev,
2009 struct address_range *range)
2010{
2011 unsigned long mask;
2012 int i;
2013
2014 /* Check this one */
2015 mask = bus_space_to_resource_flags(range->space);
2016 for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2017 if ((pdev->resource[i].flags & mask) == mask &&
2018 pdev->resource[i].start <= range->address &&
2019 pdev->resource[i].end > range->address) {
2020 if ((range->address + range->size - 1) > pdev->resource[i].end) {
2021 /* Add better message */
2022 printk(KERN_WARNING "PCI/OF resource overlap !\n");
2023 return NULL;
2024 }
2025 break;
2026 }
2027 }
2028 if (i == DEVICE_COUNT_RESOURCE)
2029 return NULL;
2030 return &pdev->resource[i];
2031}
2032
2033/*
 2034 * Request an OF device resource. Currently handles children of PCI devices,
2035 * or other nodes attached to the root node. Ultimately, put some
2036 * link to resources in the OF node.
2037 */
2038struct resource *request_OF_resource(struct device_node* node, int index,
2039 const char* name_postfix)
2040{
2041 struct pci_dev* pcidev;
2042 u8 pci_bus, pci_devfn;
2043 unsigned long iomask;
2044 struct device_node* nd;
2045 struct resource* parent;
2046 struct resource *res = NULL;
2047 int nlen, plen;
2048
2049 if (index >= node->n_addrs)
2050 goto fail;
2051
2052 /* Sanity check on bus space */
2053 iomask = bus_space_to_resource_flags(node->addrs[index].space);
2054 if (iomask & IORESOURCE_MEM)
2055 parent = &iomem_resource;
2056 else if (iomask & IORESOURCE_IO)
2057 parent = &ioport_resource;
2058 else
2059 goto fail;
2060
2061 /* Find a PCI parent if any */
2062 nd = node;
2063 pcidev = NULL;
2064 while (nd) {
2065 if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2066 pcidev = pci_find_slot(pci_bus, pci_devfn);
2067 if (pcidev) break;
2068 nd = nd->parent;
2069 }
2070 if (pcidev)
2071 parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2072 if (!parent) {
2073 printk(KERN_WARNING "request_OF_resource(%s), parent not found\n",
2074 node->name);
2075 goto fail;
2076 }
2077
2078 res = __request_region(parent, node->addrs[index].address,
2079 node->addrs[index].size, NULL);
2080 if (!res)
2081 goto fail;
2082 nlen = strlen(node->name);
2083 plen = name_postfix ? strlen(name_postfix) : 0;
2084 res->name = (const char *)kmalloc(nlen+plen+1, GFP_KERNEL);
2085 if (res->name) {
2086 strcpy((char *)res->name, node->name);
2087 if (plen)
2088 strcpy((char *)res->name+nlen, name_postfix);
2089 }
2090 return res;
2091fail:
2092 return NULL;
2093}
2094EXPORT_SYMBOL(request_OF_resource);
2095
2096int release_OF_resource(struct device_node *node, int index)
2097{
2098 struct pci_dev* pcidev;
2099 u8 pci_bus, pci_devfn;
2100 unsigned long iomask, start, end;
2101 struct device_node* nd;
2102 struct resource* parent;
2103 struct resource *res = NULL;
2104
2105 if (index >= node->n_addrs)
2106 return -EINVAL;
2107
2108 /* Sanity check on bus space */
2109 iomask = bus_space_to_resource_flags(node->addrs[index].space);
2110 if (iomask & IORESOURCE_MEM)
2111 parent = &iomem_resource;
2112 else if (iomask & IORESOURCE_IO)
2113 parent = &ioport_resource;
2114 else
2115 return -EINVAL;
2116
2117 /* Find a PCI parent if any */
2118 nd = node;
2119 pcidev = NULL;
2120 while(nd) {
2121 if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2122 pcidev = pci_find_slot(pci_bus, pci_devfn);
2123 if (pcidev) break;
2124 nd = nd->parent;
2125 }
2126 if (pcidev)
2127 parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2128 if (!parent) {
2129 printk(KERN_WARNING "release_OF_resource(%s), parent not found\n",
2130 node->name);
2131 return -ENODEV;
2132 }
2133
 2134 /* Find ourselves in the parent and its children */
2135 res = parent->child;
2136 start = node->addrs[index].address;
2137 end = start + node->addrs[index].size - 1;
2138 while (res) {
2139 if (res->start == start && res->end == end &&
2140 (res->flags & IORESOURCE_BUSY))
2141 break;
2142 if (res->start <= start && res->end >= end)
2143 res = res->child;
2144 else
2145 res = res->sibling;
2146 }
2147 if (!res)
2148 return -ENODEV;
2149
2150 if (res->name) {
2151 kfree(res->name);
2152 res->name = NULL;
2153 }
2154 release_resource(res);
2155 kfree(res);
2156
2157 return 0;
2158}
2159EXPORT_SYMBOL(release_OF_resource);
 2160#endif /* CONFIG_PCI */