[PATCH] powerpc: Always panic if lmb_alloc() fails
[deliverable/linux.git] / arch/powerpc/kernel/prom.c
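The change named in the title concerns the early-boot LMB allocator: while the device tree is being sized, unflattened and relocated (see finish_device_tree(), unflatten_device_tree() and kdump_move_device_tree() below), an lmb_alloc() failure leaves nothing sensible to fall back on, so it should be treated as fatal rather than returning a null address that later code dereferences. A minimal sketch of that pattern, using a hypothetical wrapper name rather than the literal shape of the patch:

/* Hypothetical illustration only -- not the literal patch. */
static unsigned long lmb_alloc_or_panic(unsigned long size, unsigned long align)
{
	unsigned long p = lmb_alloc(size, align);

	if (p == 0)
		panic("Out of early boot memory: lmb_alloc(%lu) failed\n", size);
	return p;
}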
1/*
2 * Procedures for creating, accessing and interpreting the device tree.
3 *
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#undef DEBUG
17
18#include <stdarg.h>
19#include <linux/config.h>
20#include <linux/kernel.h>
21#include <linux/string.h>
22#include <linux/init.h>
23#include <linux/threads.h>
24#include <linux/spinlock.h>
25#include <linux/types.h>
26#include <linux/pci.h>
27#include <linux/stringify.h>
28#include <linux/delay.h>
29#include <linux/initrd.h>
30#include <linux/bitops.h>
31#include <linux/module.h>
 32#include <linux/kexec.h>
33
34#include <asm/prom.h>
35#include <asm/rtas.h>
36#include <asm/lmb.h>
37#include <asm/page.h>
38#include <asm/processor.h>
39#include <asm/irq.h>
40#include <asm/io.h>
 41#include <asm/kdump.h>
42#include <asm/smp.h>
43#include <asm/system.h>
44#include <asm/mmu.h>
45#include <asm/pgtable.h>
46#include <asm/pci.h>
47#include <asm/iommu.h>
48#include <asm/btext.h>
49#include <asm/sections.h>
50#include <asm/machdep.h>
51#include <asm/pSeries_reconfig.h>
 52#include <asm/pci-bridge.h>
53
54#ifdef DEBUG
55#define DBG(fmt...) printk(KERN_ERR fmt)
56#else
57#define DBG(fmt...)
58#endif
59
 60
61static int __initdata dt_root_addr_cells;
62static int __initdata dt_root_size_cells;
63
64#ifdef CONFIG_PPC64
65static int __initdata iommu_is_off;
66int __initdata iommu_force_on;
 67unsigned long tce_alloc_start, tce_alloc_end;
68#endif
69
70typedef u32 cell_t;
71
72#if 0
73static struct boot_param_header *initial_boot_params __initdata;
74#else
75struct boot_param_header *initial_boot_params;
76#endif
77
78static struct device_node *allnodes = NULL;
79
80/* use when traversing tree through the allnext, child, sibling,
81 * or parent members of struct device_node.
82 */
83static DEFINE_RWLOCK(devtree_lock);
84
85/* export that to outside world */
86struct device_node *of_chosen;
87
88struct device_node *dflt_interrupt_controller;
89int num_interrupt_controllers;
90
91/*
92 * Wrapper for allocating memory for various data that needs to be
93 * attached to device nodes as they are processed at boot or when
94 * added to the device tree later (e.g. DLPAR). At boot there is
95 * already a region reserved so we just increment *mem_start by size;
96 * otherwise we call kmalloc.
97 */
98static void * prom_alloc(unsigned long size, unsigned long *mem_start)
99{
100 unsigned long tmp;
101
102 if (!mem_start)
103 return kmalloc(size, GFP_KERNEL);
104
105 tmp = *mem_start;
106 *mem_start += size;
107 return (void *)tmp;
108}
109
110/*
111 * Find the device_node with a given phandle.
112 */
113static struct device_node * find_phandle(phandle ph)
114{
115 struct device_node *np;
116
117 for (np = allnodes; np != 0; np = np->allnext)
118 if (np->linux_phandle == ph)
119 return np;
120 return NULL;
121}
122
123/*
124 * Find the interrupt parent of a node.
125 */
126static struct device_node * __devinit intr_parent(struct device_node *p)
127{
128 phandle *parp;
129
130 parp = (phandle *) get_property(p, "interrupt-parent", NULL);
131 if (parp == NULL)
132 return p->parent;
133 p = find_phandle(*parp);
134 if (p != NULL)
135 return p;
136 /*
137 * On a powermac booted with BootX, we don't get to know the
138 * phandles for any nodes, so find_phandle will return NULL.
139 * Fortunately these machines only have one interrupt controller
140 * so there isn't in fact any ambiguity. -- paulus
141 */
142 if (num_interrupt_controllers == 1)
143 p = dflt_interrupt_controller;
144 return p;
145}
146
147/*
148 * Find out the size of each entry of the interrupts property
149 * for a node.
150 */
151int __devinit prom_n_intr_cells(struct device_node *np)
152{
153 struct device_node *p;
154 unsigned int *icp;
155
156 for (p = np; (p = intr_parent(p)) != NULL; ) {
157 icp = (unsigned int *)
158 get_property(p, "#interrupt-cells", NULL);
159 if (icp != NULL)
160 return *icp;
161 if (get_property(p, "interrupt-controller", NULL) != NULL
162 || get_property(p, "interrupt-map", NULL) != NULL) {
163 printk("oops, node %s doesn't have #interrupt-cells\n",
164 p->full_name);
165 return 1;
166 }
167 }
168#ifdef DEBUG_IRQ
169 printk("prom_n_intr_cells failed for %s\n", np->full_name);
170#endif
171 return 1;
172}
173
174/*
175 * Map an interrupt from a device up to the platform interrupt
176 * descriptor.
177 */
178static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
179 struct device_node *np, unsigned int *ints,
180 int nintrc)
181{
182 struct device_node *p, *ipar;
183 unsigned int *imap, *imask, *ip;
184 int i, imaplen, match;
185 int newintrc = 0, newaddrc = 0;
186 unsigned int *reg;
187 int naddrc;
188
189 reg = (unsigned int *) get_property(np, "reg", NULL);
190 naddrc = prom_n_addr_cells(np);
191 p = intr_parent(np);
192 while (p != NULL) {
193 if (get_property(p, "interrupt-controller", NULL) != NULL)
194 /* this node is an interrupt controller, stop here */
195 break;
196 imap = (unsigned int *)
197 get_property(p, "interrupt-map", &imaplen);
198 if (imap == NULL) {
199 p = intr_parent(p);
200 continue;
201 }
202 imask = (unsigned int *)
203 get_property(p, "interrupt-map-mask", NULL);
204 if (imask == NULL) {
205 printk("oops, %s has interrupt-map but no mask\n",
206 p->full_name);
207 return 0;
208 }
209 imaplen /= sizeof(unsigned int);
210 match = 0;
211 ipar = NULL;
212 while (imaplen > 0 && !match) {
213 /* check the child-interrupt field */
214 match = 1;
215 for (i = 0; i < naddrc && match; ++i)
216 match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
217 for (; i < naddrc + nintrc && match; ++i)
218 match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
219 imap += naddrc + nintrc;
220 imaplen -= naddrc + nintrc;
221 /* grab the interrupt parent */
222 ipar = find_phandle((phandle) *imap++);
223 --imaplen;
224 if (ipar == NULL && num_interrupt_controllers == 1)
225 /* cope with BootX not giving us phandles */
226 ipar = dflt_interrupt_controller;
227 if (ipar == NULL) {
228 printk("oops, no int parent %x in map of %s\n",
229 imap[-1], p->full_name);
230 return 0;
231 }
232 /* find the parent's # addr and intr cells */
233 ip = (unsigned int *)
234 get_property(ipar, "#interrupt-cells", NULL);
235 if (ip == NULL) {
236 printk("oops, no #interrupt-cells on %s\n",
237 ipar->full_name);
238 return 0;
239 }
240 newintrc = *ip;
241 ip = (unsigned int *)
242 get_property(ipar, "#address-cells", NULL);
243 newaddrc = (ip == NULL)? 0: *ip;
244 imap += newaddrc + newintrc;
245 imaplen -= newaddrc + newintrc;
246 }
247 if (imaplen < 0) {
248 printk("oops, error decoding int-map on %s, len=%d\n",
249 p->full_name, imaplen);
250 return 0;
251 }
252 if (!match) {
253#ifdef DEBUG_IRQ
254 printk("oops, no match in %s int-map for %s\n",
255 p->full_name, np->full_name);
256#endif
257 return 0;
258 }
259 p = ipar;
260 naddrc = newaddrc;
261 nintrc = newintrc;
262 ints = imap - nintrc;
263 reg = ints - naddrc;
264 }
265 if (p == NULL) {
266#ifdef DEBUG_IRQ
267 printk("hmmm, int tree for %s doesn't have ctrler\n",
268 np->full_name);
269#endif
270 return 0;
271 }
272 *irq = ints;
273 *ictrler = p;
274 return nintrc;
275}
276
277static unsigned char map_isa_senses[4] = {
278 IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
279 IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
280 IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE,
281 IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE
282};
283
284static unsigned char map_mpic_senses[4] = {
285 IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE,
286 IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
287 /* 2 seems to be used for the 8259 cascade... */
288 IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
289 IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE,
290};
291
292static int __devinit finish_node_interrupts(struct device_node *np,
293 unsigned long *mem_start,
294 int measure_only)
295{
296 unsigned int *ints;
297 int intlen, intrcells, intrcount;
 298 int i, j, n, sense;
299 unsigned int *irq, virq;
300 struct device_node *ic;
301 int trace = 0;
302
303 //#define TRACE(fmt...) do { if (trace) { printk(fmt); mdelay(1000); } } while(0)
304#define TRACE(fmt...)
305
306 if (!strcmp(np->name, "smu-doorbell"))
307 trace = 1;
308
309 TRACE("Finishing SMU doorbell ! num_interrupt_controllers = %d\n",
310 num_interrupt_controllers);
 311
312 if (num_interrupt_controllers == 0) {
313 /*
314 * Old machines just have a list of interrupt numbers
315 * and no interrupt-controller nodes.
316 */
317 ints = (unsigned int *) get_property(np, "AAPL,interrupts",
318 &intlen);
319 /* XXX old interpret_pci_props looked in parent too */
320 /* XXX old interpret_macio_props looked for interrupts
321 before AAPL,interrupts */
322 if (ints == NULL)
323 ints = (unsigned int *) get_property(np, "interrupts",
324 &intlen);
325 if (ints == NULL)
326 return 0;
327
328 np->n_intrs = intlen / sizeof(unsigned int);
329 np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]),
330 mem_start);
331 if (!np->intrs)
332 return -ENOMEM;
333 if (measure_only)
334 return 0;
335
336 for (i = 0; i < np->n_intrs; ++i) {
337 np->intrs[i].line = *ints++;
338 np->intrs[i].sense = IRQ_SENSE_LEVEL
339 | IRQ_POLARITY_NEGATIVE;
340 }
341 return 0;
342 }
343
 344 ints = (unsigned int *) get_property(np, "interrupts", &intlen);
 345 TRACE("ints=%p, intlen=%d\n", ints, intlen);
346 if (ints == NULL)
347 return 0;
348 intrcells = prom_n_intr_cells(np);
349 intlen /= intrcells * sizeof(unsigned int);
 350 TRACE("intrcells=%d, new intlen=%d\n", intrcells, intlen);
351 np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
352 if (!np->intrs)
353 return -ENOMEM;
354
355 if (measure_only)
356 return 0;
357
358 intrcount = 0;
359 for (i = 0; i < intlen; ++i, ints += intrcells) {
360 n = map_interrupt(&irq, &ic, np, ints, intrcells);
 361 TRACE("map, irq=%d, ic=%p, n=%d\n", irq, ic, n);
362 if (n <= 0)
363 continue;
364
365 /* don't map IRQ numbers under a cascaded 8259 controller */
366 if (ic && device_is_compatible(ic, "chrp,iic")) {
367 np->intrs[intrcount].line = irq[0];
368 sense = (n > 1)? (irq[1] & 3): 3;
369 np->intrs[intrcount].sense = map_isa_senses[sense];
 370 } else {
 371 virq = virt_irq_create_mapping(irq[0]);
 372 TRACE("virq=%d\n", virq);
 373#ifdef CONFIG_PPC64
374 if (virq == NO_IRQ) {
375 printk(KERN_CRIT "Could not allocate interrupt"
376 " number for %s\n", np->full_name);
377 continue;
378 }
 379#endif
380 np->intrs[intrcount].line = irq_offset_up(virq);
381 sense = (n > 1)? (irq[1] & 3): 1;
382
383 /* Apple uses bits in there in a different way, let's
384 * only keep the real sense bit on macs
385 */
386 if (_machine == PLATFORM_POWERMAC)
387 sense &= 0x1;
 388 np->intrs[intrcount].sense = map_mpic_senses[sense];
389 }
390
391#ifdef CONFIG_PPC64
392 /* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
 393 if (_machine == PLATFORM_POWERMAC && ic && ic->parent) {
394 char *name = get_property(ic->parent, "name", NULL);
395 if (name && !strcmp(name, "u3"))
396 np->intrs[intrcount].line += 128;
397 else if (!(name && (!strcmp(name, "mac-io") ||
398 !strcmp(name, "u4"))))
399 /* ignore other cascaded controllers, such as
400 the k2-sata-root */
401 break;
402 }
 403#endif /* CONFIG_PPC64 */
404 if (n > 2) {
405 printk("hmmm, got %d intr cells for %s:", n,
406 np->full_name);
407 for (j = 0; j < n; ++j)
408 printk(" %d", irq[j]);
409 printk("\n");
410 }
411 ++intrcount;
412 }
413 np->n_intrs = intrcount;
414
415 return 0;
416}
417
418static int __devinit finish_node(struct device_node *np,
419 unsigned long *mem_start,
420 int measure_only)
421{
422 struct device_node *child;
 423 int rc = 0;
424
425 rc = finish_node_interrupts(np, mem_start, measure_only);
426 if (rc)
427 goto out;
428
 429 for (child = np->child; child != NULL; child = child->sibling) {
 430 rc = finish_node(child, mem_start, measure_only);
431 if (rc)
432 goto out;
433 }
434out:
435 return rc;
436}
437
438static void __init scan_interrupt_controllers(void)
439{
440 struct device_node *np;
441 int n = 0;
442 char *name, *ic;
443 int iclen;
444
445 for (np = allnodes; np != NULL; np = np->allnext) {
446 ic = get_property(np, "interrupt-controller", &iclen);
447 name = get_property(np, "name", NULL);
448 /* checking iclen makes sure we don't get a false
449 match on /chosen.interrupt_controller */
450 if ((name != NULL
451 && strcmp(name, "interrupt-controller") == 0)
452 || (ic != NULL && iclen == 0
453 && strcmp(name, "AppleKiwi"))) {
454 if (n == 0)
455 dflt_interrupt_controller = np;
456 ++n;
457 }
458 }
459 num_interrupt_controllers = n;
460}
461
462/**
463 * finish_device_tree is called once things are running normally
464 * (i.e. with text and data mapped to the address they were linked at).
 465 * It traverses the device tree and fills in some of the additional
 466 * fields in each node, like {n_}addrs and {n_}intrs; the virt interrupt
467 * mapping is also initialized at this point.
468 */
469void __init finish_device_tree(void)
470{
471 unsigned long start, end, size = 0;
472
473 DBG(" -> finish_device_tree\n");
474
475#ifdef CONFIG_PPC64
476 /* Initialize virtual IRQ map */
477 virt_irq_init();
478#endif
479 scan_interrupt_controllers();
480
481 /*
482 * Finish device-tree (pre-parsing some properties etc...)
483 * We do this in 2 passes. One with "measure_only" set, which
484 * will only measure the amount of memory needed, then we can
485 * allocate that memory, and call finish_node again. However,
486 * we must be careful as most routines will fail nowadays when
487 * prom_alloc() returns 0, so we must make sure our first pass
488 * doesn't start at 0. We pre-initialize size to 16 for that
489 * reason and then remove those additional 16 bytes
490 */
491 size = 16;
 492 finish_node(allnodes, &size, 1);
 493 size -= 16;
494
495 if (0 == size)
496 end = start = 0;
497 else
498 end = start = (unsigned long)__va(lmb_alloc(size, 128));
499
 500 finish_node(allnodes, &end, 0);
501 BUG_ON(end != start + size);
502
503 DBG(" <- finish_device_tree\n");
504}
505
506static inline char *find_flat_dt_string(u32 offset)
507{
508 return ((char *)initial_boot_params) +
509 initial_boot_params->off_dt_strings + offset;
510}
511
512/**
 513 * This function is used to scan the flattened device-tree; it is
 514 * used to extract memory information at boot, before we can
 515 * unflatten the tree.
516 */
517int __init of_scan_flat_dt(int (*it)(unsigned long node,
518 const char *uname, int depth,
519 void *data),
520 void *data)
521{
522 unsigned long p = ((unsigned long)initial_boot_params) +
523 initial_boot_params->off_dt_struct;
524 int rc = 0;
525 int depth = -1;
526
527 do {
528 u32 tag = *((u32 *)p);
529 char *pathp;
530
531 p += 4;
532 if (tag == OF_DT_END_NODE) {
533 depth --;
534 continue;
535 }
536 if (tag == OF_DT_NOP)
537 continue;
538 if (tag == OF_DT_END)
539 break;
540 if (tag == OF_DT_PROP) {
541 u32 sz = *((u32 *)p);
542 p += 8;
543 if (initial_boot_params->version < 0x10)
544 p = _ALIGN(p, sz >= 8 ? 8 : 4);
545 p += sz;
546 p = _ALIGN(p, 4);
547 continue;
548 }
549 if (tag != OF_DT_BEGIN_NODE) {
550 printk(KERN_WARNING "Invalid tag %x scanning flattened"
551 " device tree !\n", tag);
552 return -EINVAL;
553 }
554 depth++;
555 pathp = (char *)p;
556 p = _ALIGN(p + strlen(pathp) + 1, 4);
557 if ((*pathp) == '/') {
558 char *lp, *np;
559 for (lp = NULL, np = pathp; *np; np++)
560 if ((*np) == '/')
561 lp = np+1;
562 if (lp != NULL)
563 pathp = lp;
564 }
565 rc = it(p, pathp, depth, data);
566 if (rc != 0)
567 break;
568 } while(1);
569
570 return rc;
571}
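For reference, a scanner passed to of_scan_flat_dt() receives the flat-tree offset of each node, its unit name and its depth, usually pulls properties out with of_get_flat_dt_prop() (defined just below), and returns non-zero to stop the walk early. A minimal sketch, modelled on the early_init_dt_scan_* callbacks later in this file (the callback name and the property read here are illustrative only):

static int __init example_scan_root(unsigned long node, const char *uname,
				    int depth, void *data)
{
	u32 *prop;

	if (depth != 0)		/* only interested in the root node */
		return 0;

	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
	if (prop != NULL)
		*(u32 *)data = *prop;

	return 1;		/* non-zero stops the scan */
}

	/* typical call site: of_scan_flat_dt(example_scan_root, &addr_cells); */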
572
573/**
 574 * This function can be used within an of_scan_flat_dt() callback to get
 575 * access to properties.
576 */
577void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
578 unsigned long *size)
579{
580 unsigned long p = node;
581
582 do {
583 u32 tag = *((u32 *)p);
584 u32 sz, noff;
585 const char *nstr;
586
587 p += 4;
588 if (tag == OF_DT_NOP)
589 continue;
590 if (tag != OF_DT_PROP)
591 return NULL;
592
593 sz = *((u32 *)p);
594 noff = *((u32 *)(p + 4));
595 p += 8;
596 if (initial_boot_params->version < 0x10)
597 p = _ALIGN(p, sz >= 8 ? 8 : 4);
598
599 nstr = find_flat_dt_string(noff);
600 if (nstr == NULL) {
601 printk(KERN_WARNING "Can't find property index"
602 " name !\n");
603 return NULL;
604 }
605 if (strcmp(name, nstr) == 0) {
606 if (size)
607 *size = sz;
608 return (void *)p;
609 }
610 p += sz;
611 p = _ALIGN(p, 4);
612 } while(1);
613}
614
615static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
616 unsigned long align)
617{
618 void *res;
619
620 *mem = _ALIGN(*mem, align);
621 res = (void *)*mem;
622 *mem += size;
623
624 return res;
625}
626
627static unsigned long __init unflatten_dt_node(unsigned long mem,
628 unsigned long *p,
629 struct device_node *dad,
630 struct device_node ***allnextpp,
631 unsigned long fpsize)
632{
633 struct device_node *np;
634 struct property *pp, **prev_pp = NULL;
635 char *pathp;
636 u32 tag;
637 unsigned int l, allocl;
638 int has_name = 0;
639 int new_format = 0;
640
641 tag = *((u32 *)(*p));
642 if (tag != OF_DT_BEGIN_NODE) {
643 printk("Weird tag at start of node: %x\n", tag);
644 return mem;
645 }
646 *p += 4;
647 pathp = (char *)*p;
648 l = allocl = strlen(pathp) + 1;
649 *p = _ALIGN(*p + l, 4);
650
651 /* version 0x10 has a more compact unit name here instead of the full
652 * path. we accumulate the full path size using "fpsize", we'll rebuild
653 * it later. We detect this because the first character of the name is
654 * not '/'.
655 */
656 if ((*pathp) != '/') {
657 new_format = 1;
658 if (fpsize == 0) {
659 /* root node: special case. fpsize accounts for path
660 * plus terminating zero. root node only has '/', so
661 * fpsize should be 2, but we want to avoid the first
662 * level nodes to have two '/' so we use fpsize 1 here
663 */
664 fpsize = 1;
665 allocl = 2;
666 } else {
667 /* account for '/' and path size minus terminal 0
668 * already in 'l'
669 */
670 fpsize += l;
671 allocl = fpsize;
672 }
673 }
674
675
676 np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
677 __alignof__(struct device_node));
678 if (allnextpp) {
679 memset(np, 0, sizeof(*np));
680 np->full_name = ((char*)np) + sizeof(struct device_node);
681 if (new_format) {
682 char *p = np->full_name;
683 /* rebuild full path for new format */
684 if (dad && dad->parent) {
685 strcpy(p, dad->full_name);
686#ifdef DEBUG
687 if ((strlen(p) + l + 1) != allocl) {
688 DBG("%s: p: %d, l: %d, a: %d\n",
689 pathp, strlen(p), l, allocl);
690 }
691#endif
692 p += strlen(p);
693 }
694 *(p++) = '/';
695 memcpy(p, pathp, l);
696 } else
697 memcpy(np->full_name, pathp, l);
698 prev_pp = &np->properties;
699 **allnextpp = np;
700 *allnextpp = &np->allnext;
701 if (dad != NULL) {
702 np->parent = dad;
703 /* we temporarily use the next field as `last_child'*/
704 if (dad->next == 0)
705 dad->child = np;
706 else
707 dad->next->sibling = np;
708 dad->next = np;
709 }
710 kref_init(&np->kref);
711 }
712 while(1) {
713 u32 sz, noff;
714 char *pname;
715
716 tag = *((u32 *)(*p));
717 if (tag == OF_DT_NOP) {
718 *p += 4;
719 continue;
720 }
721 if (tag != OF_DT_PROP)
722 break;
723 *p += 4;
724 sz = *((u32 *)(*p));
725 noff = *((u32 *)((*p) + 4));
726 *p += 8;
727 if (initial_boot_params->version < 0x10)
728 *p = _ALIGN(*p, sz >= 8 ? 8 : 4);
729
730 pname = find_flat_dt_string(noff);
731 if (pname == NULL) {
732 printk("Can't find property name in list !\n");
733 break;
734 }
735 if (strcmp(pname, "name") == 0)
736 has_name = 1;
737 l = strlen(pname) + 1;
738 pp = unflatten_dt_alloc(&mem, sizeof(struct property),
739 __alignof__(struct property));
740 if (allnextpp) {
741 if (strcmp(pname, "linux,phandle") == 0) {
742 np->node = *((u32 *)*p);
743 if (np->linux_phandle == 0)
744 np->linux_phandle = np->node;
745 }
746 if (strcmp(pname, "ibm,phandle") == 0)
747 np->linux_phandle = *((u32 *)*p);
748 pp->name = pname;
749 pp->length = sz;
750 pp->value = (void *)*p;
751 *prev_pp = pp;
752 prev_pp = &pp->next;
753 }
754 *p = _ALIGN((*p) + sz, 4);
755 }
756 /* with version 0x10 we may not have the name property, recreate
757 * it here from the unit name if absent
758 */
759 if (!has_name) {
760 char *p = pathp, *ps = pathp, *pa = NULL;
761 int sz;
762
763 while (*p) {
764 if ((*p) == '@')
765 pa = p;
766 if ((*p) == '/')
767 ps = p + 1;
768 p++;
769 }
770 if (pa < ps)
771 pa = p;
772 sz = (pa - ps) + 1;
773 pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
774 __alignof__(struct property));
775 if (allnextpp) {
776 pp->name = "name";
777 pp->length = sz;
778 pp->value = (unsigned char *)(pp + 1);
779 *prev_pp = pp;
780 prev_pp = &pp->next;
781 memcpy(pp->value, ps, sz - 1);
782 ((char *)pp->value)[sz - 1] = 0;
783 DBG("fixed up name for %s -> %s\n", pathp, pp->value);
784 }
785 }
786 if (allnextpp) {
787 *prev_pp = NULL;
788 np->name = get_property(np, "name", NULL);
789 np->type = get_property(np, "device_type", NULL);
790
791 if (!np->name)
792 np->name = "<NULL>";
793 if (!np->type)
794 np->type = "<NULL>";
795 }
796 while (tag == OF_DT_BEGIN_NODE) {
797 mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
798 tag = *((u32 *)(*p));
799 }
800 if (tag != OF_DT_END_NODE) {
801 printk("Weird tag at end of node: %x\n", tag);
802 return mem;
803 }
804 *p += 4;
805 return mem;
806}
807
808
809/**
810 * unflattens the device-tree passed by the firmware, creating the
811 * tree of struct device_node. It also fills the "name" and "type"
812 * pointers of the nodes so the normal device-tree walking functions
813 * can be used (this used to be done by finish_device_tree)
814 */
815void __init unflatten_device_tree(void)
816{
817 unsigned long start, mem, size;
818 struct device_node **allnextp = &allnodes;
819 char *p = NULL;
820 int l = 0;
821
822 DBG(" -> unflatten_device_tree()\n");
823
824 /* First pass, scan for size */
825 start = ((unsigned long)initial_boot_params) +
826 initial_boot_params->off_dt_struct;
827 size = unflatten_dt_node(0, &start, NULL, NULL, 0);
828 size = (size | 3) + 1;
829
830 DBG(" size is %lx, allocating...\n", size);
831
832 /* Allocate memory for the expanded device tree */
833 mem = lmb_alloc(size + 4, __alignof__(struct device_node));
834 mem = (unsigned long) __va(mem);
835
836 ((u32 *)mem)[size / 4] = 0xdeadbeef;
837
838 DBG(" unflattening %lx...\n", mem);
839
840 /* Second pass, do actual unflattening */
841 start = ((unsigned long)initial_boot_params) +
842 initial_boot_params->off_dt_struct;
843 unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
844 if (*((u32 *)start) != OF_DT_END)
845 printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
846 if (((u32 *)mem)[size / 4] != 0xdeadbeef)
847 printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
848 ((u32 *)mem)[size / 4] );
849 *allnextp = NULL;
850
851 /* Get pointer to OF "/chosen" node for use everywhere */
852 of_chosen = of_find_node_by_path("/chosen");
853 if (of_chosen == NULL)
854 of_chosen = of_find_node_by_path("/chosen@0");
855
 856 /* Retrieve command line */
857 if (of_chosen != NULL) {
858 p = (char *)get_property(of_chosen, "bootargs", &l);
859 if (p != NULL && l > 0)
860 strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE));
861 }
862#ifdef CONFIG_CMDLINE
863 if (l == 0 || (l == 1 && (*p) == 0))
864 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
865#endif /* CONFIG_CMDLINE */
866
867 DBG("Command line is: %s\n", cmd_line);
868
869 DBG(" <- unflatten_device_tree()\n");
870}
871
872
873static int __init early_init_dt_scan_cpus(unsigned long node,
874 const char *uname, int depth, void *data)
875{
 876 u32 *prop;
877 unsigned long size;
878 char *type = of_get_flat_dt_prop(node, "device_type", &size);
879
880 /* We are scanning "cpu" nodes only */
881 if (type == NULL || strcmp(type, "cpu") != 0)
882 return 0;
883
884 boot_cpuid = 0;
885 boot_cpuid_phys = 0;
886 if (initial_boot_params && initial_boot_params->version >= 2) {
887 /* version 2 of the kexec param format adds the phys cpuid
888 * of booted proc.
889 */
890 boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
 891 } else {
 892 /* Check if it's the boot-cpu, set its hw index now */
893 if (of_get_flat_dt_prop(node,
894 "linux,boot-cpu", NULL) != NULL) {
895 prop = of_get_flat_dt_prop(node, "reg", NULL);
896 if (prop != NULL)
897 boot_cpuid_phys = *prop;
898 }
899 }
 900 set_hard_smp_processor_id(0, boot_cpuid_phys);
901
902#ifdef CONFIG_ALTIVEC
903 /* Check if we have a VMX and eventually update CPU features */
 904 prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
905 if (prop && (*prop) > 0) {
906 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
907 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
908 }
909
910 /* Same goes for Apple's "altivec" property */
 911 prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL);
912 if (prop) {
913 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
914 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
915 }
916#endif /* CONFIG_ALTIVEC */
917
918#ifdef CONFIG_PPC_PSERIES
919 /*
920 * Check for an SMT capable CPU and set the CPU feature. We do
921 * this by looking at the size of the ibm,ppc-interrupt-server#s
922 * property
923 */
 924 prop = (u32 *)of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
925 &size);
926 cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
927 if (prop && ((size / sizeof(u32)) > 1))
928 cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
929#endif
930
931 return 0;
932}
933
934static int __init early_init_dt_scan_chosen(unsigned long node,
935 const char *uname, int depth, void *data)
936{
937 u32 *prop;
938 unsigned long *lprop;
939
940 DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
941
942 if (depth != 1 ||
943 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
944 return 0;
945
946 /* get platform type */
 947 prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
948 if (prop == NULL)
949 return 0;
 950#ifdef CONFIG_PPC_MULTIPLATFORM
951 _machine = *prop;
952#endif
953
954#ifdef CONFIG_PPC64
955 /* check if iommu is forced on or off */
 956 if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
 957 iommu_is_off = 1;
 958 if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
959 iommu_force_on = 1;
960#endif
961
 962 lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
963 if (lprop)
964 memory_limit = *lprop;
965
966#ifdef CONFIG_PPC64
 967 lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
968 if (lprop)
969 tce_alloc_start = *lprop;
 970 lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
971 if (lprop)
972 tce_alloc_end = *lprop;
973#endif
974
975#ifdef CONFIG_PPC_RTAS
 976 /* To help early debugging via the front panel, we retrieve a minimal
 977 * set of RTAS info now, if available
978 */
979 {
980 u64 *basep, *entryp;
981
982 basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
983 entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
984 prop = of_get_flat_dt_prop(node, "linux,rtas-size", NULL);
985 if (basep && entryp && prop) {
986 rtas.base = *basep;
987 rtas.entry = *entryp;
988 rtas.size = *prop;
989 }
990 }
991#endif /* CONFIG_PPC_RTAS */
992
993#ifdef CONFIG_KEXEC
994 lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
995 if (lprop)
996 crashk_res.start = *lprop;
997
998 lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
999 if (lprop)
1000 crashk_res.end = crashk_res.start + *lprop - 1;
1001#endif
1002
1003 /* break now */
1004 return 1;
1005}
1006
1007static int __init early_init_dt_scan_root(unsigned long node,
1008 const char *uname, int depth, void *data)
1009{
1010 u32 *prop;
1011
1012 if (depth != 0)
1013 return 0;
1014
 1015 prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
1016 dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1017 DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1018
 1019 prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
1020 dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1021 DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1022
1023 /* break now */
1024 return 1;
1025}
1026
1027static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
1028{
1029 cell_t *p = *cellp;
1030 unsigned long r;
1031
1032 /* Ignore more than 2 cells */
1033 while (s > sizeof(unsigned long) / 4) {
1034 p++;
1035 s--;
1036 }
1037 r = *p++;
1038#ifdef CONFIG_PPC64
1039 if (s > 1) {
1040 r <<= 32;
1041 r |= *(p++);
1042 s--;
1043 }
1044#endif
1045
1046 *cellp = p;
1047 return r;
1048}
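As a worked example of dt_mem_next_cell(): with dt_root_addr_cells = 2 and dt_root_size_cells = 1, a reg entry of { 0x00000001, 0x00000000, 0x10000000 } decodes on a 64-bit kernel to base 0x100000000 (the two address cells are combined) and size 0x10000000; on a 32-bit kernel the loop at the top skips the high address cell, so only the low 32 bits (here 0) are kept.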
1049
1050
1051static int __init early_init_dt_scan_memory(unsigned long node,
1052 const char *uname, int depth, void *data)
1053{
 1054 char *type = of_get_flat_dt_prop(node, "device_type", NULL);
1055 cell_t *reg, *endp;
1056 unsigned long l;
1057
1058 /* We are scanning "memory" nodes only */
1059 if (type == NULL) {
1060 /*
1061 * The longtrail doesn't have a device_type on the
1062 * /memory node, so look for the node called /memory@0.
1063 */
1064 if (depth != 1 || strcmp(uname, "memory@0") != 0)
1065 return 0;
1066 } else if (strcmp(type, "memory") != 0)
1067 return 0;
1068
1069 reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
1070 if (reg == NULL)
1071 reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
1072 if (reg == NULL)
1073 return 0;
1074
1075 endp = reg + (l / sizeof(cell_t));
1076
 1077 DBG("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
1078 uname, l, reg[0], reg[1], reg[2], reg[3]);
1079
1080 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1081 unsigned long base, size;
1082
1083 base = dt_mem_next_cell(dt_root_addr_cells, &reg);
1084 size = dt_mem_next_cell(dt_root_size_cells, &reg);
1085
1086 if (size == 0)
1087 continue;
1088 DBG(" - %lx , %lx\n", base, size);
1089#ifdef CONFIG_PPC64
1090 if (iommu_is_off) {
1091 if (base >= 0x80000000ul)
1092 continue;
1093 if ((base + size) > 0x80000000ul)
1094 size = 0x80000000ul - base;
1095 }
1096#endif
1097 lmb_add(base, size);
1098 }
1099 return 0;
1100}
1101
1102static void __init early_reserve_mem(void)
1103{
1104 u64 base, size;
1105 u64 *reserve_map;
 1106
 1107 reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
 1108 initial_boot_params->off_mem_rsvmap);
1109#ifdef CONFIG_PPC32
1110 /*
1111 * Handle the case where we might be booting from an old kexec
1112 * image that setup the mem_rsvmap as pairs of 32-bit values
1113 */
1114 if (*reserve_map > 0xffffffffull) {
1115 u32 base_32, size_32;
1116 u32 *reserve_map_32 = (u32 *)reserve_map;
1117
1118 while (1) {
1119 base_32 = *(reserve_map_32++);
1120 size_32 = *(reserve_map_32++);
1121 if (size_32 == 0)
1122 break;
1123 DBG("reserving: %lx -> %lx\n", base_32, size_32);
1124 lmb_reserve(base_32, size_32);
1125 }
1126 return;
1127 }
1128#endif
1129 while (1) {
1130 base = *(reserve_map++);
1131 size = *(reserve_map++);
1132 if (size == 0)
1133 break;
 1134 DBG("reserving: %llx -> %llx\n", base, size);
1135 lmb_reserve(base, size);
1136 }
1137
1138#if 0
1139 DBG("memory reserved, lmbs :\n");
1140 lmb_dump_all();
1141#endif
1142}
1143
1144void __init early_init_devtree(void *params)
1145{
1146 DBG(" -> early_init_devtree()\n");
1147
1148 /* Setup flat device-tree pointer */
1149 initial_boot_params = params;
1150
 1151 /* Retrieve various information from the /chosen node of the
1152 * device-tree, including the platform type, initrd location and
1153 * size, TCE reserve, and more ...
1154 */
 1155 of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
1156
1157 /* Scan memory nodes and rebuild LMBs */
1158 lmb_init();
1159 of_scan_flat_dt(early_init_dt_scan_root, NULL);
1160 of_scan_flat_dt(early_init_dt_scan_memory, NULL);
1161 lmb_enforce_memory_limit(memory_limit);
1162 lmb_analyze();
1163
1164 DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
1165
1166 /* Reserve LMB regions used by kernel, initrd, dt, etc... */
1167 lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
1168#ifdef CONFIG_CRASH_DUMP
1169 lmb_reserve(0, KDUMP_RESERVE_LIMIT);
1170#endif
1171 early_reserve_mem();
1172
1173 DBG("Scanning CPUs ...\n");
1174
 1175 /* Retrieve CPU related information from the flat tree
1176 * (altivec support, boot CPU ID, ...)
 1177 */
 1178 of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
 1179
1180 DBG(" <- early_init_devtree()\n");
1181}
1182
1183#undef printk
1184
1185int
1186prom_n_addr_cells(struct device_node* np)
1187{
1188 int* ip;
1189 do {
1190 if (np->parent)
1191 np = np->parent;
1192 ip = (int *) get_property(np, "#address-cells", NULL);
1193 if (ip != NULL)
1194 return *ip;
1195 } while (np->parent);
1196 /* No #address-cells property for the root node, default to 1 */
1197 return 1;
1198}
 1199EXPORT_SYMBOL(prom_n_addr_cells);
1200
1201int
1202prom_n_size_cells(struct device_node* np)
1203{
1204 int* ip;
1205 do {
1206 if (np->parent)
1207 np = np->parent;
1208 ip = (int *) get_property(np, "#size-cells", NULL);
1209 if (ip != NULL)
1210 return *ip;
1211 } while (np->parent);
1212 /* No #size-cells property for the root node, default to 1 */
1213 return 1;
1214}
 1215EXPORT_SYMBOL(prom_n_size_cells);
1216
1217/**
1218 * Work out the sense (active-low level / active-high edge)
1219 * of each interrupt from the device tree.
1220 */
1221void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
1222{
1223 struct device_node *np;
1224 int i, j;
1225
1226 /* default to level-triggered */
 1227 memset(senses, IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, max - off);
1228
1229 for (np = allnodes; np != 0; np = np->allnext) {
1230 for (j = 0; j < np->n_intrs; j++) {
1231 i = np->intrs[j].line;
1232 if (i >= off && i < max)
 1233 senses[i-off] = np->intrs[j].sense;
1234 }
1235 }
1236}
1237
1238/**
1239 * Construct and return a list of the device_nodes with a given name.
1240 */
1241struct device_node *find_devices(const char *name)
1242{
1243 struct device_node *head, **prevp, *np;
1244
1245 prevp = &head;
1246 for (np = allnodes; np != 0; np = np->allnext) {
1247 if (np->name != 0 && strcasecmp(np->name, name) == 0) {
1248 *prevp = np;
1249 prevp = &np->next;
1250 }
1251 }
1252 *prevp = NULL;
1253 return head;
1254}
1255EXPORT_SYMBOL(find_devices);
1256
1257/**
1258 * Construct and return a list of the device_nodes with a given type.
1259 */
1260struct device_node *find_type_devices(const char *type)
1261{
1262 struct device_node *head, **prevp, *np;
1263
1264 prevp = &head;
1265 for (np = allnodes; np != 0; np = np->allnext) {
1266 if (np->type != 0 && strcasecmp(np->type, type) == 0) {
1267 *prevp = np;
1268 prevp = &np->next;
1269 }
1270 }
1271 *prevp = NULL;
1272 return head;
1273}
1274EXPORT_SYMBOL(find_type_devices);
1275
1276/**
1277 * Returns all nodes linked together
1278 */
1279struct device_node *find_all_nodes(void)
1280{
1281 struct device_node *head, **prevp, *np;
1282
1283 prevp = &head;
1284 for (np = allnodes; np != 0; np = np->allnext) {
1285 *prevp = np;
1286 prevp = &np->next;
1287 }
1288 *prevp = NULL;
1289 return head;
1290}
1291EXPORT_SYMBOL(find_all_nodes);
1292
1293/** Checks if the given "compat" string matches one of the strings in
1294 * the device's "compatible" property
1295 */
1296int device_is_compatible(struct device_node *device, const char *compat)
1297{
1298 const char* cp;
1299 int cplen, l;
1300
1301 cp = (char *) get_property(device, "compatible", &cplen);
1302 if (cp == NULL)
1303 return 0;
1304 while (cplen > 0) {
1305 if (strncasecmp(cp, compat, strlen(compat)) == 0)
1306 return 1;
1307 l = strlen(cp) + 1;
1308 cp += l;
1309 cplen -= l;
1310 }
1311
1312 return 0;
1313}
1314EXPORT_SYMBOL(device_is_compatible);
1315
1316
1317/**
1318 * Indicates whether the root node has a given value in its
1319 * compatible property.
1320 */
1321int machine_is_compatible(const char *compat)
1322{
1323 struct device_node *root;
1324 int rc = 0;
1325
1326 root = of_find_node_by_path("/");
1327 if (root) {
1328 rc = device_is_compatible(root, compat);
1329 of_node_put(root);
1330 }
1331 return rc;
1332}
1333EXPORT_SYMBOL(machine_is_compatible);
1334
1335/**
1336 * Construct and return a list of the device_nodes with a given type
1337 * and compatible property.
1338 */
1339struct device_node *find_compatible_devices(const char *type,
1340 const char *compat)
1341{
1342 struct device_node *head, **prevp, *np;
1343
1344 prevp = &head;
1345 for (np = allnodes; np != 0; np = np->allnext) {
1346 if (type != NULL
1347 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1348 continue;
1349 if (device_is_compatible(np, compat)) {
1350 *prevp = np;
1351 prevp = &np->next;
1352 }
1353 }
1354 *prevp = NULL;
1355 return head;
1356}
1357EXPORT_SYMBOL(find_compatible_devices);
1358
1359/**
1360 * Find the device_node with a given full_name.
1361 */
1362struct device_node *find_path_device(const char *path)
1363{
1364 struct device_node *np;
1365
1366 for (np = allnodes; np != 0; np = np->allnext)
1367 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
1368 return np;
1369 return NULL;
1370}
1371EXPORT_SYMBOL(find_path_device);
1372
1373/*******
1374 *
1375 * New implementation of the OF "find" APIs, return a refcounted
1376 * object, call of_node_put() when done. The device tree and list
1377 * are protected by a rw_lock.
1378 *
1379 * Note that property management will need some locking as well,
1380 * this isn't dealt with yet.
1381 *
1382 *******/
1383
1384/**
1385 * of_find_node_by_name - Find a node by its "name" property
1386 * @from: The node to start searching from or NULL, the node
1387 * you pass will not be searched, only the next one
1388 * will; typically, you pass what the previous call
1389 * returned. of_node_put() will be called on it
1390 * @name: The name string to match against
1391 *
1392 * Returns a node pointer with refcount incremented, use
1393 * of_node_put() on it when done.
1394 */
1395struct device_node *of_find_node_by_name(struct device_node *from,
1396 const char *name)
1397{
1398 struct device_node *np;
1399
1400 read_lock(&devtree_lock);
1401 np = from ? from->allnext : allnodes;
1402 for (; np != NULL; np = np->allnext)
1403 if (np->name != NULL && strcasecmp(np->name, name) == 0
1404 && of_node_get(np))
1405 break;
1406 if (from)
1407 of_node_put(from);
1408 read_unlock(&devtree_lock);
1409 return np;
1410}
1411EXPORT_SYMBOL(of_find_node_by_name);
1412
1413/**
1414 * of_find_node_by_type - Find a node by its "device_type" property
1415 * @from: The node to start searching from or NULL, the node
1416 * you pass will not be searched, only the next one
1417 * will; typically, you pass what the previous call
1418 * returned. of_node_put() will be called on it
1419 * @name: The type string to match against
1420 *
1421 * Returns a node pointer with refcount incremented, use
1422 * of_node_put() on it when done.
1423 */
1424struct device_node *of_find_node_by_type(struct device_node *from,
1425 const char *type)
1426{
1427 struct device_node *np;
1428
1429 read_lock(&devtree_lock);
1430 np = from ? from->allnext : allnodes;
1431 for (; np != 0; np = np->allnext)
1432 if (np->type != 0 && strcasecmp(np->type, type) == 0
1433 && of_node_get(np))
1434 break;
1435 if (from)
1436 of_node_put(from);
1437 read_unlock(&devtree_lock);
1438 return np;
1439}
1440EXPORT_SYMBOL(of_find_node_by_type);
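A note on usage: because each call puts the reference on the node passed in and gets one on the node returned, these finders are normally driven in a loop that feeds each result back in as the new starting point, with an explicit of_node_put() only when breaking out early. A short sketch (the property test here is illustrative only):

	struct device_node *np = NULL;

	while ((np = of_find_node_by_type(np, "cpu")) != NULL) {
		if (get_property(np, "reg", NULL) == NULL) {
			of_node_put(np);	/* leaving early, drop our reference */
			break;
		}
		/* np is released by the next of_find_node_by_type() call */
	}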
1441
1442/**
1443 * of_find_compatible_node - Find a node based on type and one of the
1444 * tokens in its "compatible" property
1445 * @from: The node to start searching from or NULL, the node
1446 * you pass will not be searched, only the next one
1447 * will; typically, you pass what the previous call
1448 * returned. of_node_put() will be called on it
1449 * @type: The type string to match "device_type" or NULL to ignore
1450 * @compatible: The string to match to one of the tokens in the device
1451 * "compatible" list.
1452 *
1453 * Returns a node pointer with refcount incremented, use
1454 * of_node_put() on it when done.
1455 */
1456struct device_node *of_find_compatible_node(struct device_node *from,
1457 const char *type, const char *compatible)
1458{
1459 struct device_node *np;
1460
1461 read_lock(&devtree_lock);
1462 np = from ? from->allnext : allnodes;
1463 for (; np != 0; np = np->allnext) {
1464 if (type != NULL
1465 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1466 continue;
1467 if (device_is_compatible(np, compatible) && of_node_get(np))
1468 break;
1469 }
1470 if (from)
1471 of_node_put(from);
1472 read_unlock(&devtree_lock);
1473 return np;
1474}
1475EXPORT_SYMBOL(of_find_compatible_node);
1476
1477/**
1478 * of_find_node_by_path - Find a node matching a full OF path
1479 * @path: The full path to match
1480 *
1481 * Returns a node pointer with refcount incremented, use
1482 * of_node_put() on it when done.
1483 */
1484struct device_node *of_find_node_by_path(const char *path)
1485{
1486 struct device_node *np = allnodes;
1487
1488 read_lock(&devtree_lock);
1489 for (; np != 0; np = np->allnext) {
1490 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1491 && of_node_get(np))
1492 break;
1493 }
1494 read_unlock(&devtree_lock);
1495 return np;
1496}
1497EXPORT_SYMBOL(of_find_node_by_path);
1498
1499/**
1500 * of_find_node_by_phandle - Find a node given a phandle
1501 * @handle: phandle of the node to find
1502 *
1503 * Returns a node pointer with refcount incremented, use
1504 * of_node_put() on it when done.
1505 */
1506struct device_node *of_find_node_by_phandle(phandle handle)
1507{
1508 struct device_node *np;
1509
1510 read_lock(&devtree_lock);
1511 for (np = allnodes; np != 0; np = np->allnext)
1512 if (np->linux_phandle == handle)
1513 break;
1514 if (np)
1515 of_node_get(np);
1516 read_unlock(&devtree_lock);
1517 return np;
1518}
1519EXPORT_SYMBOL(of_find_node_by_phandle);
1520
1521/**
1522 * of_find_all_nodes - Get next node in global list
1523 * @prev: Previous node or NULL to start iteration
1524 * of_node_put() will be called on it
1525 *
1526 * Returns a node pointer with refcount incremented, use
1527 * of_node_put() on it when done.
1528 */
1529struct device_node *of_find_all_nodes(struct device_node *prev)
1530{
1531 struct device_node *np;
1532
1533 read_lock(&devtree_lock);
1534 np = prev ? prev->allnext : allnodes;
1535 for (; np != 0; np = np->allnext)
1536 if (of_node_get(np))
1537 break;
1538 if (prev)
1539 of_node_put(prev);
1540 read_unlock(&devtree_lock);
1541 return np;
1542}
1543EXPORT_SYMBOL(of_find_all_nodes);
1544
1545/**
1546 * of_get_parent - Get a node's parent if any
1547 * @node: Node to get parent
1548 *
1549 * Returns a node pointer with refcount incremented, use
1550 * of_node_put() on it when done.
1551 */
1552struct device_node *of_get_parent(const struct device_node *node)
1553{
1554 struct device_node *np;
1555
1556 if (!node)
1557 return NULL;
1558
1559 read_lock(&devtree_lock);
1560 np = of_node_get(node->parent);
1561 read_unlock(&devtree_lock);
1562 return np;
1563}
1564EXPORT_SYMBOL(of_get_parent);
1565
1566/**
 1567 * of_get_next_child - Iterate over a node's children
1568 * @node: parent node
1569 * @prev: previous child of the parent node, or NULL to get first
1570 *
1571 * Returns a node pointer with refcount incremented, use
1572 * of_node_put() on it when done.
1573 */
1574struct device_node *of_get_next_child(const struct device_node *node,
1575 struct device_node *prev)
1576{
1577 struct device_node *next;
1578
1579 read_lock(&devtree_lock);
1580 next = prev ? prev->sibling : node->child;
1581 for (; next != 0; next = next->sibling)
1582 if (of_node_get(next))
1583 break;
1584 if (prev)
1585 of_node_put(prev);
1586 read_unlock(&devtree_lock);
1587 return next;
1588}
1589EXPORT_SYMBOL(of_get_next_child);
1590
1591/**
1592 * of_node_get - Increment refcount of a node
1593 * @node: Node to inc refcount, NULL is supported to
1594 * simplify writing of callers
1595 *
1596 * Returns node.
1597 */
1598struct device_node *of_node_get(struct device_node *node)
1599{
1600 if (node)
1601 kref_get(&node->kref);
1602 return node;
1603}
1604EXPORT_SYMBOL(of_node_get);
1605
1606static inline struct device_node * kref_to_device_node(struct kref *kref)
1607{
1608 return container_of(kref, struct device_node, kref);
1609}
1610
1611/**
1612 * of_node_release - release a dynamically allocated node
1613 * @kref: kref element of the node to be released
1614 *
1615 * In of_node_put() this function is passed to kref_put()
1616 * as the destructor.
1617 */
1618static void of_node_release(struct kref *kref)
1619{
1620 struct device_node *node = kref_to_device_node(kref);
1621 struct property *prop = node->properties;
1622
1623 if (!OF_IS_DYNAMIC(node))
1624 return;
1625 while (prop) {
1626 struct property *next = prop->next;
1627 kfree(prop->name);
1628 kfree(prop->value);
1629 kfree(prop);
1630 prop = next;
1631
1632 if (!prop) {
1633 prop = node->deadprops;
1634 node->deadprops = NULL;
1635 }
1636 }
1637 kfree(node->intrs);
1638 kfree(node->full_name);
1639 kfree(node->data);
1640 kfree(node);
1641}
1642
1643/**
1644 * of_node_put - Decrement refcount of a node
1645 * @node: Node to dec refcount, NULL is supported to
1646 * simplify writing of callers
1647 *
1648 */
1649void of_node_put(struct device_node *node)
1650{
1651 if (node)
1652 kref_put(&node->kref, of_node_release);
1653}
1654EXPORT_SYMBOL(of_node_put);
1655
1656/*
1657 * Plug a device node into the tree and global list.
1658 */
1659void of_attach_node(struct device_node *np)
1660{
1661 write_lock(&devtree_lock);
1662 np->sibling = np->parent->child;
1663 np->allnext = allnodes;
1664 np->parent->child = np;
1665 allnodes = np;
1666 write_unlock(&devtree_lock);
1667}
1668
1669/*
1670 * "Unplug" a node from the device tree. The caller must hold
1671 * a reference to the node. The memory associated with the node
1672 * is not freed until its refcount goes to zero.
1673 */
1674void of_detach_node(const struct device_node *np)
1675{
1676 struct device_node *parent;
1677
1678 write_lock(&devtree_lock);
1679
1680 parent = np->parent;
1681
1682 if (allnodes == np)
1683 allnodes = np->allnext;
1684 else {
1685 struct device_node *prev;
1686 for (prev = allnodes;
1687 prev->allnext != np;
1688 prev = prev->allnext)
1689 ;
1690 prev->allnext = np->allnext;
1691 }
1692
1693 if (parent->child == np)
1694 parent->child = np->sibling;
1695 else {
1696 struct device_node *prevsib;
1697 for (prevsib = np->parent->child;
1698 prevsib->sibling != np;
1699 prevsib = prevsib->sibling)
1700 ;
1701 prevsib->sibling = np->sibling;
1702 }
1703
1704 write_unlock(&devtree_lock);
1705}
1706
1707#ifdef CONFIG_PPC_PSERIES
1708/*
1709 * Fix up the uninitialized fields in a new device node:
1710 * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
1711 *
1712 * A lot of boot-time code is duplicated here, because functions such
1713 * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
1714 * slab allocator.
1715 *
1716 * This should probably be split up into smaller chunks.
1717 */
1718
 1719static int of_finish_dynamic_node(struct device_node *node)
1720{
1721 struct device_node *parent = of_get_parent(node);
1722 int err = 0;
1723 phandle *ibm_phandle;
1724
1725 node->name = get_property(node, "name", NULL);
1726 node->type = get_property(node, "device_type", NULL);
1727
1728 if (!parent) {
1729 err = -ENODEV;
1730 goto out;
1731 }
1732
1733 /* We don't support that function on PowerMac, at least
1734 * not yet
1735 */
 1736 if (_machine == PLATFORM_POWERMAC)
1737 return -ENODEV;
1738
1739 /* fix up new node's linux_phandle field */
1740 if ((ibm_phandle = (unsigned int *)get_property(node,
1741 "ibm,phandle", NULL)))
1742 node->linux_phandle = *ibm_phandle;
1743
1744out:
1745 of_node_put(parent);
1746 return err;
1747}
1748
1749static int prom_reconfig_notifier(struct notifier_block *nb,
1750 unsigned long action, void *node)
1751{
1752 int err;
1753
1754 switch (action) {
1755 case PSERIES_RECONFIG_ADD:
1756 err = of_finish_dynamic_node(node);
1757 if (!err)
1758 finish_node(node, NULL, 0);
1759 if (err < 0) {
1760 printk(KERN_ERR "finish_node returned %d\n", err);
1761 err = NOTIFY_BAD;
1762 }
1763 break;
1764 default:
1765 err = NOTIFY_DONE;
1766 break;
1767 }
1768 return err;
1769}
1770
1771static struct notifier_block prom_reconfig_nb = {
1772 .notifier_call = prom_reconfig_notifier,
1773 .priority = 10, /* This one needs to run first */
1774};
1775
1776static int __init prom_reconfig_setup(void)
1777{
1778 return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
1779}
1780__initcall(prom_reconfig_setup);
1781#endif
1782
1783struct property *of_find_property(struct device_node *np, const char *name,
1784 int *lenp)
1785{
1786 struct property *pp;
1787
 1788 read_lock(&devtree_lock);
1789 for (pp = np->properties; pp != 0; pp = pp->next)
1790 if (strcmp(pp->name, name) == 0) {
1791 if (lenp != 0)
1792 *lenp = pp->length;
 1793 break;
 1794 }
1795 read_unlock(&devtree_lock);
1796
1797 return pp;
1798}
1799
1800/*
1801 * Find a property with a given name for a given node
1802 * and return the value.
1803 */
1804unsigned char *get_property(struct device_node *np, const char *name,
1805 int *lenp)
1806{
1807 struct property *pp = of_find_property(np,name,lenp);
 1808 return pp ? pp->value : NULL;
1809}
1810EXPORT_SYMBOL(get_property);
1811
1812/*
1813 * Add a property to a node
1814 */
 1815int prom_add_property(struct device_node* np, struct property* prop)
 1816{
 1817 struct property **next;
1818
1819 prop->next = NULL;
1820 write_lock(&devtree_lock);
1821 next = &np->properties;
1822 while (*next) {
1823 if (strcmp(prop->name, (*next)->name) == 0) {
1824 /* duplicate ! don't insert it */
1825 write_unlock(&devtree_lock);
1826 return -1;
1827 }
 1828 next = &(*next)->next;
 1829 }
 1830 *next = prop;
1831 write_unlock(&devtree_lock);
1832
 1833#ifdef CONFIG_PROC_DEVICETREE
1834 /* try to add to proc as well if it was initialized */
1835 if (np->pde)
1836 proc_device_tree_add_prop(np->pde, prop);
 1837#endif /* CONFIG_PROC_DEVICETREE */
1838
1839 return 0;
1840}
1841
1842/*
1843 * Remove a property from a node. Note that we don't actually
1844 * remove it, since we have given out who-knows-how-many pointers
1845 * to the data using get-property. Instead we just move the property
1846 * to the "dead properties" list, so it won't be found any more.
1847 */
1848int prom_remove_property(struct device_node *np, struct property *prop)
1849{
1850 struct property **next;
1851 int found = 0;
1852
1853 write_lock(&devtree_lock);
1854 next = &np->properties;
1855 while (*next) {
1856 if (*next == prop) {
1857 /* found the node */
1858 *next = prop->next;
1859 prop->next = np->deadprops;
1860 np->deadprops = prop;
1861 found = 1;
1862 break;
1863 }
1864 next = &(*next)->next;
1865 }
1866 write_unlock(&devtree_lock);
1867
1868 if (!found)
1869 return -ENODEV;
1870
1871#ifdef CONFIG_PROC_DEVICETREE
1872 /* try to remove the proc node as well */
1873 if (np->pde)
1874 proc_device_tree_remove_prop(np->pde, prop);
1875#endif /* CONFIG_PROC_DEVICETREE */
1876
1877 return 0;
1878}
1879
1880/*
1881 * Update a property in a node. Note that we don't actually
1882 * remove it, since we have given out who-knows-how-many pointers
1883 * to the data using get-property. Instead we just move the property
1884 * to the "dead properties" list, and add the new property to the
1885 * property list
1886 */
1887int prom_update_property(struct device_node *np,
1888 struct property *newprop,
1889 struct property *oldprop)
1890{
1891 struct property **next;
1892 int found = 0;
1893
1894 write_lock(&devtree_lock);
1895 next = &np->properties;
1896 while (*next) {
1897 if (*next == oldprop) {
1898 /* found the node */
1899 newprop->next = oldprop->next;
1900 *next = newprop;
1901 oldprop->next = np->deadprops;
1902 np->deadprops = oldprop;
1903 found = 1;
1904 break;
1905 }
1906 next = &(*next)->next;
1907 }
1908 write_unlock(&devtree_lock);
1909
1910 if (!found)
1911 return -ENODEV;
 1912
1913#ifdef CONFIG_PROC_DEVICETREE
1914 /* try to add to proc as well if it was initialized */
1915 if (np->pde)
1916 proc_device_tree_update_prop(np->pde, newprop, oldprop);
1917#endif /* CONFIG_PROC_DEVICETREE */
1918
1919 return 0;
1920}
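As a usage illustration for the add/remove/update helpers above (a sketch only; the property name and value are hypothetical): the caller builds the replacement struct property itself, and it must stay allocated for the life of the node, since the list stores the pointer rather than a copy.

	static u32 example_val = 1;
	static struct property example_prop = {
		.name   = "example-prop",
		.length = sizeof(example_val),
		.value  = &example_val,
	};

	if (prom_update_property(np, &example_prop, old_prop) != 0)
		printk(KERN_WARNING "old property not found on %s\n", np->full_name);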
1921
1922#ifdef CONFIG_KEXEC
1923/* We may have allocated the flat device tree inside the crash kernel region
1924 * in prom_init. If so we need to move it out into regular memory. */
1925void kdump_move_device_tree(void)
1926{
1927 unsigned long start, end;
1928 struct boot_param_header *new;
1929
1930 start = __pa((unsigned long)initial_boot_params);
1931 end = start + initial_boot_params->totalsize;
1932
1933 if (end < crashk_res.start || start > crashk_res.end)
1934 return;
1935
1936 new = (struct boot_param_header*)
1937 __va(lmb_alloc(initial_boot_params->totalsize, PAGE_SIZE));
1938
1939 memcpy(new, initial_boot_params, initial_boot_params->totalsize);
1940
1941 initial_boot_params = new;
1942
1943 DBG("Flat device tree blob moved to %p\n", initial_boot_params);
1944
1945 /* XXX should we unreserve the old DT? */
1946}
1947#endif /* CONFIG_KEXEC */