[PATCH] powerpc: Experimental support for new G5 Macs (#2)
arch/powerpc/kernel/prom.c
1 /*
2 * Procedures for creating, accessing and interpreting the device tree.
3 *
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16 #undef DEBUG
17
18 #include <stdarg.h>
19 #include <linux/config.h>
20 #include <linux/kernel.h>
21 #include <linux/string.h>
22 #include <linux/init.h>
23 #include <linux/threads.h>
24 #include <linux/spinlock.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/stringify.h>
28 #include <linux/delay.h>
29 #include <linux/initrd.h>
30 #include <linux/bitops.h>
31 #include <linux/module.h>
32 #include <linux/kexec.h>
33
34 #include <asm/prom.h>
35 #include <asm/rtas.h>
36 #include <asm/lmb.h>
37 #include <asm/page.h>
38 #include <asm/processor.h>
39 #include <asm/irq.h>
40 #include <asm/io.h>
41 #include <asm/kdump.h>
42 #include <asm/smp.h>
43 #include <asm/system.h>
44 #include <asm/mmu.h>
45 #include <asm/pgtable.h>
46 #include <asm/pci.h>
47 #include <asm/iommu.h>
48 #include <asm/btext.h>
49 #include <asm/sections.h>
50 #include <asm/machdep.h>
51 #include <asm/pSeries_reconfig.h>
52 #include <asm/pci-bridge.h>
53
54 #ifdef DEBUG
55 #define DBG(fmt...) printk(KERN_ERR fmt)
56 #else
57 #define DBG(fmt...)
58 #endif
59
60
61 static int __initdata dt_root_addr_cells;
62 static int __initdata dt_root_size_cells;
63
64 #ifdef CONFIG_PPC64
65 static int __initdata iommu_is_off;
66 int __initdata iommu_force_on;
67 unsigned long tce_alloc_start, tce_alloc_end;
68 #endif
69
70 typedef u32 cell_t;
71
72 #if 0
73 static struct boot_param_header *initial_boot_params __initdata;
74 #else
75 struct boot_param_header *initial_boot_params;
76 #endif
77
78 static struct device_node *allnodes = NULL;
79
80 /* use when traversing tree through the allnext, child, sibling,
81 * or parent members of struct device_node.
82 */
83 static DEFINE_RWLOCK(devtree_lock);
84
85 /* exported to the outside world */
86 struct device_node *of_chosen;
87
88 struct device_node *dflt_interrupt_controller;
89 int num_interrupt_controllers;
90
91 /*
92 * Wrapper for allocating memory for various data that needs to be
93 * attached to device nodes as they are processed at boot or when
94 * added to the device tree later (e.g. DLPAR). At boot there is
95 * already a region reserved so we just increment *mem_start by size;
96 * otherwise we call kmalloc.
97 */
98 static void * prom_alloc(unsigned long size, unsigned long *mem_start)
99 {
100 unsigned long tmp;
101
102 if (!mem_start)
103 return kmalloc(size, GFP_KERNEL);
104
105 tmp = *mem_start;
106 *mem_start += size;
107 return (void *)tmp;
108 }
109
110 /*
111 * Find the device_node with a given phandle.
112 */
113 static struct device_node * find_phandle(phandle ph)
114 {
115 struct device_node *np;
116
117 for (np = allnodes; np != 0; np = np->allnext)
118 if (np->linux_phandle == ph)
119 return np;
120 return NULL;
121 }
122
123 /*
124 * Find the interrupt parent of a node.
125 */
126 static struct device_node * __devinit intr_parent(struct device_node *p)
127 {
128 phandle *parp;
129
130 parp = (phandle *) get_property(p, "interrupt-parent", NULL);
131 if (parp == NULL)
132 return p->parent;
133 p = find_phandle(*parp);
134 if (p != NULL)
135 return p;
136 /*
137 * On a powermac booted with BootX, we don't get to know the
138 * phandles for any nodes, so find_phandle will return NULL.
139 * Fortunately these machines only have one interrupt controller
140 * so there isn't in fact any ambiguity. -- paulus
141 */
142 if (num_interrupt_controllers == 1)
143 p = dflt_interrupt_controller;
144 return p;
145 }
146
147 /*
148 * Find out the size of each entry of the interrupts property
149 * for a node.
150 */
151 int __devinit prom_n_intr_cells(struct device_node *np)
152 {
153 struct device_node *p;
154 unsigned int *icp;
155
156 for (p = np; (p = intr_parent(p)) != NULL; ) {
157 icp = (unsigned int *)
158 get_property(p, "#interrupt-cells", NULL);
159 if (icp != NULL)
160 return *icp;
161 if (get_property(p, "interrupt-controller", NULL) != NULL
162 || get_property(p, "interrupt-map", NULL) != NULL) {
163 printk("oops, node %s doesn't have #interrupt-cells\n",
164 p->full_name);
165 return 1;
166 }
167 }
168 #ifdef DEBUG_IRQ
169 printk("prom_n_intr_cells failed for %s\n", np->full_name);
170 #endif
171 return 1;
172 }
173
174 /*
175 * Map an interrupt from a device up to the platform interrupt
176 * descriptor.
177 */
178 static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
179 struct device_node *np, unsigned int *ints,
180 int nintrc)
181 {
182 struct device_node *p, *ipar;
183 unsigned int *imap, *imask, *ip;
184 int i, imaplen, match;
185 int newintrc = 0, newaddrc = 0;
186 unsigned int *reg;
187 int naddrc;
188
189 reg = (unsigned int *) get_property(np, "reg", NULL);
190 naddrc = prom_n_addr_cells(np);
191 p = intr_parent(np);
192 while (p != NULL) {
193 if (get_property(p, "interrupt-controller", NULL) != NULL)
194 /* this node is an interrupt controller, stop here */
195 break;
196 imap = (unsigned int *)
197 get_property(p, "interrupt-map", &imaplen);
198 if (imap == NULL) {
199 p = intr_parent(p);
200 continue;
201 }
202 imask = (unsigned int *)
203 get_property(p, "interrupt-map-mask", NULL);
204 if (imask == NULL) {
205 printk("oops, %s has interrupt-map but no mask\n",
206 p->full_name);
207 return 0;
208 }
209 imaplen /= sizeof(unsigned int);
210 match = 0;
211 ipar = NULL;
212 while (imaplen > 0 && !match) {
213 /* check the child-interrupt field */
214 match = 1;
215 for (i = 0; i < naddrc && match; ++i)
216 match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
217 for (; i < naddrc + nintrc && match; ++i)
218 match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
219 imap += naddrc + nintrc;
220 imaplen -= naddrc + nintrc;
221 /* grab the interrupt parent */
222 ipar = find_phandle((phandle) *imap++);
223 --imaplen;
224 if (ipar == NULL && num_interrupt_controllers == 1)
225 /* cope with BootX not giving us phandles */
226 ipar = dflt_interrupt_controller;
227 if (ipar == NULL) {
228 printk("oops, no int parent %x in map of %s\n",
229 imap[-1], p->full_name);
230 return 0;
231 }
232 /* find the parent's # addr and intr cells */
233 ip = (unsigned int *)
234 get_property(ipar, "#interrupt-cells", NULL);
235 if (ip == NULL) {
236 printk("oops, no #interrupt-cells on %s\n",
237 ipar->full_name);
238 return 0;
239 }
240 newintrc = *ip;
241 ip = (unsigned int *)
242 get_property(ipar, "#address-cells", NULL);
243 newaddrc = (ip == NULL)? 0: *ip;
244 imap += newaddrc + newintrc;
245 imaplen -= newaddrc + newintrc;
246 }
247 if (imaplen < 0) {
248 printk("oops, error decoding int-map on %s, len=%d\n",
249 p->full_name, imaplen);
250 return 0;
251 }
252 if (!match) {
253 #ifdef DEBUG_IRQ
254 printk("oops, no match in %s int-map for %s\n",
255 p->full_name, np->full_name);
256 #endif
257 return 0;
258 }
259 p = ipar;
260 naddrc = newaddrc;
261 nintrc = newintrc;
262 ints = imap - nintrc;
263 reg = ints - naddrc;
264 }
265 if (p == NULL) {
266 #ifdef DEBUG_IRQ
267 printk("hmmm, int tree for %s doesn't have ctrler\n",
268 np->full_name);
269 #endif
270 return 0;
271 }
272 *irq = ints;
273 *ictrler = p;
274 return nintrc;
275 }
276
277 static unsigned char map_isa_senses[4] = {
278 IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
279 IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
280 IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE,
281 IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE
282 };
283
284 static unsigned char map_mpic_senses[4] = {
285 IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE,
286 IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
287 /* 2 seems to be used for the 8259 cascade... */
288 IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
289 IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE,
290 };
291
292 static int __devinit finish_node_interrupts(struct device_node *np,
293 unsigned long *mem_start,
294 int measure_only)
295 {
296 unsigned int *ints;
297 int intlen, intrcells, intrcount;
298 int i, j, n, sense;
299 unsigned int *irq, virq;
300 struct device_node *ic;
301 int trace = 0;
302
303 //#define TRACE(fmt...) do { if (trace) { printk(fmt); mdelay(1000); } } while(0)
304 #define TRACE(fmt...)
305
306 if (!strcmp(np->name, "smu-doorbell"))
307 trace = 1;
308
309 TRACE("Finishing SMU doorbell ! num_interrupt_controllers = %d\n",
310 num_interrupt_controllers);
311
312 if (num_interrupt_controllers == 0) {
313 /*
314 * Old machines just have a list of interrupt numbers
315 * and no interrupt-controller nodes.
316 */
317 ints = (unsigned int *) get_property(np, "AAPL,interrupts",
318 &intlen);
319 /* XXX old interpret_pci_props looked in parent too */
320 /* XXX old interpret_macio_props looked for interrupts
321 before AAPL,interrupts */
322 if (ints == NULL)
323 ints = (unsigned int *) get_property(np, "interrupts",
324 &intlen);
325 if (ints == NULL)
326 return 0;
327
328 np->n_intrs = intlen / sizeof(unsigned int);
329 np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]),
330 mem_start);
331 if (!np->intrs)
332 return -ENOMEM;
333 if (measure_only)
334 return 0;
335
336 for (i = 0; i < np->n_intrs; ++i) {
337 np->intrs[i].line = *ints++;
338 np->intrs[i].sense = IRQ_SENSE_LEVEL
339 | IRQ_POLARITY_NEGATIVE;
340 }
341 return 0;
342 }
343
344 ints = (unsigned int *) get_property(np, "interrupts", &intlen);
345 TRACE("ints=%p, intlen=%d\n", ints, intlen);
346 if (ints == NULL)
347 return 0;
348 intrcells = prom_n_intr_cells(np);
349 intlen /= intrcells * sizeof(unsigned int);
350 TRACE("intrcells=%d, new intlen=%d\n", intrcells, intlen);
351 np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
352 if (!np->intrs)
353 return -ENOMEM;
354
355 if (measure_only)
356 return 0;
357
358 intrcount = 0;
359 for (i = 0; i < intlen; ++i, ints += intrcells) {
360 n = map_interrupt(&irq, &ic, np, ints, intrcells);
361 TRACE("map, irq=%d, ic=%p, n=%d\n", irq, ic, n);
362 if (n <= 0)
363 continue;
364
365 /* don't map IRQ numbers under a cascaded 8259 controller */
366 if (ic && device_is_compatible(ic, "chrp,iic")) {
367 np->intrs[intrcount].line = irq[0];
368 sense = (n > 1)? (irq[1] & 3): 3;
369 np->intrs[intrcount].sense = map_isa_senses[sense];
370 } else {
371 virq = virt_irq_create_mapping(irq[0]);
372 TRACE("virq=%d\n", virq);
373 #ifdef CONFIG_PPC64
374 if (virq == NO_IRQ) {
375 printk(KERN_CRIT "Could not allocate interrupt"
376 " number for %s\n", np->full_name);
377 continue;
378 }
379 #endif
380 np->intrs[intrcount].line = irq_offset_up(virq);
381 sense = (n > 1)? (irq[1] & 3): 1;
382
383 /* Apple uses the extra bits differently, so let's
384 * only keep the real sense bit on Macs
385 */
386 if (_machine == PLATFORM_POWERMAC)
387 sense &= 0x1;
388 np->intrs[intrcount].sense = map_mpic_senses[sense];
389 }
390
391 #ifdef CONFIG_PPC64
392 /* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
393 if (_machine == PLATFORM_POWERMAC && ic && ic->parent) {
394 char *name = get_property(ic->parent, "name", NULL);
395 if (name && !strcmp(name, "u3"))
396 np->intrs[intrcount].line += 128;
397 else if (!(name && (!strcmp(name, "mac-io") ||
398 !strcmp(name, "u4"))))
399 /* ignore other cascaded controllers, such as
400 the k2-sata-root */
401 break;
402 }
403 #endif /* CONFIG_PPC64 */
404 if (n > 2) {
405 printk("hmmm, got %d intr cells for %s:", n,
406 np->full_name);
407 for (j = 0; j < n; ++j)
408 printk(" %d", irq[j]);
409 printk("\n");
410 }
411 ++intrcount;
412 }
413 np->n_intrs = intrcount;
414
415 return 0;
416 }
417
418 static int __devinit finish_node(struct device_node *np,
419 unsigned long *mem_start,
420 int measure_only)
421 {
422 struct device_node *child;
423 int rc = 0;
424
425 rc = finish_node_interrupts(np, mem_start, measure_only);
426 if (rc)
427 goto out;
428
429 for (child = np->child; child != NULL; child = child->sibling) {
430 rc = finish_node(child, mem_start, measure_only);
431 if (rc)
432 goto out;
433 }
434 out:
435 return rc;
436 }
437
438 static void __init scan_interrupt_controllers(void)
439 {
440 struct device_node *np;
441 int n = 0;
442 char *name, *ic;
443 int iclen;
444
445 for (np = allnodes; np != NULL; np = np->allnext) {
446 ic = get_property(np, "interrupt-controller", &iclen);
447 name = get_property(np, "name", NULL);
448 /* checking iclen makes sure we don't get a false
449 match on /chosen.interrupt_controller */
450 if ((name != NULL
451 && strcmp(name, "interrupt-controller") == 0)
452 || (ic != NULL && iclen == 0
453 && strcmp(name, "AppleKiwi"))) {
454 if (n == 0)
455 dflt_interrupt_controller = np;
456 ++n;
457 }
458 }
459 num_interrupt_controllers = n;
460 }
461
462 /**
463 * finish_device_tree is called once things are running normally
464 * (i.e. with text and data mapped to the address they were linked at).
465 * It traverses the device tree and fills in some of the additional
466 * fields in each node, like {n_}addrs and {n_}intrs; the virtual
467 * interrupt mapping is also initialized at this point.
468 */
469 void __init finish_device_tree(void)
470 {
471 unsigned long start, end, size = 0;
472
473 DBG(" -> finish_device_tree\n");
474
475 #ifdef CONFIG_PPC64
476 /* Initialize virtual IRQ map */
477 virt_irq_init();
478 #endif
479 scan_interrupt_controllers();
480
481 /*
482 * Finish device-tree (pre-parsing some properties etc...)
483 * We do this in 2 passes. One with "measure_only" set, which
484 * will only measure the amount of memory needed, then we can
485 * allocate that memory, and call finish_node again. However,
486 * we must be careful as most routines will fail nowadays when
487 * prom_alloc() returns 0, so we must make sure our first pass
488 * doesn't start at 0. We pre-initialize size to 16 for that
489 * reason and then remove those additional 16 bytes
490 */
491 size = 16;
492 finish_node(allnodes, &size, 1);
493 size -= 16;
494 end = start = (unsigned long) __va(lmb_alloc(size, 128));
495 finish_node(allnodes, &end, 0);
496 BUG_ON(end != start + size);
497
498 DBG(" <- finish_device_tree\n");
499 }
500
501 static inline char *find_flat_dt_string(u32 offset)
502 {
503 return ((char *)initial_boot_params) +
504 initial_boot_params->off_dt_strings + offset;
505 }
506
507 /**
508 * This function is used to scan the flattened device-tree; it is
509 * used to extract the memory information at boot, before we can
510 * unflatten the tree.
511 */
512 int __init of_scan_flat_dt(int (*it)(unsigned long node,
513 const char *uname, int depth,
514 void *data),
515 void *data)
516 {
517 unsigned long p = ((unsigned long)initial_boot_params) +
518 initial_boot_params->off_dt_struct;
519 int rc = 0;
520 int depth = -1;
521
522 do {
523 u32 tag = *((u32 *)p);
524 char *pathp;
525
526 p += 4;
527 if (tag == OF_DT_END_NODE) {
528 depth --;
529 continue;
530 }
531 if (tag == OF_DT_NOP)
532 continue;
533 if (tag == OF_DT_END)
534 break;
535 if (tag == OF_DT_PROP) {
536 u32 sz = *((u32 *)p);
537 p += 8;
538 if (initial_boot_params->version < 0x10)
539 p = _ALIGN(p, sz >= 8 ? 8 : 4);
540 p += sz;
541 p = _ALIGN(p, 4);
542 continue;
543 }
544 if (tag != OF_DT_BEGIN_NODE) {
545 printk(KERN_WARNING "Invalid tag %x scanning flattened"
546 " device tree !\n", tag);
547 return -EINVAL;
548 }
549 depth++;
550 pathp = (char *)p;
551 p = _ALIGN(p + strlen(pathp) + 1, 4);
552 if ((*pathp) == '/') {
553 char *lp, *np;
554 for (lp = NULL, np = pathp; *np; np++)
555 if ((*np) == '/')
556 lp = np+1;
557 if (lp != NULL)
558 pathp = lp;
559 }
560 rc = it(p, pathp, depth, data);
561 if (rc != 0)
562 break;
563 } while(1);
564
565 return rc;
566 }
567
568 /**
569 * This function can be used within an of_scan_flat_dt() callback to
570 * get access to properties.
571 */
572 void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
573 unsigned long *size)
574 {
575 unsigned long p = node;
576
577 do {
578 u32 tag = *((u32 *)p);
579 u32 sz, noff;
580 const char *nstr;
581
582 p += 4;
583 if (tag == OF_DT_NOP)
584 continue;
585 if (tag != OF_DT_PROP)
586 return NULL;
587
588 sz = *((u32 *)p);
589 noff = *((u32 *)(p + 4));
590 p += 8;
591 if (initial_boot_params->version < 0x10)
592 p = _ALIGN(p, sz >= 8 ? 8 : 4);
593
594 nstr = find_flat_dt_string(noff);
595 if (nstr == NULL) {
596 printk(KERN_WARNING "Can't find property index"
597 " name !\n");
598 return NULL;
599 }
600 if (strcmp(name, nstr) == 0) {
601 if (size)
602 *size = sz;
603 return (void *)p;
604 }
605 p += sz;
606 p = _ALIGN(p, 4);
607 } while(1);
608 }
609
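/*
 * Editor's illustrative sketch, not part of the original file: a minimal
 * of_scan_flat_dt() callback that locates the "chosen" node in the flat
 * tree and reads one property with of_get_flat_dt_prop(), mirroring the
 * pattern of early_init_dt_scan_chosen() further down. The property name
 * "bootargs" is the real one used later in this file; the function name
 * itself is made up for illustration.
 */
static int __init example_scan_chosen(unsigned long node, const char *uname,
				      int depth, void *data)
{
	char *args;

	/* only interested in the depth-1 node whose unit name is "chosen" */
	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	args = of_get_flat_dt_prop(node, "bootargs", NULL);
	if (args != NULL)
		DBG("bootargs: %s\n", args);

	return 1;	/* a non-zero return value stops the scan */
}

/* Typical call site: of_scan_flat_dt(example_scan_chosen, NULL); */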
610 static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
611 unsigned long align)
612 {
613 void *res;
614
615 *mem = _ALIGN(*mem, align);
616 res = (void *)*mem;
617 *mem += size;
618
619 return res;
620 }
621
622 static unsigned long __init unflatten_dt_node(unsigned long mem,
623 unsigned long *p,
624 struct device_node *dad,
625 struct device_node ***allnextpp,
626 unsigned long fpsize)
627 {
628 struct device_node *np;
629 struct property *pp, **prev_pp = NULL;
630 char *pathp;
631 u32 tag;
632 unsigned int l, allocl;
633 int has_name = 0;
634 int new_format = 0;
635
636 tag = *((u32 *)(*p));
637 if (tag != OF_DT_BEGIN_NODE) {
638 printk("Weird tag at start of node: %x\n", tag);
639 return mem;
640 }
641 *p += 4;
642 pathp = (char *)*p;
643 l = allocl = strlen(pathp) + 1;
644 *p = _ALIGN(*p + l, 4);
645
646 /* Version 0x10 has a more compact unit name here instead of the full
647 * path: we accumulate the full path size using "fpsize" and rebuild
648 * it later. We detect this because the first character of the name is
649 * not '/'.
650 */
651 if ((*pathp) != '/') {
652 new_format = 1;
653 if (fpsize == 0) {
654 /* Root node: special case. fpsize accounts for the path
655 * plus the terminating zero. The root node only has '/', so
656 * fpsize should be 2, but we want to avoid first-level
657 * nodes ending up with a double '/', so we use fpsize 1 here
658 */
659 fpsize = 1;
660 allocl = 2;
661 } else {
662 /* account for '/' and path size minus terminal 0
663 * already in 'l'
664 */
665 fpsize += l;
666 allocl = fpsize;
667 }
668 }
669
670
671 np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
672 __alignof__(struct device_node));
673 if (allnextpp) {
674 memset(np, 0, sizeof(*np));
675 np->full_name = ((char*)np) + sizeof(struct device_node);
676 if (new_format) {
677 char *p = np->full_name;
678 /* rebuild full path for new format */
679 if (dad && dad->parent) {
680 strcpy(p, dad->full_name);
681 #ifdef DEBUG
682 if ((strlen(p) + l + 1) != allocl) {
683 DBG("%s: p: %d, l: %d, a: %d\n",
684 pathp, strlen(p), l, allocl);
685 }
686 #endif
687 p += strlen(p);
688 }
689 *(p++) = '/';
690 memcpy(p, pathp, l);
691 } else
692 memcpy(np->full_name, pathp, l);
693 prev_pp = &np->properties;
694 **allnextpp = np;
695 *allnextpp = &np->allnext;
696 if (dad != NULL) {
697 np->parent = dad;
698 /* we temporarily use the next field as `last_child'*/
699 if (dad->next == 0)
700 dad->child = np;
701 else
702 dad->next->sibling = np;
703 dad->next = np;
704 }
705 kref_init(&np->kref);
706 }
707 while(1) {
708 u32 sz, noff;
709 char *pname;
710
711 tag = *((u32 *)(*p));
712 if (tag == OF_DT_NOP) {
713 *p += 4;
714 continue;
715 }
716 if (tag != OF_DT_PROP)
717 break;
718 *p += 4;
719 sz = *((u32 *)(*p));
720 noff = *((u32 *)((*p) + 4));
721 *p += 8;
722 if (initial_boot_params->version < 0x10)
723 *p = _ALIGN(*p, sz >= 8 ? 8 : 4);
724
725 pname = find_flat_dt_string(noff);
726 if (pname == NULL) {
727 printk("Can't find property name in list !\n");
728 break;
729 }
730 if (strcmp(pname, "name") == 0)
731 has_name = 1;
732 l = strlen(pname) + 1;
733 pp = unflatten_dt_alloc(&mem, sizeof(struct property),
734 __alignof__(struct property));
735 if (allnextpp) {
736 if (strcmp(pname, "linux,phandle") == 0) {
737 np->node = *((u32 *)*p);
738 if (np->linux_phandle == 0)
739 np->linux_phandle = np->node;
740 }
741 if (strcmp(pname, "ibm,phandle") == 0)
742 np->linux_phandle = *((u32 *)*p);
743 pp->name = pname;
744 pp->length = sz;
745 pp->value = (void *)*p;
746 *prev_pp = pp;
747 prev_pp = &pp->next;
748 }
749 *p = _ALIGN((*p) + sz, 4);
750 }
751 /* With version 0x10 we may not have the "name" property; recreate
752 * it here from the unit name if absent
753 */
754 if (!has_name) {
755 char *p = pathp, *ps = pathp, *pa = NULL;
756 int sz;
757
758 while (*p) {
759 if ((*p) == '@')
760 pa = p;
761 if ((*p) == '/')
762 ps = p + 1;
763 p++;
764 }
765 if (pa < ps)
766 pa = p;
767 sz = (pa - ps) + 1;
768 pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
769 __alignof__(struct property));
770 if (allnextpp) {
771 pp->name = "name";
772 pp->length = sz;
773 pp->value = (unsigned char *)(pp + 1);
774 *prev_pp = pp;
775 prev_pp = &pp->next;
776 memcpy(pp->value, ps, sz - 1);
777 ((char *)pp->value)[sz - 1] = 0;
778 DBG("fixed up name for %s -> %s\n", pathp, pp->value);
779 }
780 }
781 if (allnextpp) {
782 *prev_pp = NULL;
783 np->name = get_property(np, "name", NULL);
784 np->type = get_property(np, "device_type", NULL);
785
786 if (!np->name)
787 np->name = "<NULL>";
788 if (!np->type)
789 np->type = "<NULL>";
790 }
791 while (tag == OF_DT_BEGIN_NODE) {
792 mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
793 tag = *((u32 *)(*p));
794 }
795 if (tag != OF_DT_END_NODE) {
796 printk("Weird tag at end of node: %x\n", tag);
797 return mem;
798 }
799 *p += 4;
800 return mem;
801 }
802
803
804 /**
805 * Unflattens the device-tree passed by the firmware, creating the
806 * tree of struct device_node. It also fills in the "name" and "type"
807 * pointers of the nodes so the normal device-tree walking functions
808 * can be used (this used to be done by finish_device_tree).
809 */
810 void __init unflatten_device_tree(void)
811 {
812 unsigned long start, mem, size;
813 struct device_node **allnextp = &allnodes;
814 char *p = NULL;
815 int l = 0;
816
817 DBG(" -> unflatten_device_tree()\n");
818
819 /* First pass, scan for size */
820 start = ((unsigned long)initial_boot_params) +
821 initial_boot_params->off_dt_struct;
822 size = unflatten_dt_node(0, &start, NULL, NULL, 0);
823 size = (size | 3) + 1;
824
825 DBG(" size is %lx, allocating...\n", size);
826
827 /* Allocate memory for the expanded device tree */
828 mem = lmb_alloc(size + 4, __alignof__(struct device_node));
829 if (!mem) {
830 DBG("Couldn't allocate memory with lmb_alloc()!\n");
831 panic("Couldn't allocate memory with lmb_alloc()!\n");
832 }
833 mem = (unsigned long) __va(mem);
834
835 ((u32 *)mem)[size / 4] = 0xdeadbeef;
836
837 DBG(" unflattening %lx...\n", mem);
838
839 /* Second pass, do actual unflattening */
840 start = ((unsigned long)initial_boot_params) +
841 initial_boot_params->off_dt_struct;
842 unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
843 if (*((u32 *)start) != OF_DT_END)
844 printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
845 if (((u32 *)mem)[size / 4] != 0xdeadbeef)
846 printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
847 ((u32 *)mem)[size / 4] );
848 *allnextp = NULL;
849
850 /* Get pointer to OF "/chosen" node for use everywhere */
851 of_chosen = of_find_node_by_path("/chosen");
852 if (of_chosen == NULL)
853 of_chosen = of_find_node_by_path("/chosen@0");
854
855 /* Retrieve the command line */
856 if (of_chosen != NULL) {
857 p = (char *)get_property(of_chosen, "bootargs", &l);
858 if (p != NULL && l > 0)
859 strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE));
860 }
861 #ifdef CONFIG_CMDLINE
862 if (l == 0 || (l == 1 && (*p) == 0))
863 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
864 #endif /* CONFIG_CMDLINE */
865
866 DBG("Command line is: %s\n", cmd_line);
867
868 DBG(" <- unflatten_device_tree()\n");
869 }
870
871
872 static int __init early_init_dt_scan_cpus(unsigned long node,
873 const char *uname, int depth, void *data)
874 {
875 u32 *prop;
876 unsigned long size;
877 char *type = of_get_flat_dt_prop(node, "device_type", &size);
878
879 /* We are scanning "cpu" nodes only */
880 if (type == NULL || strcmp(type, "cpu") != 0)
881 return 0;
882
883 boot_cpuid = 0;
884 boot_cpuid_phys = 0;
885 if (initial_boot_params && initial_boot_params->version >= 2) {
886 /* version 2 of the kexec param format adds the phys cpuid
887 * of booted proc.
888 */
889 boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
890 } else {
891 /* Check if it's the boot CPU and, if so, set its hw index now */
892 if (of_get_flat_dt_prop(node,
893 "linux,boot-cpu", NULL) != NULL) {
894 prop = of_get_flat_dt_prop(node, "reg", NULL);
895 if (prop != NULL)
896 boot_cpuid_phys = *prop;
897 }
898 }
899 set_hard_smp_processor_id(0, boot_cpuid_phys);
900
901 #ifdef CONFIG_ALTIVEC
902 /* Check if we have a VMX unit and, if so, update the CPU features */
903 prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
904 if (prop && (*prop) > 0) {
905 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
906 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
907 }
908
909 /* Same goes for Apple's "altivec" property */
910 prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL);
911 if (prop) {
912 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
913 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
914 }
915 #endif /* CONFIG_ALTIVEC */
916
917 #ifdef CONFIG_PPC_PSERIES
918 /*
919 * Check for an SMT capable CPU and set the CPU feature. We do
920 * this by looking at the size of the ibm,ppc-interrupt-server#s
921 * property
922 */
923 prop = (u32 *)of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
924 &size);
925 cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
926 if (prop && ((size / sizeof(u32)) > 1))
927 cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
928 #endif
929
930 return 0;
931 }
932
933 static int __init early_init_dt_scan_chosen(unsigned long node,
934 const char *uname, int depth, void *data)
935 {
936 u32 *prop;
937 unsigned long *lprop;
938
939 DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
940
941 if (depth != 1 ||
942 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
943 return 0;
944
945 /* get platform type */
946 prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
947 if (prop == NULL)
948 return 0;
949 #ifdef CONFIG_PPC_MULTIPLATFORM
950 _machine = *prop;
951 #endif
952
953 #ifdef CONFIG_PPC64
954 /* check if iommu is forced on or off */
955 if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
956 iommu_is_off = 1;
957 if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
958 iommu_force_on = 1;
959 #endif
960
961 lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
962 if (lprop)
963 memory_limit = *lprop;
964
965 #ifdef CONFIG_PPC64
966 lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
967 if (lprop)
968 tce_alloc_start = *lprop;
969 lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
970 if (lprop)
971 tce_alloc_end = *lprop;
972 #endif
973
974 #ifdef CONFIG_PPC_RTAS
975 /* To help early debugging via the front panel, we retrieve a minimal
976 * set of RTAS information now, if available
977 */
978 {
979 u64 *basep, *entryp;
980
981 basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
982 entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
983 prop = of_get_flat_dt_prop(node, "linux,rtas-size", NULL);
984 if (basep && entryp && prop) {
985 rtas.base = *basep;
986 rtas.entry = *entryp;
987 rtas.size = *prop;
988 }
989 }
990 #endif /* CONFIG_PPC_RTAS */
991
992 #ifdef CONFIG_KEXEC
993 lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
994 if (lprop)
995 crashk_res.start = *lprop;
996
997 lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
998 if (lprop)
999 crashk_res.end = crashk_res.start + *lprop - 1;
1000 #endif
1001
1002 /* break now */
1003 return 1;
1004 }
1005
1006 static int __init early_init_dt_scan_root(unsigned long node,
1007 const char *uname, int depth, void *data)
1008 {
1009 u32 *prop;
1010
1011 if (depth != 0)
1012 return 0;
1013
1014 prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
1015 dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1016 DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1017
1018 prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
1019 dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1020 DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1021
1022 /* break now */
1023 return 1;
1024 }
1025
1026 static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
1027 {
1028 cell_t *p = *cellp;
1029 unsigned long r;
1030
1031 /* Ignore more than 2 cells */
1032 while (s > sizeof(unsigned long) / 4) {
1033 p++;
1034 s--;
1035 }
1036 r = *p++;
1037 #ifdef CONFIG_PPC64
1038 if (s > 1) {
1039 r <<= 32;
1040 r |= *(p++);
1041 s--;
1042 }
1043 #endif
1044
1045 *cellp = p;
1046 return r;
1047 }
1048
1049
1050 static int __init early_init_dt_scan_memory(unsigned long node,
1051 const char *uname, int depth, void *data)
1052 {
1053 char *type = of_get_flat_dt_prop(node, "device_type", NULL);
1054 cell_t *reg, *endp;
1055 unsigned long l;
1056
1057 /* We are scanning "memory" nodes only */
1058 if (type == NULL) {
1059 /*
1060 * The longtrail doesn't have a device_type on the
1061 * /memory node, so look for the node called /memory@0.
1062 */
1063 if (depth != 1 || strcmp(uname, "memory@0") != 0)
1064 return 0;
1065 } else if (strcmp(type, "memory") != 0)
1066 return 0;
1067
1068 reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
1069 if (reg == NULL)
1070 reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
1071 if (reg == NULL)
1072 return 0;
1073
1074 endp = reg + (l / sizeof(cell_t));
1075
1076 DBG("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
1077 uname, l, reg[0], reg[1], reg[2], reg[3]);
1078
1079 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1080 unsigned long base, size;
1081
1082 base = dt_mem_next_cell(dt_root_addr_cells, &reg);
1083 size = dt_mem_next_cell(dt_root_size_cells, &reg);
1084
1085 if (size == 0)
1086 continue;
1087 DBG(" - %lx , %lx\n", base, size);
1088 #ifdef CONFIG_PPC64
1089 if (iommu_is_off) {
1090 if (base >= 0x80000000ul)
1091 continue;
1092 if ((base + size) > 0x80000000ul)
1093 size = 0x80000000ul - base;
1094 }
1095 #endif
1096 lmb_add(base, size);
1097 }
1098 return 0;
1099 }
1100
1101 static void __init early_reserve_mem(void)
1102 {
1103 unsigned long base, size;
1104 unsigned long *reserve_map;
1105
1106 reserve_map = (unsigned long *)(((unsigned long)initial_boot_params) +
1107 initial_boot_params->off_mem_rsvmap);
1108 while (1) {
1109 base = *(reserve_map++);
1110 size = *(reserve_map++);
1111 if (size == 0)
1112 break;
1113 DBG("reserving: %lx -> %lx\n", base, size);
1114 lmb_reserve(base, size);
1115 }
1116
1117 #if 0
1118 DBG("memory reserved, lmbs :\n");
1119 lmb_dump_all();
1120 #endif
1121 }
1122
1123 void __init early_init_devtree(void *params)
1124 {
1125 DBG(" -> early_init_devtree()\n");
1126
1127 /* Setup flat device-tree pointer */
1128 initial_boot_params = params;
1129
1130 /* Retrieve various pieces of information from the /chosen node of the
1131 * device-tree, including the platform type, initrd location and
1132 * size, TCE reserve, and more ...
1133 */
1134 of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
1135
1136 /* Scan memory nodes and rebuild LMBs */
1137 lmb_init();
1138 of_scan_flat_dt(early_init_dt_scan_root, NULL);
1139 of_scan_flat_dt(early_init_dt_scan_memory, NULL);
1140 lmb_enforce_memory_limit(memory_limit);
1141 lmb_analyze();
1142
1143 DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
1144
1145 /* Reserve LMB regions used by kernel, initrd, dt, etc... */
1146 lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
1147 #ifdef CONFIG_CRASH_DUMP
1148 lmb_reserve(0, KDUMP_RESERVE_LIMIT);
1149 #endif
1150 early_reserve_mem();
1151
1152 DBG("Scanning CPUs ...\n");
1153
1154 /* Retrieve CPU-related information from the flat tree
1155 * (altivec support, boot CPU ID, ...)
1156 */
1157 of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
1158
1159 DBG(" <- early_init_devtree()\n");
1160 }
1161
1162 #undef printk
1163
1164 int
1165 prom_n_addr_cells(struct device_node* np)
1166 {
1167 int* ip;
1168 do {
1169 if (np->parent)
1170 np = np->parent;
1171 ip = (int *) get_property(np, "#address-cells", NULL);
1172 if (ip != NULL)
1173 return *ip;
1174 } while (np->parent);
1175 /* No #address-cells property for the root node, default to 1 */
1176 return 1;
1177 }
1178 EXPORT_SYMBOL(prom_n_addr_cells);
1179
1180 int
1181 prom_n_size_cells(struct device_node* np)
1182 {
1183 int* ip;
1184 do {
1185 if (np->parent)
1186 np = np->parent;
1187 ip = (int *) get_property(np, "#size-cells", NULL);
1188 if (ip != NULL)
1189 return *ip;
1190 } while (np->parent);
1191 /* No #size-cells property for the root node, default to 1 */
1192 return 1;
1193 }
1194 EXPORT_SYMBOL(prom_n_size_cells);
1195
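/*
 * Editor's illustrative sketch, not part of the original file: using
 * prom_n_addr_cells() and prom_n_size_cells() to walk a node's "reg"
 * property one (address, size) entry at a time. The helper name and the
 * debug output are made up; only the cell-counting pattern matters.
 */
static void example_walk_reg(struct device_node *np)
{
	unsigned int *reg;
	int len, na, ns, i;

	na = prom_n_addr_cells(np);	/* cells per address field */
	ns = prom_n_size_cells(np);	/* cells per size field */

	reg = (unsigned int *)get_property(np, "reg", &len);
	if (reg == NULL)
		return;

	len /= sizeof(unsigned int);	/* bytes -> cells */
	for (i = 0; i + na + ns <= len; i += na + ns)
		DBG("%s: reg entry spans cells %d..%d\n",
		    np->full_name, i, i + na + ns - 1);
}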
1196 /**
1197 * Work out the sense (active-low level / active-high edge)
1198 * of each interrupt from the device tree.
1199 */
1200 void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
1201 {
1202 struct device_node *np;
1203 int i, j;
1204
1205 /* default to level-triggered */
1206 memset(senses, IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, max - off);
1207
1208 for (np = allnodes; np != 0; np = np->allnext) {
1209 for (j = 0; j < np->n_intrs; j++) {
1210 i = np->intrs[j].line;
1211 if (i >= off && i < max)
1212 senses[i-off] = np->intrs[j].sense;
1213 }
1214 }
1215 }
1216
1217 /**
1218 * Construct and return a list of the device_nodes with a given name.
1219 */
1220 struct device_node *find_devices(const char *name)
1221 {
1222 struct device_node *head, **prevp, *np;
1223
1224 prevp = &head;
1225 for (np = allnodes; np != 0; np = np->allnext) {
1226 if (np->name != 0 && strcasecmp(np->name, name) == 0) {
1227 *prevp = np;
1228 prevp = &np->next;
1229 }
1230 }
1231 *prevp = NULL;
1232 return head;
1233 }
1234 EXPORT_SYMBOL(find_devices);
1235
1236 /**
1237 * Construct and return a list of the device_nodes with a given type.
1238 */
1239 struct device_node *find_type_devices(const char *type)
1240 {
1241 struct device_node *head, **prevp, *np;
1242
1243 prevp = &head;
1244 for (np = allnodes; np != 0; np = np->allnext) {
1245 if (np->type != 0 && strcasecmp(np->type, type) == 0) {
1246 *prevp = np;
1247 prevp = &np->next;
1248 }
1249 }
1250 *prevp = NULL;
1251 return head;
1252 }
1253 EXPORT_SYMBOL(find_type_devices);
1254
1255 /**
1256 * Returns all nodes linked together
1257 */
1258 struct device_node *find_all_nodes(void)
1259 {
1260 struct device_node *head, **prevp, *np;
1261
1262 prevp = &head;
1263 for (np = allnodes; np != 0; np = np->allnext) {
1264 *prevp = np;
1265 prevp = &np->next;
1266 }
1267 *prevp = NULL;
1268 return head;
1269 }
1270 EXPORT_SYMBOL(find_all_nodes);
1271
1272 /** Checks if the given "compat" string matches one of the strings in
1273 * the device's "compatible" property
1274 */
1275 int device_is_compatible(struct device_node *device, const char *compat)
1276 {
1277 const char* cp;
1278 int cplen, l;
1279
1280 cp = (char *) get_property(device, "compatible", &cplen);
1281 if (cp == NULL)
1282 return 0;
1283 while (cplen > 0) {
1284 if (strncasecmp(cp, compat, strlen(compat)) == 0)
1285 return 1;
1286 l = strlen(cp) + 1;
1287 cp += l;
1288 cplen -= l;
1289 }
1290
1291 return 0;
1292 }
1293 EXPORT_SYMBOL(device_is_compatible);
1294
1295
1296 /**
1297 * Indicates whether the root node has a given value in its
1298 * compatible property.
1299 */
1300 int machine_is_compatible(const char *compat)
1301 {
1302 struct device_node *root;
1303 int rc = 0;
1304
1305 root = of_find_node_by_path("/");
1306 if (root) {
1307 rc = device_is_compatible(root, compat);
1308 of_node_put(root);
1309 }
1310 return rc;
1311 }
1312 EXPORT_SYMBOL(machine_is_compatible);
1313
1314 /**
1315 * Construct and return a list of the device_nodes with a given type
1316 * and compatible property.
1317 */
1318 struct device_node *find_compatible_devices(const char *type,
1319 const char *compat)
1320 {
1321 struct device_node *head, **prevp, *np;
1322
1323 prevp = &head;
1324 for (np = allnodes; np != 0; np = np->allnext) {
1325 if (type != NULL
1326 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1327 continue;
1328 if (device_is_compatible(np, compat)) {
1329 *prevp = np;
1330 prevp = &np->next;
1331 }
1332 }
1333 *prevp = NULL;
1334 return head;
1335 }
1336 EXPORT_SYMBOL(find_compatible_devices);
1337
1338 /**
1339 * Find the device_node with a given full_name.
1340 */
1341 struct device_node *find_path_device(const char *path)
1342 {
1343 struct device_node *np;
1344
1345 for (np = allnodes; np != 0; np = np->allnext)
1346 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
1347 return np;
1348 return NULL;
1349 }
1350 EXPORT_SYMBOL(find_path_device);
1351
1352 /*******
1353 *
1354 * New implementation of the OF "find" APIs: they return a refcounted
1355 * object; call of_node_put() when done. The device tree and list
1356 * are protected by a rw_lock.
1357 *
1358 * Note that property management will need some locking as well;
1359 * this isn't dealt with yet.
1360 *
1361 *******/
1362
1363 /**
1364 * of_find_node_by_name - Find a node by its "name" property
1365 * @from: The node to start searching from or NULL, the node
1366 * you pass will not be searched, only the next one
1367 * will; typically, you pass what the previous call
1368 * returned. of_node_put() will be called on it
1369 * @name: The name string to match against
1370 *
1371 * Returns a node pointer with refcount incremented, use
1372 * of_node_put() on it when done.
1373 */
1374 struct device_node *of_find_node_by_name(struct device_node *from,
1375 const char *name)
1376 {
1377 struct device_node *np;
1378
1379 read_lock(&devtree_lock);
1380 np = from ? from->allnext : allnodes;
1381 for (; np != 0; np = np->allnext)
1382 if (np->name != 0 && strcasecmp(np->name, name) == 0
1383 && of_node_get(np))
1384 break;
1385 if (from)
1386 of_node_put(from);
1387 read_unlock(&devtree_lock);
1388 return np;
1389 }
1390 EXPORT_SYMBOL(of_find_node_by_name);
1391
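/*
 * Editor's illustrative sketch, not part of the original file: the
 * intended usage pattern for the refcounted finders. Passing the
 * previous result back in walks every match, and of_find_node_by_name()
 * drops the old reference itself; an explicit of_node_put() is only
 * needed when leaving the loop early. The node name "l2-cache" is just
 * an example.
 */
static void example_walk_by_name(void)
{
	struct device_node *np = NULL;

	while ((np = of_find_node_by_name(np, "l2-cache")) != NULL)
		DBG("found %s\n", np->full_name);
	/* np is NULL here, so there is nothing left to put */
}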
1392 /**
1393 * of_find_node_by_type - Find a node by its "device_type" property
1394 * @from: The node to start searching from or NULL, the node
1395 * you pass will not be searched, only the next one
1396 * will; typically, you pass what the previous call
1397 * returned. of_node_put() will be called on it
1398 * @type: The type string to match against
1399 *
1400 * Returns a node pointer with refcount incremented, use
1401 * of_node_put() on it when done.
1402 */
1403 struct device_node *of_find_node_by_type(struct device_node *from,
1404 const char *type)
1405 {
1406 struct device_node *np;
1407
1408 read_lock(&devtree_lock);
1409 np = from ? from->allnext : allnodes;
1410 for (; np != 0; np = np->allnext)
1411 if (np->type != 0 && strcasecmp(np->type, type) == 0
1412 && of_node_get(np))
1413 break;
1414 if (from)
1415 of_node_put(from);
1416 read_unlock(&devtree_lock);
1417 return np;
1418 }
1419 EXPORT_SYMBOL(of_find_node_by_type);
1420
1421 /**
1422 * of_find_compatible_node - Find a node based on type and one of the
1423 * tokens in its "compatible" property
1424 * @from: The node to start searching from or NULL, the node
1425 * you pass will not be searched, only the next one
1426 * will; typically, you pass what the previous call
1427 * returned. of_node_put() will be called on it
1428 * @type: The type string to match "device_type" or NULL to ignore
1429 * @compatible: The string to match to one of the tokens in the device
1430 * "compatible" list.
1431 *
1432 * Returns a node pointer with refcount incremented, use
1433 * of_node_put() on it when done.
1434 */
1435 struct device_node *of_find_compatible_node(struct device_node *from,
1436 const char *type, const char *compatible)
1437 {
1438 struct device_node *np;
1439
1440 read_lock(&devtree_lock);
1441 np = from ? from->allnext : allnodes;
1442 for (; np != 0; np = np->allnext) {
1443 if (type != NULL
1444 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1445 continue;
1446 if (device_is_compatible(np, compatible) && of_node_get(np))
1447 break;
1448 }
1449 if (from)
1450 of_node_put(from);
1451 read_unlock(&devtree_lock);
1452 return np;
1453 }
1454 EXPORT_SYMBOL(of_find_compatible_node);
1455
1456 /**
1457 * of_find_node_by_path - Find a node matching a full OF path
1458 * @path: The full path to match
1459 *
1460 * Returns a node pointer with refcount incremented, use
1461 * of_node_put() on it when done.
1462 */
1463 struct device_node *of_find_node_by_path(const char *path)
1464 {
1465 struct device_node *np = allnodes;
1466
1467 read_lock(&devtree_lock);
1468 for (; np != 0; np = np->allnext) {
1469 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1470 && of_node_get(np))
1471 break;
1472 }
1473 read_unlock(&devtree_lock);
1474 return np;
1475 }
1476 EXPORT_SYMBOL(of_find_node_by_path);
1477
1478 /**
1479 * of_find_node_by_phandle - Find a node given a phandle
1480 * @handle: phandle of the node to find
1481 *
1482 * Returns a node pointer with refcount incremented, use
1483 * of_node_put() on it when done.
1484 */
1485 struct device_node *of_find_node_by_phandle(phandle handle)
1486 {
1487 struct device_node *np;
1488
1489 read_lock(&devtree_lock);
1490 for (np = allnodes; np != 0; np = np->allnext)
1491 if (np->linux_phandle == handle)
1492 break;
1493 if (np)
1494 of_node_get(np);
1495 read_unlock(&devtree_lock);
1496 return np;
1497 }
1498 EXPORT_SYMBOL(of_find_node_by_phandle);
1499
1500 /**
1501 * of_find_all_nodes - Get next node in global list
1502 * @prev: Previous node or NULL to start iteration
1503 * of_node_put() will be called on it
1504 *
1505 * Returns a node pointer with refcount incremented, use
1506 * of_node_put() on it when done.
1507 */
1508 struct device_node *of_find_all_nodes(struct device_node *prev)
1509 {
1510 struct device_node *np;
1511
1512 read_lock(&devtree_lock);
1513 np = prev ? prev->allnext : allnodes;
1514 for (; np != 0; np = np->allnext)
1515 if (of_node_get(np))
1516 break;
1517 if (prev)
1518 of_node_put(prev);
1519 read_unlock(&devtree_lock);
1520 return np;
1521 }
1522 EXPORT_SYMBOL(of_find_all_nodes);
1523
1524 /**
1525 * of_get_parent - Get a node's parent if any
1526 * @node: Node to get parent
1527 *
1528 * Returns a node pointer with refcount incremented, use
1529 * of_node_put() on it when done.
1530 */
1531 struct device_node *of_get_parent(const struct device_node *node)
1532 {
1533 struct device_node *np;
1534
1535 if (!node)
1536 return NULL;
1537
1538 read_lock(&devtree_lock);
1539 np = of_node_get(node->parent);
1540 read_unlock(&devtree_lock);
1541 return np;
1542 }
1543 EXPORT_SYMBOL(of_get_parent);
1544
1545 /**
1546 * of_get_next_child - Iterate over a node's children
1547 * @node: parent node
1548 * @prev: previous child of the parent node, or NULL to get first
1549 *
1550 * Returns a node pointer with refcount incremented, use
1551 * of_node_put() on it when done.
1552 */
1553 struct device_node *of_get_next_child(const struct device_node *node,
1554 struct device_node *prev)
1555 {
1556 struct device_node *next;
1557
1558 read_lock(&devtree_lock);
1559 next = prev ? prev->sibling : node->child;
1560 for (; next != 0; next = next->sibling)
1561 if (of_node_get(next))
1562 break;
1563 if (prev)
1564 of_node_put(prev);
1565 read_unlock(&devtree_lock);
1566 return next;
1567 }
1568 EXPORT_SYMBOL(of_get_next_child);
1569
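/*
 * Editor's illustrative sketch, not part of the original file: counting
 * the children of a node with of_get_next_child(). As with the other
 * finders, the reference on the previous child is dropped by the call
 * itself; of_node_put() would only be needed if the loop were left
 * early with "child" still non-NULL.
 */
static int example_count_children(struct device_node *parent)
{
	struct device_node *child = NULL;
	int n = 0;

	while ((child = of_get_next_child(parent, child)) != NULL)
		n++;

	return n;
}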
1570 /**
1571 * of_node_get - Increment refcount of a node
1572 * @node: Node to inc refcount, NULL is supported to
1573 * simplify writing of callers
1574 *
1575 * Returns node.
1576 */
1577 struct device_node *of_node_get(struct device_node *node)
1578 {
1579 if (node)
1580 kref_get(&node->kref);
1581 return node;
1582 }
1583 EXPORT_SYMBOL(of_node_get);
1584
1585 static inline struct device_node * kref_to_device_node(struct kref *kref)
1586 {
1587 return container_of(kref, struct device_node, kref);
1588 }
1589
1590 /**
1591 * of_node_release - release a dynamically allocated node
1592 * @kref: kref element of the node to be released
1593 *
1594 * In of_node_put() this function is passed to kref_put()
1595 * as the destructor.
1596 */
1597 static void of_node_release(struct kref *kref)
1598 {
1599 struct device_node *node = kref_to_device_node(kref);
1600 struct property *prop = node->properties;
1601
1602 if (!OF_IS_DYNAMIC(node))
1603 return;
1604 while (prop) {
1605 struct property *next = prop->next;
1606 kfree(prop->name);
1607 kfree(prop->value);
1608 kfree(prop);
1609 prop = next;
1610 }
1611 kfree(node->intrs);
1612 kfree(node->full_name);
1613 kfree(node->data);
1614 kfree(node);
1615 }
1616
1617 /**
1618 * of_node_put - Decrement refcount of a node
1619 * @node: Node to dec refcount, NULL is supported to
1620 * simplify writing of callers
1621 *
1622 */
1623 void of_node_put(struct device_node *node)
1624 {
1625 if (node)
1626 kref_put(&node->kref, of_node_release);
1627 }
1628 EXPORT_SYMBOL(of_node_put);
1629
1630 /*
1631 * Plug a device node into the tree and global list.
1632 */
1633 void of_attach_node(struct device_node *np)
1634 {
1635 write_lock(&devtree_lock);
1636 np->sibling = np->parent->child;
1637 np->allnext = allnodes;
1638 np->parent->child = np;
1639 allnodes = np;
1640 write_unlock(&devtree_lock);
1641 }
1642
1643 /*
1644 * "Unplug" a node from the device tree. The caller must hold
1645 * a reference to the node. The memory associated with the node
1646 * is not freed until its refcount goes to zero.
1647 */
1648 void of_detach_node(const struct device_node *np)
1649 {
1650 struct device_node *parent;
1651
1652 write_lock(&devtree_lock);
1653
1654 parent = np->parent;
1655
1656 if (allnodes == np)
1657 allnodes = np->allnext;
1658 else {
1659 struct device_node *prev;
1660 for (prev = allnodes;
1661 prev->allnext != np;
1662 prev = prev->allnext)
1663 ;
1664 prev->allnext = np->allnext;
1665 }
1666
1667 if (parent->child == np)
1668 parent->child = np->sibling;
1669 else {
1670 struct device_node *prevsib;
1671 for (prevsib = np->parent->child;
1672 prevsib->sibling != np;
1673 prevsib = prevsib->sibling)
1674 ;
1675 prevsib->sibling = np->sibling;
1676 }
1677
1678 write_unlock(&devtree_lock);
1679 }
1680
1681 #ifdef CONFIG_PPC_PSERIES
1682 /*
1683 * Fix up the uninitialized fields in a new device node:
1684 * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
1685 *
1686 * A lot of boot-time code is duplicated here, because functions such
1687 * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
1688 * slab allocator.
1689 *
1690 * This should probably be split up into smaller chunks.
1691 */
1692
1693 static int of_finish_dynamic_node(struct device_node *node)
1694 {
1695 struct device_node *parent = of_get_parent(node);
1696 int err = 0;
1697 phandle *ibm_phandle;
1698
1699 node->name = get_property(node, "name", NULL);
1700 node->type = get_property(node, "device_type", NULL);
1701
1702 if (!parent) {
1703 err = -ENODEV;
1704 goto out;
1705 }
1706
1707 /* We don't support this function on PowerMac, at least
1708 * not yet
1709 */
1710 if (_machine == PLATFORM_POWERMAC)
1711 return -ENODEV;
1712
1713 /* fix up new node's linux_phandle field */
1714 if ((ibm_phandle = (unsigned int *)get_property(node,
1715 "ibm,phandle", NULL)))
1716 node->linux_phandle = *ibm_phandle;
1717
1718 out:
1719 of_node_put(parent);
1720 return err;
1721 }
1722
1723 static int prom_reconfig_notifier(struct notifier_block *nb,
1724 unsigned long action, void *node)
1725 {
1726 int err;
1727
1728 switch (action) {
1729 case PSERIES_RECONFIG_ADD:
1730 err = of_finish_dynamic_node(node);
1731 if (!err)
1732 finish_node(node, NULL, 0);
1733 if (err < 0) {
1734 printk(KERN_ERR "finish_node returned %d\n", err);
1735 err = NOTIFY_BAD;
1736 }
1737 break;
1738 default:
1739 err = NOTIFY_DONE;
1740 break;
1741 }
1742 return err;
1743 }
1744
1745 static struct notifier_block prom_reconfig_nb = {
1746 .notifier_call = prom_reconfig_notifier,
1747 .priority = 10, /* This one needs to run first */
1748 };
1749
1750 static int __init prom_reconfig_setup(void)
1751 {
1752 return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
1753 }
1754 __initcall(prom_reconfig_setup);
1755 #endif
1756
1757 /*
1758 * Find a property with a given name for a given node
1759 * and return the value.
1760 */
1761 unsigned char *get_property(struct device_node *np, const char *name,
1762 int *lenp)
1763 {
1764 struct property *pp;
1765
1766 for (pp = np->properties; pp != 0; pp = pp->next)
1767 if (strcmp(pp->name, name) == 0) {
1768 if (lenp != 0)
1769 *lenp = pp->length;
1770 return pp->value;
1771 }
1772 return NULL;
1773 }
1774 EXPORT_SYMBOL(get_property);
1775
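/*
 * Editor's illustrative sketch, not part of the original file: typical
 * get_property() use. The returned pointer aims into the device-tree
 * data itself, so it must not be freed, and lenp reports the raw length
 * in bytes. The property name "clock-frequency" is a common OF property
 * used here purely as an example.
 */
static unsigned int example_read_clock(struct device_node *np)
{
	unsigned int *freq;
	int len;

	freq = (unsigned int *)get_property(np, "clock-frequency", &len);
	if (freq == NULL || len < (int)sizeof(*freq))
		return 0;

	return *freq;
}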
1776 /*
1777 * Add a property to a node
1778 */
1779 int prom_add_property(struct device_node* np, struct property* prop)
1780 {
1781 struct property **next;
1782
1783 prop->next = NULL;
1784 write_lock(&devtree_lock);
1785 next = &np->properties;
1786 while (*next) {
1787 if (strcmp(prop->name, (*next)->name) == 0) {
1788 /* duplicate ! don't insert it */
1789 write_unlock(&devtree_lock);
1790 return -1;
1791 }
1792 next = &(*next)->next;
1793 }
1794 *next = prop;
1795 write_unlock(&devtree_lock);
1796
1797 #ifdef CONFIG_PROC_DEVICETREE
1798 /* try to add to proc as well if it was initialized */
1799 if (np->pde)
1800 proc_device_tree_add_prop(np->pde, prop);
1801 #endif /* CONFIG_PROC_DEVICETREE */
1802
1803 return 0;
1804 }
1805
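/*
 * Editor's illustrative sketch, not part of the original file: building
 * a property and attaching it with prom_add_property(). The property and
 * its value must stay valid for the lifetime of the node, hence the
 * static storage here. The name "linux,example-marker" is made up for
 * illustration.
 */
static int example_add_marker(struct device_node *np)
{
	static u32 marker_value = 1;
	static struct property marker_prop = {
		.name	= "linux,example-marker",
		.length	= sizeof(marker_value),
		.value	= (unsigned char *)&marker_value,
	};

	return prom_add_property(np, &marker_prop);
}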
1806