of/flattree: merge of_get_flat_dt_prop
arch/powerpc/kernel/prom.c
/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *   {engebret|bergner}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <stdarg.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/debugfs.h>
#include <linux/irq.h>
#include <linux/lmb.h>

#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/pci.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/machdep.h>
#include <asm/pSeries_reconfig.h>
#include <asm/pci-bridge.h>
#include <asm/phyp_dump.h>
#include <asm/kexec.h>
#include <mm/mmu_decl.h>

#ifdef DEBUG
#define DBG(fmt...) printk(KERN_ERR fmt)
#else
#define DBG(fmt...)
#endif


static int __initdata dt_root_addr_cells;
static int __initdata dt_root_size_cells;

#ifdef CONFIG_PPC64
int __initdata iommu_is_off;
int __initdata iommu_force_on;
unsigned long tce_alloc_start, tce_alloc_end;
#endif

typedef u32 cell_t;

extern struct device_node *allnodes;	/* temporary while merging */

extern rwlock_t devtree_lock;	/* temporary while merging */

/* export that to outside world */
struct device_node *of_chosen;
int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
{
	const char *cp;
	unsigned long cplen, l;

	cp = of_get_flat_dt_prop(node, "compatible", &cplen);
	if (cp == NULL)
		return 0;
	while (cplen > 0) {
		if (strncasecmp(cp, compat, strlen(compat)) == 0)
			return 1;
		l = strlen(cp) + 1;
		cp += l;
		cplen -= l;
	}

	return 0;
}

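/* Simple bump allocator used while unflattening the tree: align the
 * cursor, hand out "size" bytes and advance. During the first (sizing)
 * pass the cursor starts at 0, so only its final value matters.
 */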
static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
				       unsigned long align)
{
	void *res;

	*mem = _ALIGN(*mem, align);
	res = (void *)*mem;
	*mem += size;

	return res;
}

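/* Unflatten one node of the flat tree at offset *p, recursing into its
 * children. With allnextpp == NULL this pass only measures how much
 * memory the expanded node will need; with a non-NULL allnextpp it
 * actually builds the struct device_node and its properties at "mem".
 */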
static unsigned long __init unflatten_dt_node(unsigned long mem,
					      unsigned long *p,
					      struct device_node *dad,
					      struct device_node ***allnextpp,
					      unsigned long fpsize)
{
	struct device_node *np;
	struct property *pp, **prev_pp = NULL;
	char *pathp;
	u32 tag;
	unsigned int l, allocl;
	int has_name = 0;
	int new_format = 0;

	tag = *((u32 *)(*p));
	if (tag != OF_DT_BEGIN_NODE) {
		printk("Weird tag at start of node: %x\n", tag);
		return mem;
	}
	*p += 4;
	pathp = (char *)*p;
	l = allocl = strlen(pathp) + 1;
	*p = _ALIGN(*p + l, 4);

	/* version 0x10 has a more compact unit name here instead of the full
	 * path. we accumulate the full path size using "fpsize", we'll rebuild
	 * it later. We detect this because the first character of the name is
	 * not '/'.
	 */
	if ((*pathp) != '/') {
		new_format = 1;
		if (fpsize == 0) {
			/* root node: special case. fpsize accounts for path
			 * plus terminating zero. root node only has '/', so
			 * fpsize should be 2, but we want to avoid the first
			 * level nodes to have two '/' so we use fpsize 1 here
			 */
			fpsize = 1;
			allocl = 2;
		} else {
			/* account for '/' and path size minus terminal 0
			 * already in 'l'
			 */
			fpsize += l;
			allocl = fpsize;
		}
	}

	np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
				__alignof__(struct device_node));
	if (allnextpp) {
		memset(np, 0, sizeof(*np));
		np->full_name = ((char *)np) + sizeof(struct device_node);
		if (new_format) {
			char *p = np->full_name;
			/* rebuild full path for new format */
			if (dad && dad->parent) {
				strcpy(p, dad->full_name);
#ifdef DEBUG
				if ((strlen(p) + l + 1) != allocl) {
					DBG("%s: p: %d, l: %d, a: %d\n",
					    pathp, (int)strlen(p), l, allocl);
				}
#endif
				p += strlen(p);
			}
			*(p++) = '/';
			memcpy(p, pathp, l);
		} else
			memcpy(np->full_name, pathp, l);
		prev_pp = &np->properties;
		**allnextpp = np;
		*allnextpp = &np->allnext;
		if (dad != NULL) {
			np->parent = dad;
			/* we temporarily use the next field as `last_child' */
			if (dad->next == 0)
				dad->child = np;
			else
				dad->next->sibling = np;
			dad->next = np;
		}
		kref_init(&np->kref);
	}
	while (1) {
		u32 sz, noff;
		char *pname;

		tag = *((u32 *)(*p));
		if (tag == OF_DT_NOP) {
			*p += 4;
			continue;
		}
		if (tag != OF_DT_PROP)
			break;
		*p += 4;
		sz = *((u32 *)(*p));
		noff = *((u32 *)((*p) + 4));
		*p += 8;
		if (initial_boot_params->version < 0x10)
			*p = _ALIGN(*p, sz >= 8 ? 8 : 4);

		pname = find_flat_dt_string(noff);
		if (pname == NULL) {
			printk("Can't find property name in list !\n");
			break;
		}
		if (strcmp(pname, "name") == 0)
			has_name = 1;
		l = strlen(pname) + 1;
		pp = unflatten_dt_alloc(&mem, sizeof(struct property),
					__alignof__(struct property));
		if (allnextpp) {
			if (strcmp(pname, "linux,phandle") == 0) {
				np->node = *((u32 *)*p);
				if (np->linux_phandle == 0)
					np->linux_phandle = np->node;
			}
			if (strcmp(pname, "ibm,phandle") == 0)
				np->linux_phandle = *((u32 *)*p);
			pp->name = pname;
			pp->length = sz;
			pp->value = (void *)*p;
			*prev_pp = pp;
			prev_pp = &pp->next;
		}
		*p = _ALIGN((*p) + sz, 4);
	}
	/* with version 0x10 we may not have the name property, recreate
	 * it here from the unit name if absent
	 */
	if (!has_name) {
		char *p = pathp, *ps = pathp, *pa = NULL;
		int sz;

		while (*p) {
			if ((*p) == '@')
				pa = p;
			if ((*p) == '/')
				ps = p + 1;
			p++;
		}
		if (pa < ps)
			pa = p;
		sz = (pa - ps) + 1;
		pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
					__alignof__(struct property));
		if (allnextpp) {
			pp->name = "name";
			pp->length = sz;
			pp->value = pp + 1;
			*prev_pp = pp;
			prev_pp = &pp->next;
			memcpy(pp->value, ps, sz - 1);
			((char *)pp->value)[sz - 1] = 0;
			DBG("fixed up name for %s -> %s\n", pathp,
			    (char *)pp->value);
		}
	}
	if (allnextpp) {
		*prev_pp = NULL;
		np->name = of_get_property(np, "name", NULL);
		np->type = of_get_property(np, "device_type", NULL);

		if (!np->name)
			np->name = "<NULL>";
		if (!np->type)
			np->type = "<NULL>";
	}
	while (tag == OF_DT_BEGIN_NODE) {
		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
		tag = *((u32 *)(*p));
	}
	if (tag != OF_DT_END_NODE) {
		printk("Weird tag at end of node: %x\n", tag);
		return mem;
	}
	*p += 4;
	return mem;
}

static int __init early_parse_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = PAGE_ALIGN(memparse(p, &p));
	DBG("memory limit = 0x%llx\n", (unsigned long long)memory_limit);

	return 0;
}
early_param("mem", early_parse_mem);

/**
 * move_device_tree - move tree to an unused area, if needed.
 *
 * The device tree may be allocated beyond our memory limit, or inside the
 * crash kernel region for kdump. If so, move it out of the way.
 */
static void __init move_device_tree(void)
{
	unsigned long start, size;
	void *p;

	DBG("-> move_device_tree\n");

	start = __pa(initial_boot_params);
	size = initial_boot_params->totalsize;

	if ((memory_limit && (start + size) > memory_limit) ||
	    overlaps_crashkernel(start, size)) {
		p = __va(lmb_alloc_base(size, PAGE_SIZE, lmb.rmo_size));
		memcpy(p, initial_boot_params, size);
		initial_boot_params = (struct boot_param_header *)p;
		DBG("Moved device tree to 0x%p\n", p);
	}

	DBG("<- move_device_tree\n");
}

/**
 * unflattens the device-tree passed by the firmware, creating the
 * tree of struct device_node. It also fills the "name" and "type"
 * pointers of the nodes so the normal device-tree walking functions
 * can be used (this used to be done by finish_device_tree)
 */
void __init unflatten_device_tree(void)
{
	unsigned long start, mem, size;
	struct device_node **allnextp = &allnodes;

	DBG(" -> unflatten_device_tree()\n");

	/* First pass, scan for size */
	start = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;
	size = unflatten_dt_node(0, &start, NULL, NULL, 0);
	size = (size | 3) + 1;

	DBG(" size is %lx, allocating...\n", size);

	/* Allocate memory for the expanded device tree */
	mem = lmb_alloc(size + 4, __alignof__(struct device_node));
	mem = (unsigned long) __va(mem);

	((u32 *)mem)[size / 4] = 0xdeadbeef;

	DBG(" unflattening %lx...\n", mem);

	/* Second pass, do actual unflattening */
	start = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;
	unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
	if (*((u32 *)start) != OF_DT_END)
		printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
	if (((u32 *)mem)[size / 4] != 0xdeadbeef)
		printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
		       ((u32 *)mem)[size / 4]);
	*allnextp = NULL;

	/* Get pointer to OF "/chosen" node for use everywhere */
	of_chosen = of_find_node_by_path("/chosen");
	if (of_chosen == NULL)
		of_chosen = of_find_node_by_path("/chosen@0");

	DBG(" <- unflatten_device_tree()\n");
}

/*
 * ibm,pa-features is a per-cpu property that contains a string of
 * attribute descriptors, each of which has a 2 byte header plus up
 * to 254 bytes worth of processor attribute bits. First header
 * byte specifies the number of bytes following the header.
 * Second header byte is an "attribute-specifier" type, of which
 * zero is the only currently-defined value.
 * Implementation: Pass in the byte and bit offset for the feature
 * that we are interested in. The function will return -1 if the
 * pa-features property is missing, or a 1/0 to indicate if the feature
 * is supported/not supported. Note that the bit numbers are
 * big-endian to match the definition in PAPR.
 */
static struct ibm_pa_feature {
	unsigned long cpu_features;	/* CPU_FTR_xxx bit */
	unsigned int cpu_user_ftrs;	/* PPC_FEATURE_xxx bit */
	unsigned char pabyte;		/* byte number in ibm,pa-features */
	unsigned char pabit;		/* bit number (big-endian) */
	unsigned char invert;		/* if 1, pa bit set => clear feature */
} ibm_pa_features[] __initdata = {
	{0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
	{0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
	{CPU_FTR_SLB, 0, 0, 2, 0},
	{CPU_FTR_CTRL, 0, 0, 3, 0},
	{CPU_FTR_NOEXECUTE, 0, 0, 6, 0},
	{CPU_FTR_NODSISRALIGN, 0, 1, 1, 1},
	{CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
	{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
};

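/* Apply an ibm,pa-features style attribute table: first locate the
 * descriptor with attribute-specifier type 0, then set or clear each
 * feature bit we know about based on the descriptor's bit field.
 */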
static void __init scan_features(unsigned long node, unsigned char *ftrs,
				 unsigned long tablelen,
				 struct ibm_pa_feature *fp,
				 unsigned long ft_size)
{
	unsigned long i, len, bit;

	/* find descriptor with type == 0 */
	for (;;) {
		if (tablelen < 3)
			return;
		len = 2 + ftrs[0];
		if (tablelen < len)
			return;		/* descriptor 0 not found */
		if (ftrs[1] == 0)
			break;
		tablelen -= len;
		ftrs += len;
	}

	/* loop over bits we know about */
	for (i = 0; i < ft_size; ++i, ++fp) {
		if (fp->pabyte >= ftrs[0])
			continue;
		bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
		if (bit ^ fp->invert) {
			cur_cpu_spec->cpu_features |= fp->cpu_features;
			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
		} else {
			cur_cpu_spec->cpu_features &= ~fp->cpu_features;
			cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
		}
	}
}

static void __init check_cpu_pa_features(unsigned long node)
{
	unsigned char *pa_ftrs;
	unsigned long tablelen;

	pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
	if (pa_ftrs == NULL)
		return;

	scan_features(node, pa_ftrs, tablelen,
		      ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
}

#ifdef CONFIG_PPC_STD_MMU_64
static void __init check_cpu_slb_size(unsigned long node)
{
	u32 *slb_size_ptr;

	slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL);
	if (slb_size_ptr != NULL) {
		mmu_slb_size = *slb_size_ptr;
		return;
	}
	slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
	if (slb_size_ptr != NULL) {
		mmu_slb_size = *slb_size_ptr;
	}
}
#else
#define check_cpu_slb_size(node) do { } while (0)
#endif

static struct feature_property {
	const char *name;
	u32 min_value;
	unsigned long cpu_feature;
	unsigned long cpu_user_ftr;
} feature_properties[] __initdata = {
#ifdef CONFIG_ALTIVEC
	{"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
	{"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	/* Yes, this _really_ is ibm,vmx == 2 to enable VSX */
	{"ibm,vmx", 2, CPU_FTR_VSX, PPC_FEATURE_HAS_VSX},
#endif /* CONFIG_VSX */
#ifdef CONFIG_PPC64
	{"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP},
	{"ibm,purr", 1, CPU_FTR_PURR, 0},
	{"ibm,spurr", 1, CPU_FTR_SPURR, 0},
#endif /* CONFIG_PPC64 */
};

#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
static inline void identical_pvr_fixup(unsigned long node)
{
	unsigned int pvr;
	char *model = of_get_flat_dt_prop(node, "model", NULL);

	/*
	 * Since 440GR(x)/440EP(x) processors have the same pvr,
	 * we check the node path and set bit 28 in the cur_cpu_spec
	 * pvr for EP(x) processor version. This bit is always 0 in
	 * the "real" pvr. Then we call identify_cpu again with
	 * the new logical pvr to enable FPU support.
	 */
	if (model && strstr(model, "440EP")) {
		pvr = cur_cpu_spec->pvr_value | 0x8;
		identify_cpu(0, pvr);
		DBG("Using logical pvr %x for %s\n", pvr, model);
	}
}
#else
#define identical_pvr_fixup(node) do { } while (0)
#endif

static void __init check_cpu_feature_properties(unsigned long node)
{
	unsigned long i;
	struct feature_property *fp = feature_properties;
	const u32 *prop;

	for (i = 0; i < ARRAY_SIZE(feature_properties); ++i, ++fp) {
		prop = of_get_flat_dt_prop(node, fp->name, NULL);
		if (prop && *prop >= fp->min_value) {
			cur_cpu_spec->cpu_features |= fp->cpu_feature;
			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr;
		}
	}
}

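/* Early scan callback for "cpu" nodes: find the boot CPU among the
 * node's hardware threads, record its logical <-> physical id mapping
 * and apply CPU feature fixups derived from device-tree properties.
 */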
static int __init early_init_dt_scan_cpus(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	static int logical_cpuid = 0;
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const u32 *prop;
	const u32 *intserv;
	int i, nthreads;
	unsigned long len;
	int found = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Get physical cpuid */
	intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
	if (intserv) {
		nthreads = len / sizeof(int);
	} else {
		intserv = of_get_flat_dt_prop(node, "reg", NULL);
		nthreads = 1;
	}

	/*
	 * Now see if any of these threads match our boot cpu.
	 * NOTE: This must match the parsing done in smp_setup_cpu_maps.
	 */
	for (i = 0; i < nthreads; i++) {
		/*
		 * version 2 of the kexec param format adds the phys cpuid of
		 * booted proc.
		 */
		if (initial_boot_params && initial_boot_params->version >= 2) {
			if (intserv[i] ==
					initial_boot_params->boot_cpuid_phys) {
				found = 1;
				break;
			}
		} else {
			/*
			 * Check if it's the boot-cpu, set its hw index now.
			 * Unfortunately this format did not support booting
			 * off secondary threads.
			 */
			if (of_get_flat_dt_prop(node,
					"linux,boot-cpu", NULL) != NULL) {
				found = 1;
				break;
			}
		}

#ifdef CONFIG_SMP
		/* logical cpu id is always 0 on UP kernels */
		logical_cpuid++;
#endif
	}

	if (found) {
		DBG("boot cpu: logical %d physical %d\n", logical_cpuid,
		    intserv[i]);
		boot_cpuid = logical_cpuid;
		set_hard_smp_processor_id(boot_cpuid, intserv[i]);

		/*
		 * PAPR defines "logical" PVR values for cpus that
		 * meet various levels of the architecture:
		 * 0x0f000001	Architecture version 2.04
		 * 0x0f000002	Architecture version 2.05
		 * If the cpu-version property in the cpu node contains
		 * such a value, we call identify_cpu again with the
		 * logical PVR value in order to use the cpu feature
		 * bits appropriate for the architecture level.
		 *
		 * A POWER6 partition in "POWER6 architected" mode
		 * uses the 0x0f000002 PVR value; in POWER5+ mode
		 * it uses 0x0f000001.
		 */
		prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
		if (prop && (*prop & 0xff000000) == 0x0f000000)
			identify_cpu(0, *prop);

		identical_pvr_fixup(node);
	}

	check_cpu_feature_properties(node);
	check_cpu_pa_features(node);
	check_cpu_slb_size(node);

#ifdef CONFIG_PPC_PSERIES
	if (nthreads > 1)
		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
	else
		cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
#endif

	return 0;
}

#ifdef CONFIG_BLK_DEV_INITRD
static void __init early_init_dt_check_for_initrd(unsigned long node)
{
	unsigned long l;
	u32 *prop;

	DBG("Looking for initrd properties... ");

	prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l);
	if (prop) {
		initrd_start = (unsigned long)__va(of_read_ulong(prop, l/4));

		prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l);
		if (prop) {
			initrd_end = (unsigned long)
					__va(of_read_ulong(prop, l/4));
			initrd_below_start_ok = 1;
		} else {
			initrd_start = 0;
		}
	}

	DBG("initrd_start=0x%lx initrd_end=0x%lx\n", initrd_start, initrd_end);
}
#else
static inline void early_init_dt_check_for_initrd(unsigned long node)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */

static int __init early_init_dt_scan_chosen(unsigned long node,
					    const char *uname, int depth, void *data)
{
	unsigned long *lprop;
	unsigned long l;
	char *p;

	DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);

	if (depth != 1 ||
	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
		return 0;

#ifdef CONFIG_PPC64
	/* check if iommu is forced on or off */
	if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
		iommu_is_off = 1;
	if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
		iommu_force_on = 1;
#endif

	/* mem=x on the command line is the preferred mechanism */
	lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
	if (lprop)
		memory_limit = *lprop;

#ifdef CONFIG_PPC64
	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
	if (lprop)
		tce_alloc_start = *lprop;
	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
	if (lprop)
		tce_alloc_end = *lprop;
#endif

#ifdef CONFIG_KEXEC
	lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
	if (lprop)
		crashk_res.start = *lprop;

	lprop = of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
	if (lprop)
		crashk_res.end = crashk_res.start + *lprop - 1;
#endif

	early_init_dt_check_for_initrd(node);

	/* Retrieve command line */
	p = of_get_flat_dt_prop(node, "bootargs", &l);
	if (p != NULL && l > 0)
		strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));

#ifdef CONFIG_CMDLINE
	if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
		strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif /* CONFIG_CMDLINE */

	DBG("Command line is: %s\n", cmd_line);

	/* break now */
	return 1;
}

static int __init early_init_dt_scan_root(unsigned long node,
					  const char *uname, int depth, void *data)
{
	u32 *prop;

	if (depth != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
	dt_root_size_cells = (prop == NULL) ? 1 : *prop;
	DBG("dt_root_size_cells = %x\n", dt_root_size_cells);

	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
	dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
	DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);

	/* break now */
	return 1;
}

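/* Read a number spanning "s" 32-bit cells (most significant cell first)
 * and advance the cell pointer past it.
 */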
static u64 __init dt_mem_next_cell(int s, cell_t **cellp)
{
	cell_t *p = *cellp;

	*cellp = p + s;
	return of_read_number(p, s);
}

#ifdef CONFIG_PPC_PSERIES
/*
 * Interpret the ibm,dynamic-memory property in the
 * /ibm,dynamic-reconfiguration-memory node.
 * This contains a list of memory blocks along with NUMA affinity
 * information.
 */
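/* Each ibm,dynamic-memory entry, as parsed below, is the block's base
 * address (dt_root_addr_cells cells) followed by four more cells: the
 * DRC index, a reserved word, the associativity list index, and the
 * flags word that tells us whether the block is usable.
 */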
static int __init early_init_dt_scan_drconf_memory(unsigned long node)
{
	cell_t *dm, *ls, *usm;
	unsigned long l, n, flags;
	u64 base, size, lmb_size;
	unsigned int is_kexec_kdump = 0, rngs;

	ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
	if (ls == NULL || l < dt_root_size_cells * sizeof(cell_t))
		return 0;
	lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls);

	dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
	if (dm == NULL || l < sizeof(cell_t))
		return 0;

	n = *dm++;	/* number of entries */
	if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(cell_t))
		return 0;

	/* check if this is a kexec/kdump kernel. */
	usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory",
						 &l);
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		base = dt_mem_next_cell(dt_root_addr_cells, &dm);
		flags = dm[3];
		/* skip DRC index, pad, assoc. list index, flags */
		dm += 4;
		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((flags & 0x80) || !(flags & 0x8))
			continue;
		size = lmb_size;
		rngs = 1;
		if (is_kexec_kdump) {
			/*
			 * For each lmb in ibm,dynamic-memory, the
			 * corresponding entry in linux,drconf-usable-memory
			 * contains a counter followed by that many
			 * (base, size) pairs. Read the counter from
			 * linux,drconf-usable-memory now.
			 */
			rngs = dt_mem_next_cell(dt_root_size_cells, &usm);
			if (!rngs) /* there are no (base, size) pairs */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = dt_mem_next_cell(dt_root_addr_cells,
							 &usm);
				size = dt_mem_next_cell(dt_root_size_cells,
							 &usm);
			}
			if (iommu_is_off) {
				if (base >= 0x80000000ul)
					continue;
				if ((base + size) > 0x80000000ul)
					size = 0x80000000ul - base;
			}
			lmb_add(base, size);
		} while (--rngs);
	}
	lmb_dump_all();
	return 0;
}
#else
#define early_init_dt_scan_drconf_memory(node)	0
#endif /* CONFIG_PPC_PSERIES */

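/* Early scan callback for "memory" nodes: decode the (base, size) cell
 * pairs from "linux,usable-memory" (preferred) or "reg" and feed each
 * usable range to the LMB allocator.
 */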
static int __init early_init_dt_scan_memory(unsigned long node,
					    const char *uname, int depth, void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	cell_t *reg, *endp;
	unsigned long l;

	/* Look for the ibm,dynamic-reconfiguration-memory node */
	if (depth == 1 &&
	    strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
		return early_init_dt_scan_drconf_memory(node);

	/* We are scanning "memory" nodes only */
	if (type == NULL) {
		/*
		 * The longtrail doesn't have a device_type on the
		 * /memory node, so look for the node called /memory@0.
		 */
		if (depth != 1 || strcmp(uname, "memory@0") != 0)
			return 0;
	} else if (strcmp(type, "memory") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
	if (reg == NULL)
		reg = of_get_flat_dt_prop(node, "reg", &l);
	if (reg == NULL)
		return 0;

	endp = reg + (l / sizeof(cell_t));

	DBG("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
	    uname, l, reg[0], reg[1], reg[2], reg[3]);

	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
		u64 base, size;

		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
		size = dt_mem_next_cell(dt_root_size_cells, &reg);

		if (size == 0)
			continue;
		DBG(" - %llx , %llx\n", (unsigned long long)base,
		    (unsigned long long)size);
#ifdef CONFIG_PPC64
		if (iommu_is_off) {
			if (base >= 0x80000000ul)
				continue;
			if ((base + size) > 0x80000000ul)
				size = 0x80000000ul - base;
		}
#endif
		lmb_add(base, size);

		memstart_addr = min((u64)memstart_addr, base);
	}

	return 0;
}

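/* Reserve the regions listed in the flat tree's memory reserve map, as
 * well as the device-tree blob itself and the initrd, so the early LMB
 * allocator never hands them out.
 */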
static void __init early_reserve_mem(void)
{
	u64 base, size;
	u64 *reserve_map;
	unsigned long self_base;
	unsigned long self_size;

	reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
					initial_boot_params->off_mem_rsvmap);

	/* before we do anything, lets reserve the dt blob */
	self_base = __pa((unsigned long)initial_boot_params);
	self_size = initial_boot_params->totalsize;
	lmb_reserve(self_base, self_size);

#ifdef CONFIG_BLK_DEV_INITRD
	/* then reserve the initrd, if any */
	if (initrd_start && (initrd_end > initrd_start))
		lmb_reserve(__pa(initrd_start), initrd_end - initrd_start);
#endif /* CONFIG_BLK_DEV_INITRD */

#ifdef CONFIG_PPC32
	/*
	 * Handle the case where we might be booting from an old kexec
	 * image that set up the mem_rsvmap as pairs of 32-bit values
	 */
	if (*reserve_map > 0xffffffffull) {
		u32 base_32, size_32;
		u32 *reserve_map_32 = (u32 *)reserve_map;

		while (1) {
			base_32 = *(reserve_map_32++);
			size_32 = *(reserve_map_32++);
			if (size_32 == 0)
				break;
			/* skip if the reservation is for the blob */
			if (base_32 == self_base && size_32 == self_size)
				continue;
			DBG("reserving: %x -> %x\n", base_32, size_32);
			lmb_reserve(base_32, size_32);
		}
		return;
	}
#endif
	while (1) {
		base = *(reserve_map++);
		size = *(reserve_map++);
		if (size == 0)
			break;
		DBG("reserving: %llx -> %llx\n", base, size);
		lmb_reserve(base, size);
	}
}

#ifdef CONFIG_PHYP_DUMP
/**
 * phyp_dump_calculate_reserve_size() - reserve variable boot area 5% or arg
 *
 * Function to find the largest size we need to reserve
 * during early boot process.
 *
 * It either looks for boot param and returns that OR
 * returns the larger of 256MB or 5% of DRAM, rounded down to
 * a multiple of 256MB.
 *
 */
static inline unsigned long phyp_dump_calculate_reserve_size(void)
{
	unsigned long tmp;

	if (phyp_dump_info->reserve_bootvar)
		return phyp_dump_info->reserve_bootvar;

	/* divide by 20 to get 5% of value */
	tmp = lmb_end_of_DRAM();
	do_div(tmp, 20);

	/* round it down to multiples of 256MB */
	tmp = tmp & ~0x0FFFFFFFUL;

	return (tmp > PHYP_DUMP_RMR_END ? tmp : PHYP_DUMP_RMR_END);
}

/**
 * phyp_dump_reserve_mem() - reserve all not-yet-dumped memory
 *
 * This routine may reserve memory regions in the kernel only
 * if the system is supported and a dump was taken in last
 * boot instance or if the hardware is supported and the
 * scratch area needs to be set up. In other instances it returns
 * without reserving anything. The memory in case of dump being
 * active is freed when the dump is collected (by userland tools).
 */
static void __init phyp_dump_reserve_mem(void)
{
	unsigned long base, size;
	unsigned long variable_reserve_size;

	if (!phyp_dump_info->phyp_dump_configured) {
		printk(KERN_ERR "Phyp-dump not supported on this hardware\n");
		return;
	}

	if (!phyp_dump_info->phyp_dump_at_boot) {
		printk(KERN_INFO "Phyp-dump disabled at boot time\n");
		return;
	}

	variable_reserve_size = phyp_dump_calculate_reserve_size();

	if (phyp_dump_info->phyp_dump_is_active) {
		/* Reserve *everything* above RMR. Area freed by userland tools. */
		base = variable_reserve_size;
		size = lmb_end_of_DRAM() - base;

		/* XXX crashed_ram_end is wrong, since it may be beyond
		 * the memory_limit, it will need to be adjusted. */
		lmb_reserve(base, size);

		phyp_dump_info->init_reserve_start = base;
		phyp_dump_info->init_reserve_size = size;
	} else {
		size = phyp_dump_info->cpu_state_size +
			phyp_dump_info->hpte_region_size +
			variable_reserve_size;
		base = lmb_end_of_DRAM() - size;
		lmb_reserve(base, size);
		phyp_dump_info->init_reserve_start = base;
		phyp_dump_info->init_reserve_size = size;
	}
}
#else
static inline void __init phyp_dump_reserve_mem(void) {}
#endif /* CONFIG_PHYP_DUMP */

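/* Main entry point for early device-tree work: stash the pointer to the
 * flat tree passed by the boot loader, scan it for everything needed
 * this early (chosen node, memory layout, CPUs) and set up the initial
 * LMB reservations.
 */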
void __init early_init_devtree(void *params)
{
	phys_addr_t limit;

	DBG(" -> early_init_devtree(%p)\n", params);

	/* Setup flat device-tree pointer */
	initial_boot_params = params;

#ifdef CONFIG_PPC_RTAS
	/* Some machines might need RTAS info for debugging, grab it now. */
	of_scan_flat_dt(early_init_dt_scan_rtas, NULL);
#endif

#ifdef CONFIG_PHYP_DUMP
	/* scan tree to see if a dump occurred during last boot */
	of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL);
#endif

	/* Retrieve various information from the /chosen node of the
	 * device-tree, including the platform type, initrd location and
	 * size, TCE reserve, and more ...
	 */
	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);

	/* Scan memory nodes and rebuild LMBs */
	lmb_init();
	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	of_scan_flat_dt(early_init_dt_scan_memory, NULL);

	/* Save command line for /proc/cmdline and then parse parameters */
	strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
	parse_early_param();

	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
	lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
	/* If relocatable, reserve first 32k for interrupt vectors etc. */
	if (PHYSICAL_START > MEMORY_START)
		lmb_reserve(MEMORY_START, 0x8000);
	reserve_kdump_trampoline();
	reserve_crashkernel();
	early_reserve_mem();
	phyp_dump_reserve_mem();

	limit = memory_limit;
	if (!limit) {
		phys_addr_t memsize;

		/* Ensure that total memory size is page-aligned, because
		 * otherwise mark_bootmem() gets upset. */
		lmb_analyze();
		memsize = lmb_phys_mem_size();
		if ((memsize & PAGE_MASK) != memsize)
			limit = memsize & PAGE_MASK;
	}
	lmb_enforce_memory_limit(limit);

	lmb_analyze();
	lmb_dump_all();

	DBG("Phys. mem: %llx\n", lmb_phys_mem_size());

	/* We may need to relocate the flat tree, do it now.
	 * FIXME .. and the initrd too? */
	move_device_tree();

	DBG("Scanning CPUs ...\n");

	/* Retrieve CPU-related information from the flat tree
	 * (altivec support, boot CPU ID, ...)
	 */
	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);

	DBG(" <- early_init_devtree()\n");
}

/**
 * Indicates whether the root node has a given value in its
 * compatible property.
 */
int machine_is_compatible(const char *compat)
{
	struct device_node *root;
	int rc = 0;

	root = of_find_node_by_path("/");
	if (root) {
		rc = of_device_is_compatible(root, compat);
		of_node_put(root);
	}
	return rc;
}
EXPORT_SYMBOL(machine_is_compatible);

/*******
 *
 * New implementation of the OF "find" APIs, return a refcounted
 * object, call of_node_put() when done. The device tree and list
 * are protected by a rw_lock.
 *
 * Note that property management will need some locking as well,
 * this isn't dealt with yet.
 *
 *******/

/**
 * of_find_node_by_phandle - Find a node given a phandle
 * @handle: phandle of the node to find
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np;

	read_lock(&devtree_lock);
	for (np = allnodes; np != NULL; np = np->allnext)
		if (np->linux_phandle == handle)
			break;
	of_node_get(np);
	read_unlock(&devtree_lock);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);

/**
 * of_find_next_cache_node - Find a node's subsidiary cache
 * @np: node of type "cpu" or "cache"
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done. Caller should hold a reference
 * to np.
 */
struct device_node *of_find_next_cache_node(struct device_node *np)
{
	struct device_node *child;
	const phandle *handle;

	handle = of_get_property(np, "l2-cache", NULL);
	if (!handle)
		handle = of_get_property(np, "next-level-cache", NULL);

	if (handle)
		return of_find_node_by_phandle(*handle);

	/* OF on pmac has nodes instead of properties named "l2-cache"
	 * beneath CPU nodes.
	 */
	if (!strcmp(np->type, "cpu"))
		for_each_child_of_node(np, child)
			if (!strcmp(child->type, "cache"))
				return child;

	return NULL;
}

/**
 * of_node_get - Increment refcount of a node
 * @node: Node to inc refcount, NULL is supported to
 *	simplify writing of callers
 *
 * Returns node.
 */
struct device_node *of_node_get(struct device_node *node)
{
	if (node)
		kref_get(&node->kref);
	return node;
}
EXPORT_SYMBOL(of_node_get);

static inline struct device_node *kref_to_device_node(struct kref *kref)
{
	return container_of(kref, struct device_node, kref);
}

/**
 * of_node_release - release a dynamically allocated node
 * @kref: kref element of the node to be released
 *
 * In of_node_put() this function is passed to kref_put()
 * as the destructor.
 */
static void of_node_release(struct kref *kref)
{
	struct device_node *node = kref_to_device_node(kref);
	struct property *prop = node->properties;

	/* We should never be releasing nodes that haven't been detached. */
	if (!of_node_check_flag(node, OF_DETACHED)) {
		printk("WARNING: Bad of_node_put() on %s\n", node->full_name);
		dump_stack();
		kref_init(&node->kref);
		return;
	}

	if (!of_node_check_flag(node, OF_DYNAMIC))
		return;

	while (prop) {
		struct property *next = prop->next;
		kfree(prop->name);
		kfree(prop->value);
		kfree(prop);
		prop = next;

		if (!prop) {
			prop = node->deadprops;
			node->deadprops = NULL;
		}
	}
	kfree(node->full_name);
	kfree(node->data);
	kfree(node);
}

/**
 * of_node_put - Decrement refcount of a node
 * @node: Node to dec refcount, NULL is supported to
 *	simplify writing of callers
 *
 */
void of_node_put(struct device_node *node)
{
	if (node)
		kref_put(&node->kref, of_node_release);
}
EXPORT_SYMBOL(of_node_put);

/*
 * Plug a device node into the tree and global list.
 */
void of_attach_node(struct device_node *np)
{
	unsigned long flags;

	write_lock_irqsave(&devtree_lock, flags);
	np->sibling = np->parent->child;
	np->allnext = allnodes;
	np->parent->child = np;
	allnodes = np;
	write_unlock_irqrestore(&devtree_lock, flags);
}

/*
 * "Unplug" a node from the device tree. The caller must hold
 * a reference to the node. The memory associated with the node
 * is not freed until its refcount goes to zero.
 */
void of_detach_node(struct device_node *np)
{
	struct device_node *parent;
	unsigned long flags;

	write_lock_irqsave(&devtree_lock, flags);

	parent = np->parent;
	if (!parent)
		goto out_unlock;

	if (allnodes == np)
		allnodes = np->allnext;
	else {
		struct device_node *prev;
		for (prev = allnodes;
		     prev->allnext != np;
		     prev = prev->allnext)
			;
		prev->allnext = np->allnext;
	}

	if (parent->child == np)
		parent->child = np->sibling;
	else {
		struct device_node *prevsib;
		for (prevsib = np->parent->child;
		     prevsib->sibling != np;
		     prevsib = prevsib->sibling)
			;
		prevsib->sibling = np->sibling;
	}

	of_node_set_flag(np, OF_DETACHED);

out_unlock:
	write_unlock_irqrestore(&devtree_lock, flags);
}

#ifdef CONFIG_PPC_PSERIES
/*
 * Fix up the uninitialized fields in a new device node:
 * name, type and pci-specific fields
 */

static int of_finish_dynamic_node(struct device_node *node)
{
	struct device_node *parent = of_get_parent(node);
	int err = 0;
	const phandle *ibm_phandle;

	node->name = of_get_property(node, "name", NULL);
	node->type = of_get_property(node, "device_type", NULL);

	if (!node->name)
		node->name = "<NULL>";
	if (!node->type)
		node->type = "<NULL>";

	if (!parent) {
		err = -ENODEV;
		goto out;
	}

	/* We don't support that function on PowerMac, at least
	 * not yet
	 */
	if (machine_is(powermac))
		return -ENODEV;

	/* fix up new node's linux_phandle field */
	if ((ibm_phandle = of_get_property(node, "ibm,phandle", NULL)))
		node->linux_phandle = *ibm_phandle;

out:
	of_node_put(parent);
	return err;
}

static int prom_reconfig_notifier(struct notifier_block *nb,
				  unsigned long action, void *node)
{
	int err;

	switch (action) {
	case PSERIES_RECONFIG_ADD:
		err = of_finish_dynamic_node(node);
		if (err < 0) {
			printk(KERN_ERR "finish_node returned %d\n", err);
			err = NOTIFY_BAD;
		}
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block prom_reconfig_nb = {
	.notifier_call = prom_reconfig_notifier,
	.priority = 10,		/* This one needs to run first */
};

static int __init prom_reconfig_setup(void)
{
	return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
}
__initcall(prom_reconfig_setup);
#endif

/*
 * Add a property to a node
 */
int prom_add_property(struct device_node *np, struct property *prop)
{
	struct property **next;
	unsigned long flags;

	prop->next = NULL;
	write_lock_irqsave(&devtree_lock, flags);
	next = &np->properties;
	while (*next) {
		if (strcmp(prop->name, (*next)->name) == 0) {
			/* duplicate! don't insert it */
			write_unlock_irqrestore(&devtree_lock, flags);
			return -1;
		}
		next = &(*next)->next;
	}
	*next = prop;
	write_unlock_irqrestore(&devtree_lock, flags);

#ifdef CONFIG_PROC_DEVICETREE
	/* try to add to proc as well if it was initialized */
	if (np->pde)
		proc_device_tree_add_prop(np->pde, prop);
#endif /* CONFIG_PROC_DEVICETREE */

	return 0;
}

/*
 * Remove a property from a node. Note that we don't actually
 * remove it, since we have given out who-knows-how-many pointers
 * to the data using get-property. Instead we just move the property
 * to the "dead properties" list, so it won't be found any more.
 */
int prom_remove_property(struct device_node *np, struct property *prop)
{
	struct property **next;
	unsigned long flags;
	int found = 0;

	write_lock_irqsave(&devtree_lock, flags);
	next = &np->properties;
	while (*next) {
		if (*next == prop) {
			/* found the node */
			*next = prop->next;
			prop->next = np->deadprops;
			np->deadprops = prop;
			found = 1;
			break;
		}
		next = &(*next)->next;
	}
	write_unlock_irqrestore(&devtree_lock, flags);

	if (!found)
		return -ENODEV;

#ifdef CONFIG_PROC_DEVICETREE
	/* try to remove the proc node as well */
	if (np->pde)
		proc_device_tree_remove_prop(np->pde, prop);
#endif /* CONFIG_PROC_DEVICETREE */

	return 0;
}

/*
 * Update a property in a node. Note that we don't actually
 * remove it, since we have given out who-knows-how-many pointers
 * to the data using get-property. Instead we just move the property
 * to the "dead properties" list, and add the new property to the
 * property list
 */
int prom_update_property(struct device_node *np,
			 struct property *newprop,
			 struct property *oldprop)
{
	struct property **next;
	unsigned long flags;
	int found = 0;

	write_lock_irqsave(&devtree_lock, flags);
	next = &np->properties;
	while (*next) {
		if (*next == oldprop) {
			/* found the node */
			newprop->next = oldprop->next;
			*next = newprop;
			oldprop->next = np->deadprops;
			np->deadprops = oldprop;
			found = 1;
			break;
		}
		next = &(*next)->next;
	}
	write_unlock_irqrestore(&devtree_lock, flags);

	if (!found)
		return -ENODEV;

#ifdef CONFIG_PROC_DEVICETREE
	/* try to add to proc as well if it was initialized */
	if (np->pde)
		proc_device_tree_update_prop(np->pde, newprop, oldprop);
#endif /* CONFIG_PROC_DEVICETREE */

	return 0;
}


/* Find the device node for a given logical cpu number, also returns the cpu
 * local thread number (index in ibm,interrupt-server#s) if relevant and
 * asked for (non NULL)
 */
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
{
	int hardid;
	struct device_node *np;

	hardid = get_hard_smp_processor_id(cpu);

	for_each_node_by_type(np, "cpu") {
		const u32 *intserv;
		unsigned int plen, t;

		/* Check for ibm,ppc-interrupt-server#s. If it doesn't exist,
		 * fall back to the "reg" property and assume no threads
		 */
		intserv = of_get_property(np, "ibm,ppc-interrupt-server#s",
				&plen);
		if (intserv == NULL) {
			const u32 *reg = of_get_property(np, "reg", NULL);
			if (reg == NULL)
				continue;
			if (*reg == hardid) {
				if (thread)
					*thread = 0;
				return np;
			}
		} else {
			plen /= sizeof(u32);
			for (t = 0; t < plen; t++) {
				if (hardid == intserv[t]) {
					if (thread)
						*thread = t;
					return np;
				}
			}
		}
	}
	return NULL;
}
EXPORT_SYMBOL(of_get_cpu_node);

#if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
static struct debugfs_blob_wrapper flat_dt_blob;

static int __init export_flat_device_tree(void)
{
	struct dentry *d;

	flat_dt_blob.data = initial_boot_params;
	flat_dt_blob.size = initial_boot_params->totalsize;

	d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR,
				powerpc_debugfs_root, &flat_dt_blob);
	if (!d)
		return 1;

	return 0;
}
__initcall(export_flat_device_tree);
#endif