of/flattree: Merge early_init_dt_scan_root()
[deliverable/linux.git] / arch / microblaze / kernel / prom.c
1 /*
2 * Procedures for creating, accessing and interpreting the device tree.
3 *
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16 #include <stdarg.h>
17 #include <linux/kernel.h>
18 #include <linux/string.h>
19 #include <linux/init.h>
20 #include <linux/threads.h>
21 #include <linux/spinlock.h>
22 #include <linux/types.h>
23 #include <linux/pci.h>
24 #include <linux/stringify.h>
25 #include <linux/delay.h>
26 #include <linux/initrd.h>
27 #include <linux/bitops.h>
28 #include <linux/module.h>
29 #include <linux/kexec.h>
30 #include <linux/debugfs.h>
31 #include <linux/irq.h>
32 #include <linux/lmb.h>
33
34 #include <asm/prom.h>
35 #include <asm/page.h>
36 #include <asm/processor.h>
37 #include <asm/irq.h>
38 #include <linux/io.h>
39 #include <asm/system.h>
40 #include <asm/mmu.h>
41 #include <asm/pgtable.h>
42 #include <asm/sections.h>
43 #include <asm/pci-bridge.h>
44
45 typedef u32 cell_t;
46
47 /* export that to outside world */
48 struct device_node *of_chosen;
49
50 #define early_init_dt_scan_drconf_memory(node) 0
51
52 static int __init early_init_dt_scan_cpus(unsigned long node,
53 const char *uname, int depth,
54 void *data)
55 {
56 static int logical_cpuid;
57 char *type = of_get_flat_dt_prop(node, "device_type", NULL);
58 const u32 *intserv;
59 int i, nthreads;
60 int found = 0;
61
62 /* We are scanning "cpu" nodes only */
63 if (type == NULL || strcmp(type, "cpu") != 0)
64 return 0;
65
66 /* Get physical cpuid */
67 intserv = of_get_flat_dt_prop(node, "reg", NULL);
68 nthreads = 1;
69
70 /*
71 * Now see if any of these threads match our boot cpu.
72 * NOTE: This must match the parsing done in smp_setup_cpu_maps.
73 */
74 for (i = 0; i < nthreads; i++) {
75 /*
76 * version 2 of the kexec param format adds the phys cpuid of
77 * booted proc.
78 */
79 if (initial_boot_params && initial_boot_params->version >= 2) {
80 if (intserv[i] ==
81 initial_boot_params->boot_cpuid_phys) {
82 found = 1;
83 break;
84 }
85 } else {
86 /*
87 * Check if it's the boot-cpu, set it's hw index now,
88 * unfortunately this format did not support booting
89 * off secondary threads.
90 */
91 if (of_get_flat_dt_prop(node,
92 "linux,boot-cpu", NULL) != NULL) {
93 found = 1;
94 break;
95 }
96 }
97
98 #ifdef CONFIG_SMP
99 /* logical cpu id is always 0 on UP kernels */
100 logical_cpuid++;
101 #endif
102 }
103
104 if (found) {
105 pr_debug("boot cpu: logical %d physical %d\n", logical_cpuid,
106 intserv[i]);
107 boot_cpuid = logical_cpuid;
108 }
109
110 return 0;
111 }
112
113 static int __init early_init_dt_scan_chosen(unsigned long node,
114 const char *uname, int depth, void *data)
115 {
116 unsigned long l;
117 char *p;
118
119 pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
120
121 if (depth != 1 ||
122 (strcmp(uname, "chosen") != 0 &&
123 strcmp(uname, "chosen@0") != 0))
124 return 0;
125
126 #ifdef CONFIG_KEXEC
127 lprop = (u64 *)of_get_flat_dt_prop(node,
128 "linux,crashkernel-base", NULL);
129 if (lprop)
130 crashk_res.start = *lprop;
131
132 lprop = (u64 *)of_get_flat_dt_prop(node,
133 "linux,crashkernel-size", NULL);
134 if (lprop)
135 crashk_res.end = crashk_res.start + *lprop - 1;
136 #endif
137
138 early_init_dt_check_for_initrd(node);
139
140 /* Retreive command line */
141 p = of_get_flat_dt_prop(node, "bootargs", &l);
142 if (p != NULL && l > 0)
143 strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));
144
145 #ifdef CONFIG_CMDLINE
146 #ifndef CONFIG_CMDLINE_FORCE
147 if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
148 #endif
149 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
150 #endif /* CONFIG_CMDLINE */
151
152 pr_debug("Command line is: %s\n", cmd_line);
153
154 /* break now */
155 return 1;
156 }
157
158 static u64 __init dt_mem_next_cell(int s, cell_t **cellp)
159 {
160 cell_t *p = *cellp;
161
162 *cellp = p + s;
163 return of_read_number(p, s);
164 }
165
/*
 * Flat device-tree scan callback: register RAM regions with LMB.
 *
 * Matches "memory" nodes (or a bare /memory@0 node with no device_type),
 * reads their (base, size) cell pairs and feeds each region to lmb_add().
 * Always returns 0 so all memory nodes are visited.
 *
 * NOTE(review): relies on dt_root_addr_cells/dt_root_size_cells already
 * being set — the root-node scan must run before this one.
 */
static int __init early_init_dt_scan_memory(unsigned long node,
				const char *uname, int depth, void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	cell_t *reg, *endp;
	unsigned long l;

	/* Look for the ibm,dynamic-reconfiguration-memory node */
	/* if (depth == 1 &&
		strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
		return early_init_dt_scan_drconf_memory(node);
	*/
	/* We are scanning "memory" nodes only */
	if (type == NULL) {
		/*
		 * The longtrail doesn't have a device_type on the
		 * /memory node, so look for the node called /memory@0.
		 */
		if (depth != 1 || strcmp(uname, "memory@0") != 0)
			return 0;
	} else if (strcmp(type, "memory") != 0)
		return 0;

	/* "linux,usable-memory" overrides "reg" when present */
	reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
	if (reg == NULL)
		reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
	if (reg == NULL)
		return 0;

	/* l is the property length in bytes; endp is one past the last cell */
	endp = reg + (l / sizeof(cell_t));

	pr_debug("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
		uname, l, reg[0], reg[1], reg[2], reg[3]);

	/* Each entry is addr_cells of base followed by size_cells of size */
	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
		u64 base, size;

		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
		size = dt_mem_next_cell(dt_root_size_cells, &reg);

		if (size == 0)
			continue;
		pr_debug(" - %llx , %llx\n", (unsigned long long)base,
			(unsigned long long)size);

		lmb_add(base, size);
	}
	return 0;
}
215
216 #ifdef CONFIG_PHYP_DUMP
/**
 * phyp_dump_calculate_reserve_size() - reserve variable boot area 5% or arg
 *
 * Function to find the largest size we need to reserve
 * during early boot process.
 *
 * It either looks for boot param and returns that OR
 * returns larger of 256MB or 5% rounded down to multiples of 256MB.
 *
 */
static inline unsigned long phyp_dump_calculate_reserve_size(void)
{
	unsigned long tmp;

	/* boot parameter takes precedence over the heuristic below */
	if (phyp_dump_info->reserve_bootvar)
		return phyp_dump_info->reserve_bootvar;

	/* divide by 20 to get 5% of value */
	tmp = lmb_end_of_DRAM();
	do_div(tmp, 20);

	/* round it down to a multiple of 256MB (clear the low 28 bits) */
	tmp = tmp & ~0x0FFFFFFFUL;

	/* never reserve less than the fixed RMR floor */
	return (tmp > PHYP_DUMP_RMR_END ? tmp : PHYP_DUMP_RMR_END);
}
243
/**
 * phyp_dump_reserve_mem() - reserve all not-yet-dumped memory
 *
 * This routine may reserve memory regions in the kernel only
 * if the system is supported and a dump was taken in last
 * boot instance or if the hardware is supported and the
 * scratch area needs to be setup. In other instances it returns
 * without reserving anything. The memory in case of dump being
 * active is freed when the dump is collected (by userland tools).
 */
static void __init phyp_dump_reserve_mem(void)
{
	unsigned long base, size;
	unsigned long variable_reserve_size;

	/* bail out early when the firmware feature is absent or disabled */
	if (!phyp_dump_info->phyp_dump_configured) {
		printk(KERN_ERR "Phyp-dump not supported on this hardware\n");
		return;
	}

	if (!phyp_dump_info->phyp_dump_at_boot) {
		printk(KERN_INFO "Phyp-dump disabled at boot time\n");
		return;
	}

	variable_reserve_size = phyp_dump_calculate_reserve_size();

	if (phyp_dump_info->phyp_dump_is_active) {
		/* Reserve *everything* above RMR.Area freed by userland tools*/
		base = variable_reserve_size;
		size = lmb_end_of_DRAM() - base;

		/* XXX crashed_ram_end is wrong, since it may be beyond
		 * the memory_limit, it will need to be adjusted. */
		lmb_reserve(base, size);

		phyp_dump_info->init_reserve_start = base;
		phyp_dump_info->init_reserve_size = size;
	} else {
		/* no dump pending: carve out a scratch area at the top of RAM */
		size = phyp_dump_info->cpu_state_size +
			phyp_dump_info->hpte_region_size +
			variable_reserve_size;
		base = lmb_end_of_DRAM() - size;
		lmb_reserve(base, size);
		phyp_dump_info->init_reserve_start = base;
		phyp_dump_info->init_reserve_size = size;
	}
}
292 #else
293 static inline void __init phyp_dump_reserve_mem(void) {}
294 #endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */
295
296 #ifdef CONFIG_EARLY_PRINTK
/* MS this is a Microblaze-specific function */
298 static int __init early_init_dt_scan_serial(unsigned long node,
299 const char *uname, int depth, void *data)
300 {
301 unsigned long l;
302 char *p;
303 int *addr;
304
305 pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
306
307 /* find all serial nodes */
308 if (strncmp(uname, "serial", 6) != 0)
309 return 0;
310
311 early_init_dt_check_for_initrd(node);
312
313 /* find compatible node with uartlite */
314 p = of_get_flat_dt_prop(node, "compatible", &l);
315 if ((strncmp(p, "xlnx,xps-uartlite", 17) != 0) &&
316 (strncmp(p, "xlnx,opb-uartlite", 17) != 0))
317 return 0;
318
319 addr = of_get_flat_dt_prop(node, "reg", &l);
320 return *addr; /* return address */
321 }
322
323 /* this function is looking for early uartlite console - Microblaze specific */
324 int __init early_uartlite_console(void)
325 {
326 return of_scan_flat_dt(early_init_dt_scan_serial, NULL);
327 }
328 #endif
329
/*
 * Early boot entry point for flat device-tree processing.
 *
 * @params: physical/virtual pointer to the flattened device-tree blob.
 *
 * Ordering matters here: the root node must be scanned (to establish
 * #address-cells/#size-cells) before the memory scan, and the command
 * line must be captured from /chosen before parse_early_param().
 */
void __init early_init_devtree(void *params)
{
	pr_debug(" -> early_init_devtree(%p)\n", params);

	/* Setup flat device-tree pointer */
	initial_boot_params = params;

#ifdef CONFIG_PHYP_DUMP
	/* scan tree to see if a dump occurred during last boot */
	of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL);
#endif

	/* Retrieve various information from the /chosen node of the
	 * device-tree, including the platform type, initrd location and
	 * size, TCE reserve, and more ...
	 */
	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);

	/* Scan memory nodes and rebuild LMBs */
	lmb_init();
	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	of_scan_flat_dt(early_init_dt_scan_memory, NULL);

	/* Save command line for /proc/cmdline and then parse parameters */
	strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
	parse_early_param();

	lmb_analyze();

	pr_debug("Phys. mem: %lx\n", (unsigned long) lmb_phys_mem_size());

	pr_debug("Scanning CPUs ...\n");

	/* Retrieve CPU related information from the flat tree
	 * (altivec support, boot CPU ID, ...)
	 */
	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);

	pr_debug(" <- early_init_devtree()\n");
}
370
/**
 * machine_is_compatible - test the root node's "compatible" property
 * @compat: compatible string to look for
 *
 * Returns non-zero when the device-tree root node lists @compat in its
 * compatible property, zero otherwise (including when there is no root).
 */
int machine_is_compatible(const char *compat)
{
	struct device_node *root;
	int matched;

	root = of_find_node_by_path("/");
	if (!root)
		return 0;

	matched = of_device_is_compatible(root, compat);
	of_node_put(root);
	return matched;
}
EXPORT_SYMBOL(machine_is_compatible);
388
389 /*******
390 *
391 * New implementation of the OF "find" APIs, return a refcounted
392 * object, call of_node_put() when done. The device tree and list
393 * are protected by a rw_lock.
394 *
395 * Note that property management will need some locking as well,
396 * this isn't dealt with yet.
397 *
398 *******/
399
400 /**
401 * of_find_node_by_phandle - Find a node given a phandle
402 * @handle: phandle of the node to find
403 *
404 * Returns a node pointer with refcount incremented, use
405 * of_node_put() on it when done.
406 */
407 struct device_node *of_find_node_by_phandle(phandle handle)
408 {
409 struct device_node *np;
410
411 read_lock(&devtree_lock);
412 for (np = allnodes; np != NULL; np = np->allnext)
413 if (np->linux_phandle == handle)
414 break;
415 of_node_get(np);
416 read_unlock(&devtree_lock);
417 return np;
418 }
419 EXPORT_SYMBOL(of_find_node_by_phandle);
420
421 /**
422 * of_node_get - Increment refcount of a node
423 * @node: Node to inc refcount, NULL is supported to
424 * simplify writing of callers
425 *
426 * Returns node.
427 */
428 struct device_node *of_node_get(struct device_node *node)
429 {
430 if (node)
431 kref_get(&node->kref);
432 return node;
433 }
434 EXPORT_SYMBOL(of_node_get);
435
/* Map an embedded kref back to the device_node that contains it. */
static inline struct device_node *kref_to_device_node(struct kref *kref)
{
	return container_of(kref, struct device_node, kref);
}
440
/**
 * of_node_release - release a dynamically allocated node
 * @kref: kref element of the node to be released
 *
 * In of_node_put() this function is passed to kref_put()
 * as the destructor.  Frees the node's properties, any
 * previously removed ("dead") properties, and the node itself,
 * but only for OF_DYNAMIC nodes that were properly detached.
 */
static void of_node_release(struct kref *kref)
{
	struct device_node *node = kref_to_device_node(kref);
	struct property *prop = node->properties;

	/* We should never be releasing nodes that haven't been detached. */
	if (!of_node_check_flag(node, OF_DETACHED)) {
		printk(KERN_INFO "WARNING: Bad of_node_put() on %s\n",
			node->full_name);
		dump_stack();
		/* resurrect the refcount so the node stays alive */
		kref_init(&node->kref);
		return;
	}

	/* statically allocated (flat-tree) nodes are never freed */
	if (!of_node_check_flag(node, OF_DYNAMIC))
		return;

	while (prop) {
		struct property *next = prop->next;
		kfree(prop->name);
		kfree(prop->value);
		kfree(prop);
		prop = next;

		/* after the live list, walk the removed-property list too */
		if (!prop) {
			prop = node->deadprops;
			node->deadprops = NULL;
		}
	}
	kfree(node->full_name);
	kfree(node->data);
	kfree(node);
}
481
482 /**
483 * of_node_put - Decrement refcount of a node
484 * @node: Node to dec refcount, NULL is supported to
485 * simplify writing of callers
486 *
487 */
488 void of_node_put(struct device_node *node)
489 {
490 if (node)
491 kref_put(&node->kref, of_node_release);
492 }
493 EXPORT_SYMBOL(of_node_put);
494
/*
 * Plug a device node into the tree and global list.
 *
 * Inserts @np at the head of both its parent's child list and the
 * global allnodes list, under the devtree writer lock.  The sibling/
 * allnext pointers must be set before the list heads are updated.
 */
void of_attach_node(struct device_node *np)
{
	unsigned long flags;

	write_lock_irqsave(&devtree_lock, flags);
	np->sibling = np->parent->child;
	np->allnext = allnodes;
	np->parent->child = np;
	allnodes = np;
	write_unlock_irqrestore(&devtree_lock, flags);
}
509
/*
 * "Unplug" a node from the device tree. The caller must hold
 * a reference to the node. The memory associated with the node
 * is not freed until its refcount goes to zero.
 */
void of_detach_node(struct device_node *np)
{
	struct device_node *parent;
	unsigned long flags;

	write_lock_irqsave(&devtree_lock, flags);

	/* the root node (no parent) can never be detached */
	parent = np->parent;
	if (!parent)
		goto out_unlock;

	/* unlink from the global allnodes singly-linked list */
	if (allnodes == np)
		allnodes = np->allnext;
	else {
		struct device_node *prev;
		for (prev = allnodes;
			prev->allnext != np;
			prev = prev->allnext)
			;
		prev->allnext = np->allnext;
	}

	/* unlink from the parent's sibling list */
	if (parent->child == np)
		parent->child = np->sibling;
	else {
		struct device_node *prevsib;
		for (prevsib = np->parent->child;
			prevsib->sibling != np;
			prevsib = prevsib->sibling)
			;
		prevsib->sibling = np->sibling;
	}

	/* mark detached so of_node_release() will accept the final put */
	of_node_set_flag(np, OF_DETACHED);

out_unlock:
	write_unlock_irqrestore(&devtree_lock, flags);
}
553
554 #if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
555 static struct debugfs_blob_wrapper flat_dt_blob;
556
557 static int __init export_flat_device_tree(void)
558 {
559 struct dentry *d;
560
561 flat_dt_blob.data = initial_boot_params;
562 flat_dt_blob.size = initial_boot_params->totalsize;
563
564 d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR,
565 of_debugfs_root, &flat_dt_blob);
566 if (!d)
567 return 1;
568
569 return 0;
570 }
571 device_initcall(export_flat_device_tree);
572 #endif
This page took 0.0451 seconds and 5 git commands to generate.